@article{PhilippDietzUllmannetal.2023, author = {Philipp, Marius and Dietz, Andreas and Ullmann, Tobias and Kuenzer, Claudia}, title = {A circum-Arctic monitoring framework for quantifying annual erosion rates of permafrost coasts}, series = {Remote Sensing}, volume = {15}, journal = {Remote Sensing}, number = {3}, issn = {2072-4292}, doi = {10.3390/rs15030818}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-304447}, year = {2023}, abstract = {This study demonstrates a circum-Arctic monitoring framework for quantifying annual change of permafrost-affected coasts at a spatial resolution of 10 m. Frequent cloud coverage and challenging lighting conditions, including polar night, limit the usability of optical data in Arctic regions. For this reason, Synthetic Aperture RADAR (SAR) data in the form of annual median and standard deviation (sd) Sentinel-1 (S1) backscatter images covering the months June-September for the years 2017-2021 were computed. Annual composites for the year 2020 were hereby utilized as input for the generation of a high-quality coastline product via a Deep Learning (DL) workflow, covering 161,600 km of the Arctic coastline. The previously computed annual S1 composites for the years 2017 and 2021 were employed as input data for the Change Vector Analysis (CVA)-based coastal change investigation. The generated DL coastline product served hereby as a reference. Maximum erosion rates of up to 67 m per year could be observed based on 400 m coastline segments. Overall highest average annual erosion can be reported for the United States (Alaska) with 0.75 m per year, followed by Russia with 0.62 m per year. Out of all seas covered in this study, the Beaufort Sea featured the overall strongest average annual coastal erosion of 1.12 m. Several quality layers are provided for both the DL coastline product and the CVA-based coastal change analysis to assess the applicability and accuracy of the output products. 
The predicted coastal change rates show good agreement with findings published in previous literature. The proposed methods and data may act as a valuable tool for future analysis of permafrost loss and carbon emissions in Arctic coastal environments.}, language = {en} } @article{WechAnkenbrandBleyetal.2022, author = {Wech, Tobias and Ankenbrand, Markus Johannes and Bley, Thorsten Alexander and Heidenreich, Julius Frederik}, title = {A data-driven semantic segmentation model for direct cardiac functional analysis based on undersampled radial MR cine series}, series = {Magnetic Resonance in Medicine}, volume = {87}, journal = {Magnetic Resonance in Medicine}, number = {2}, doi = {10.1002/mrm.29017}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-257616}, pages = {972--983}, year = {2022}, abstract = {Purpose Image acquisition and subsequent manual analysis of cardiac cine MRI is time-consuming. The purpose of this study was to train and evaluate a 3D artificial neural network for semantic segmentation of radially undersampled cardiac MRI to accelerate both scan time and postprocessing. Methods A database of Cartesian short-axis MR images of the heart (148,500 images, 484 examinations) was assembled from an openly accessible database and radial undersampling was simulated. A 3D U-Net architecture was pretrained for segmentation of undersampled spatiotemporal cine MRI. Transfer learning was then performed using samples from a second database, comprising 108 non-Cartesian radial cine series of the midventricular myocardium to optimize the performance for authentic data. The performance was evaluated for different levels of undersampling by the Dice similarity coefficient (DSC) with respect to reference labels, as well as by deriving ventricular volumes and myocardial masses. 
Results Without transfer learning, the pretrained model performed moderately on true radial data [maximum number of projections tested, P = 196; DSC = 0.87 (left ventricle), DSC = 0.76 (myocardium), and DSC =0.64 (right ventricle)]. After transfer learning with authentic data, the predictions achieved human level even for high undersampling rates (P = 33, DSC = 0.95, 0.87, and 0.93) without significant difference compared with segmentations derived from fully sampled data. Conclusion A 3D U-Net architecture can be used for semantic segmentation of radially undersampled cine acquisitions, achieving a performance comparable with human experts in fully sampled data. This approach can jointly accelerate time-consuming cine image acquisition and cumbersome manual image analysis.}, language = {en} } @article{DirscherlDietzKneiseletal.2021, author = {Dirscherl, Mariel and Dietz, Andreas J. and Kneisel, Christof and Kuenzer, Claudia}, title = {A novel method for automated supraglacial lake mapping in Antarctica using Sentinel-1 SAR imagery and deep learning}, series = {Remote Sensing}, volume = {13}, journal = {Remote Sensing}, number = {2}, issn = {2072-4292}, doi = {10.3390/rs13020197}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-222998}, year = {2021}, abstract = {Supraglacial meltwater accumulation on ice sheets can be a main driver for accelerated ice discharge, mass loss, and global sea-level-rise. With further increasing surface air temperatures, meltwater-induced hydrofracturing, basal sliding, or surface thinning will cumulate and most likely trigger unprecedented ice mass loss on the Greenland and Antarctic ice sheets. While the Greenland surface hydrological network as well as its impacts on ice dynamics and mass balance has been studied in much detail, Antarctic supraglacial lakes remain understudied with a circum-Antarctic record of their spatio-temporal development entirely lacking. 
This study provides the first automated supraglacial lake extent mapping method using Sentinel-1 synthetic aperture radar (SAR) imagery over Antarctica and complements the developed optical Sentinel-2 supraglacial lake detection algorithm presented in our companion paper. In detail, we propose the use of a modified U-Net for semantic segmentation of supraglacial lakes in single-polarized Sentinel-1 imagery. The convolutional neural network (CNN) is implemented with residual connections for optimized performance as well as an Atrous Spatial Pyramid Pooling (ASPP) module for multiscale feature extraction. The algorithm is trained on 21,200 Sentinel-1 image patches and evaluated in ten spatially or temporally independent test acquisitions. In addition, George VI Ice Shelf is analyzed for intra-annual lake dynamics throughout austral summer 2019/2020 and a decision-level fused Sentinel-1 and Sentinel-2 maximum lake extent mapping product is presented for January 2020 revealing a more complete supraglacial lake coverage (~770 km\(^2\)) than the individual single-sensor products. Classification results confirm the reliability of the proposed workflow with an average Kappa coefficient of 0.925 and a F\(_1\)-score of 93.0\% for the supraglacial water class across all test regions. Furthermore, the algorithm is applied in an additional test region covering supraglacial lakes on the Greenland ice sheet which further highlights the potential for spatio-temporal transferability. Future work involves the integration of more training data as well as intra-annual analyses of supraglacial lake occurrence across the whole continent and with focus on supraglacial lake development throughout a summer melt season and into Antarctic winter.}, language = {en} } @article{KrenzerBanckMakowskietal.2023, author = {Krenzer, Adrian and Banck, Michael and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Sudarevic, Boban and Zoller, Wolfram G. 
and Hann, Alexander and Puppe, Frank}, title = {A real-time polyp-detection system with clinical application in colonoscopy using deep convolutional neural networks}, series = {Journal of Imaging}, volume = {9}, journal = {Journal of Imaging}, number = {2}, issn = {2313-433X}, doi = {10.3390/jimaging9020026}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-304454}, year = {2023}, abstract = {Colorectal cancer (CRC) is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is with a colonoscopy. During this procedure, the gastroenterologist searches for polyps. However, there is a potential risk of polyps being missed by the gastroenterologist. Automated detection of polyps helps to assist the gastroenterologist during a colonoscopy. There are already publications examining the problem of polyp detection in the literature. Nevertheless, most of these systems are only used in the research context and are not implemented for clinical application. Therefore, we introduce the first fully open-source automated polyp-detection system scoring best on current benchmark data and implementing it ready for clinical application. To create the polyp-detection system (ENDOMIND-Advanced), we combined our own collected data from different hospitals and practices in Germany with open-source datasets to create a dataset with over 500,000 annotated images. ENDOMIND-Advanced leverages a post-processing technique based on video detection to work in real-time with a stream of images. It is integrated into a prototype ready for application in clinical interventions. 
We achieve better performance compared to the best system in the literature and score a F1-score of 90.24\% on the open-source CVC-VideoClinicDB benchmark.}, language = {en} } @article{KunzStellzigEisenhauerBoldt2023, author = {Kunz, Felix and Stellzig-Eisenhauer, Angelika and Boldt, Julian}, title = {Applications of artificial intelligence in orthodontics --- an overview and perspective based on the current state of the art}, series = {Applied Sciences}, volume = {13}, journal = {Applied Sciences}, number = {6}, issn = {2076-3417}, doi = {10.3390/app13063850}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-310940}, year = {2023}, abstract = {Artificial intelligence (AI) has already arrived in many areas of our lives and, because of the increasing availability of computing power, can now be used for complex tasks in medicine and dentistry. This is reflected by an exponential increase in scientific publications aiming to integrate AI into everyday clinical routines. Applications of AI in orthodontics are already manifold and range from the identification of anatomical/pathological structures or reference points in imaging to the support of complex decision-making in orthodontic treatment planning. The aim of this article is to give the reader an overview of the current state of the art regarding applications of AI in orthodontics and to provide a perspective for the use of such AI solutions in clinical routine. For this purpose, we present various use cases for AI in orthodontics, for which research is already available. Considering the current scientific progress, it is not unreasonable to assume that AI will become an integral part of orthodontic diagnostics and treatment planning in the near future. 
Although AI will equally likely not be able to replace the knowledge and experience of human experts in the not-too-distant future, it probably will be able to support practitioners, thus serving as a quality-assuring component in orthodontic patient care.}, language = {en} } @article{VollmerSaraviVollmeretal.2022, author = {Vollmer, Andreas and Saravi, Babak and Vollmer, Michael and Lang, Gernot Michael and Straub, Anton and Brands, Roman C. and K{\"u}bler, Alexander and Gubik, Sebastian and Hartmann, Stefan}, title = {Artificial intelligence-based prediction of oroantral communication after tooth extraction utilizing preoperative panoramic radiography}, series = {Diagnostics}, volume = {12}, journal = {Diagnostics}, number = {6}, issn = {2075-4418}, doi = {10.3390/diagnostics12061406}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-278814}, year = {2022}, abstract = {Oroantral communication (OAC) is a common complication after tooth extraction of upper molars. Profound preoperative panoramic radiography analysis might potentially help predict OAC following tooth extraction. In this exploratory study, we evaluated n = 300 consecutive cases (100 OAC and 200 controls) and trained five machine learning algorithms (VGG16, InceptionV3, MobileNetV2, EfficientNet, and ResNet50) to predict OAC versus non-OAC (binary classification task) from the input images. Further, four oral and maxillofacial experts evaluated the respective panoramic radiography and determined performance metrics (accuracy, area under the curve (AUC), precision, recall, F1-score, and receiver operating characteristics curve) of all diagnostic approaches. Cohen's kappa was used to evaluate the agreement between expert evaluations. The deep learning algorithms reached high specificity (highest specificity 100\% for InceptionV3) but low sensitivity (highest sensitivity 42.86\% for MobileNetV2). 
The AUCs from VGG16, InceptionV3, MobileNetV2, EfficientNet, and ResNet50 were 0.53, 0.60, 0.67, 0.51, and 0.56, respectively. Expert 1-4 reached an AUC of 0.550, 0.629, 0.500, and 0.579, respectively. The specificity of the expert evaluations ranged from 51.74\% to 95.02\%, whereas sensitivity ranged from 14.14\% to 59.60\%. Cohen's kappa revealed a poor agreement for the oral and maxillofacial expert evaluations (Cohen's kappa: 0.1285). Overall, present data indicate that OAC cannot be sufficiently predicted from preoperative panoramic radiography. The false-negative rate, i.e., the rate of positive cases (OAC) missed by the deep learning algorithms, ranged from 57.14\% to 95.24\%. Surgeons should not solely rely on panoramic radiography when evaluating the probability of OAC occurrence. Clinical testing of OAC is warranted after each upper-molar tooth extraction.}, language = {en} } @article{VollmerVollmerLangetal.2023, author = {Vollmer, Andreas and Vollmer, Michael and Lang, Gernot and Straub, Anton and K{\"u}bler, Alexander and Gubik, Sebastian and Brands, Roman C. and Hartmann, Stefan and Saravi, Babak}, title = {Automated assessment of radiographic bone loss in the posterior maxilla utilizing a multi-object detection artificial intelligence algorithm}, series = {Applied Sciences}, volume = {13}, journal = {Applied Sciences}, number = {3}, issn = {2076-3417}, doi = {10.3390/app13031858}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305050}, year = {2023}, abstract = {Periodontitis is one of the most prevalent diseases worldwide. The degree of radiographic bone loss can be used to assess the course of therapy or the severity of the disease. Since automated bone loss detection has many benefits, our goal was to develop a multi-object detection algorithm based on artificial intelligence that would be able to detect and quantify radiographic bone loss using standard two-dimensional radiographic images in the maxillary posterior region. 
This study was conducted by combining three recent online databases and validating the results using an external validation dataset from our organization. There were 1414 images for training and testing and 341 for external validation in the final dataset. We applied a Keypoint RCNN with a ResNet-50-FPN backbone network for both boundary box and keypoint detection. The intersection over union (IoU) and the object keypoint similarity (OKS) were used for model evaluation. The evaluation of the boundary box metrics showed a moderate overlapping with the ground truth, revealing an average precision of up to 0.758. The average precision and recall over all five folds were 0.694 and 0.611, respectively. Mean average precision and recall for the keypoint detection were 0.632 and 0.579, respectively. Despite only using a small and heterogeneous set of images for training, our results indicate that the algorithm is able to learn the objects of interest, although without sufficient accuracy due to the limited number of images and a large amount of information available in panoramic radiographs. Considering the widespread availability of panoramic radiographs as well as the increasing use of online databases, the presented model can be further improved in the future to facilitate its implementation in clinics.}, language = {en} } @article{KrenzerHeilFittingetal., author = {Krenzer, Adrian and Heil, Stefan and Fitting, Daniel and Matti, Safa and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Automated classification of polyps using deep learning architectures and few-shot learning}, series = {BMC Medical Imaging}, volume = {23}, journal = {BMC Medical Imaging}, doi = {10.1186/s12880-023-01007-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357465}, year = {2023}, abstract = {Background Colorectal cancer is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is a colonoscopy. 
However, not all colon polyps have the risk of becoming cancerous. Therefore, polyps are classified using different classification systems. After the classification, further treatment and procedures are based on the classification of the polyp. Nevertheless, classification is not easy. Therefore, we suggest two novel automated classification systems assisting gastroenterologists in classifying polyps based on the NICE and Paris classification. Methods We build two classification systems. One is classifying polyps based on their shape (Paris). The other classifies polyps based on their texture and surface patterns (NICE). A two-step process for the Paris classification is introduced: First, detecting and cropping the polyp on the image, and secondly, classifying the polyp based on the cropped area with a transformer network. For the NICE classification, we design a few-shot learning algorithm based on the Deep Metric Learning approach. The algorithm creates an embedding space for polyps, which allows classification from a few examples to account for the data scarcity of NICE annotated images in our database. Results For the Paris classification, we achieve an accuracy of 89.35 \%, surpassing all papers in the literature and establishing a new state-of-the-art and baseline accuracy for other publications on a public data set. For the NICE classification, we achieve a competitive accuracy of 81.13 \% and demonstrate thereby the viability of the few-shot learning paradigm in polyp classification in data-scarce environments. Additionally, we show different ablations of the algorithms. Finally, we further elaborate on the explainability of the system by showing heat maps of the neural network explaining neural activations. Conclusion Overall we introduce two polyp classification systems to assist gastroenterologists. 
We achieve state-of-the-art performance in the Paris classification and demonstrate the viability of the few-shot learning paradigm in the NICE classification, addressing the prevalent data scarcity issues faced in medical machine learning.}, language = {en} } @article{PhilippDietzUllmannetal.2022, author = {Philipp, Marius and Dietz, Andreas and Ullmann, Tobias and Kuenzer, Claudia}, title = {Automated extraction of annual erosion rates for Arctic permafrost coasts using Sentinel-1, Deep Learning, and Change Vector Analysis}, series = {Remote Sensing}, volume = {14}, journal = {Remote Sensing}, number = {15}, issn = {2072-4292}, doi = {10.3390/rs14153656}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281956}, year = {2022}, abstract = {Arctic permafrost coasts become increasingly vulnerable due to environmental drivers such as the reduced sea-ice extent and duration as well as the thawing of permafrost itself. A continuous quantification of the erosion process on large to circum-Arctic scales is required to fully assess the extent and understand the consequences of eroding permafrost coastlines. This study presents a novel approach to quantify annual Arctic coastal erosion and build-up rates based on Sentinel-1 (S1) Synthetic Aperture RADAR (SAR) backscatter data, in combination with Deep Learning (DL) and Change Vector Analysis (CVA). The methodology includes the generation of a high-quality Arctic coastline product via DL, which acted as a reference for quantifying coastal erosion and build-up rates from annual median and standard deviation (sd) backscatter images via CVA. The analysis was applied on ten test sites distributed across the Arctic and covering about 1038 km of coastline. Results revealed maximum erosion rates of up to 160 m for some areas and an average erosion rate of 4.37 m across all test sites within a three-year temporal window from 2017 to 2020. 
The observed erosion rates within the framework of this study agree with findings published in the previous literature. The proposed methods and data can be applied on large scales and, prospectively, even for the entire Arctic. The generated products may be used for quantifying the loss of frozen ground, estimating the release of stored organic material, and can act as a basis for further related studies in Arctic coastal environments.}, language = {en} } @article{BaumhoerDietzKneiseletal.2019, author = {Baumhoer, Celia A. and Dietz, Andreas J. and Kneisel, Christof and Kuenzer, Claudia}, title = {Automated Extraction of Antarctic Glacier and Ice Shelf Fronts from Sentinel-1 Imagery Using Deep Learning}, series = {Remote Sensing}, volume = {11}, journal = {Remote Sensing}, number = {21}, issn = {2072-4292}, doi = {10.3390/rs11212529}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193150}, pages = {2529}, year = {2019}, abstract = {Sea level rise contribution from the Antarctic ice sheet is influenced by changes in glacier and ice shelf front position. Still, little is known about seasonal glacier and ice shelf front fluctuations as the manual delineation of calving fronts from remote sensing imagery is very time-consuming. The major challenge of automatic calving front extraction is the low contrast between floating glacier and ice shelf fronts and the surrounding sea ice. Additionally, in previous decades, remote sensing imagery over the often cloud-covered Antarctic coastline was limited. Nowadays, an abundance of Sentinel-1 imagery over the Antarctic coastline exists and could be used for tracking glacier and ice shelf front movement. To exploit the available Sentinel-1 data, we developed a processing chain allowing automatic extraction of the Antarctic coastline from Sentinel-1 imagery and the creation of dense time series to assess calving front change. The core of the proposed workflow is a modified version of the deep learning architecture U-Net. 
This convolutional neural network (CNN) performs a semantic segmentation on dual-pol Sentinel-1 data and the Antarctic TanDEM-X digital elevation model (DEM). The proposed method is tested for four training and test areas along the Antarctic coastline. The automatically extracted fronts deviate on average 78 m in training and 108 m test areas. Spatial and temporal transferability is demonstrated on an automatically extracted 15-month time series along the Getz Ice Shelf. Between May 2017 and July 2018, the fronts along the Getz Ice Shelf show mostly an advancing tendency with the fastest moving front of DeVicq Glacier with 726 ± 20 m/yr.}, language = {en} }