@article{VollmerVollmerLangetal.2023, author = {Vollmer, Andreas and Vollmer, Michael and Lang, Gernot and Straub, Anton and K{\"u}bler, Alexander and Gubik, Sebastian and Brands, Roman C. and Hartmann, Stefan and Saravi, Babak}, title = {Automated assessment of radiographic bone loss in the posterior maxilla utilizing a multi-object detection artificial intelligence algorithm}, series = {Applied Sciences}, volume = {13}, journal = {Applied Sciences}, number = {3}, issn = {2076-3417}, doi = {10.3390/app13031858}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305050}, year = {2023}, abstract = {Periodontitis is one of the most prevalent diseases worldwide. The degree of radiographic bone loss can be used to assess the course of therapy or the severity of the disease. Since automated bone loss detection has many benefits, our goal was to develop a multi-object detection algorithm based on artificial intelligence that would be able to detect and quantify radiographic bone loss using standard two-dimensional radiographic images in the maxillary posterior region. This study was conducted by combining three recent online databases and validating the results using an external validation dataset from our organization. There were 1414 images for training and testing and 341 for external validation in the final dataset. We applied a Keypoint R-CNN with a ResNet-50-FPN backbone network for both bounding box and keypoint detection. The intersection over union (IoU) and the object keypoint similarity (OKS) were used for model evaluation. The evaluation of the bounding box metrics showed a moderate overlap with the ground truth, revealing an average precision of up to 0.758. The average precision and recall over all five folds were 0.694 and 0.611, respectively. Mean average precision and recall for the keypoint detection were 0.632 and 0.579, respectively. Despite only using a small and heterogeneous set of images for training, our results indicate that the algorithm is able to learn the objects of interest, although without sufficient accuracy due to the limited number of images and a large amount of information available in panoramic radiographs. Considering the widespread availability of panoramic radiographs as well as the increasing use of online databases, the presented model can be further improved in the future to facilitate its implementation in clinics.}, language = {en} } @article{KunzStellzigEisenhauerBoldt2023, author = {Kunz, Felix and Stellzig-Eisenhauer, Angelika and Boldt, Julian}, title = {Applications of artificial intelligence in orthodontics — an overview and perspective based on the current state of the art}, series = {Applied Sciences}, volume = {13}, journal = {Applied Sciences}, number = {6}, issn = {2076-3417}, doi = {10.3390/app13063850}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-310940}, year = {2023}, abstract = {Artificial intelligence (AI) has already arrived in many areas of our lives and, because of the increasing availability of computing power, can now be used for complex tasks in medicine and dentistry. This is reflected by an exponential increase in scientific publications aiming to integrate AI into everyday clinical routines. Applications of AI in orthodontics are already manifold and range from the identification of anatomical/pathological structures or reference points in imaging to the support of complex decision-making in orthodontic treatment planning.
The aim of this article is to give the reader an overview of the current state of the art regarding applications of AI in orthodontics and to provide a perspective for the use of such AI solutions in clinical routine. For this purpose, we present various use cases for AI in orthodontics, for which research is already available. Considering the current scientific progress, it is not unreasonable to assume that AI will become an integral part of orthodontic diagnostics and treatment planning in the near future. Although AI will equally likely not be able to replace the knowledge and experience of human experts in the not-too-distant future, it probably will be able to support practitioners, thus serving as a quality-assuring component in orthodontic patient care.}, language = {en} } @article{HenckertMalorgioSchweigeretal.2023, author = {Henckert, David and Malorgio, Amos and Schweiger, Giovanna and Raimann, Florian J. and Piekarski, Florian and Zacharowski, Kai and Hottenrott, Sebastian and Meybohm, Patrick and Tscholl, David W. and Spahn, Donat R. and Roche, Tadzio R.}, title = {Attitudes of anesthesiologists toward artificial intelligence in anesthesia: a multicenter, mixed qualitative-quantitative study}, series = {Journal of Clinical Medicine}, volume = {12}, journal = {Journal of Clinical Medicine}, number = {6}, issn = {2077-0383}, doi = {10.3390/jcm12062096}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-311189}, year = {2023}, abstract = {Artificial intelligence (AI) is predicted to play an increasingly important role in perioperative medicine in the very near future. However, little is known about what anesthesiologists know and think about AI in this context. This is important because the successful introduction of new technologies depends on the understanding and cooperation of end users. We sought to investigate how much anesthesiologists know about AI and what they think about the introduction of AI-based technologies into the clinical setting. In order to better understand what anesthesiologists think of AI, we recruited 21 anesthesiologists from 2 university hospitals for face-to-face structured interviews. The interview transcripts were subdivided sentence-by-sentence into discrete statements, and statements were then grouped into key themes. Subsequently, a survey of closed questions based on these themes was sent to 70 anesthesiologists from 3 university hospitals for rating. In the interviews, the base level of knowledge of AI was good at 86 of 90 statements (96\%), although awareness of the potential applications of AI in anesthesia was poor at only 7 of 42 statements (17\%). Regarding the implementation of AI in anesthesia, statements were split roughly evenly between pros (46 of 105, 44\%) and cons (59 of 105, 56\%). Interviewees considered that AI could usefully be used in diverse tasks such as risk stratification, the prediction of vital sign changes, or as a treatment guide. The validity of these themes was probed in a follow-up survey of 70 anesthesiologists with a response rate of 70\%, which confirmed an overall positive view of AI in this group. Anesthesiologists hold a range of opinions, both positive and negative, regarding the application of AI in their field of work. Survey-based studies do not always uncover the full breadth of nuance of opinion amongst clinicians. 
Engagement with specific concerns, both technical and ethical, will prove important as this technology moves from research to the clinic.}, language = {en} } @article{VollmerNaglerHoerneretal.2023, author = {Vollmer, Andreas and Nagler, Simon and H{\"o}rner, Marius and Hartmann, Stefan and Brands, Roman C. and Breitenb{\"u}cher, Niko and Straub, Anton and K{\"u}bler, Alexander and Vollmer, Michael and Gubik, Sebastian and Lang, Gernot and Wollborn, Jakob and Saravi, Babak}, title = {Performance of artificial intelligence-based algorithms to predict prolonged length of stay after head and neck cancer surgery}, series = {Heliyon}, volume = {9}, journal = {Heliyon}, number = {11}, issn = {2405-8440}, doi = {10.1016/j.heliyon.2023.e20752}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-350416}, year = {2023}, abstract = {Background: Medical resource management can be improved by assessing the likelihood of prolonged length of stay (LOS) for head and neck cancer surgery patients. The objective of this study was to develop predictive models that could be used to determine whether a patient's LOS after cancer surgery falls within the normal range of the cohort. Methods: We conducted a retrospective analysis of a dataset consisting of 300 consecutive patients who underwent head and neck cancer surgery between 2017 and 2022 at a single university medical center. Prolonged LOS was defined as LOS exceeding the 75th percentile of the cohort. Feature importance analysis was performed to evaluate the most important predictors for prolonged LOS. We then constructed 7 machine learning and deep learning algorithms for the prediction modeling of prolonged LOS. Results: The algorithms reached accuracy values of 75.40 (radial basis function neural network) to 97.92 (Random Trees) for the training set and 64.90 (multilayer perceptron neural network) to 84.14 (Random Trees) for the testing set. The leading parameters predicting prolonged LOS were operation time, ischemia time, the graft used, the ASA score, the intensive care stay, and the pathological stages. The results revealed that patients who had a higher number of harvested lymph nodes (LN) had a lower probability of recurrence but also a greater LOS. However, patients with prolonged LOS were also at greater risk of recurrence, particularly when fewer LN were extracted. Further, LOS was more strongly correlated with the overall number of extracted lymph nodes than with the number of positive lymph nodes or the ratio of positive to overall extracted lymph nodes, indicating that unnecessary lymph node extraction in particular might be associated with prolonged LOS. Conclusions: The results emphasize the need for a closer follow-up of patients who experience prolonged LOS. Prospective trials are warranted to validate the present results.}, language = {en} } @phdthesis{Hoeser2022, author = {H{\"o}ser, Thorsten}, title = {Global Dynamics of the Offshore Wind Energy Sector Derived from Earth Observation Data - Deep Learning Based Object Detection Optimised with Synthetic Training Data for Offshore Wind Energy Infrastructure Extraction from Sentinel-1 Imagery}, doi = {10.25972/OPUS-29285}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-292857}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {The expansion of renewable energies is being driven by the gradual phaseout of fossil fuels in order to reduce greenhouse gas emissions, the steadily increasing demand for energy and, more recently, by geopolitical events.
The offshore wind energy sector is on the verge of a massive expansion in Europe, the United Kingdom, China, but also in the USA, South Korea and Vietnam. Accordingly, the largest marine infrastructure projects to date will be carried out in the upcoming decades, with thousands of offshore wind turbines being installed. In order to accompany this process globally and to provide a database for research, development and monitoring, this dissertation presents a deep learning-based approach for object detection that enables the derivation of spatiotemporal developments of offshore wind energy infrastructures from satellite-based radar data of the Sentinel-1 mission. For training the deep learning models for offshore wind energy infrastructure detection, an approach is presented that makes it possible to synthetically generate remote sensing data and the necessary annotation for the supervised deep learning process. In this synthetic data generation process, expert knowledge about image content and sensor acquisition techniques is made machine-readable. Finally, extensive and highly variable training data sets are generated from this knowledge representation, with which deep learning models can learn to detect objects in real-world satellite data. The method for the synthetic generation of training data based on expert knowledge offers great potential for deep learning in Earth observation. Applications of deep learning based methods can be developed and tested faster with this procedure. Furthermore, the synthetically generated and thus controllable training data offer the possibility to interpret the learning process of the optimised deep learning models. The method developed in this dissertation to create synthetic remote sensing training data was finally used to optimise deep learning models for the global detection of offshore wind energy infrastructure. For this purpose, images of the entire global coastline from ESA's Sentinel-1 radar mission were evaluated. The derived data set includes over 9,941 objects, which distinguish offshore wind turbines, transformer stations and offshore wind energy infrastructures under construction from each other. In addition to this spatial detection, a quarterly time series from July 2016 to June 2021 was derived for all objects. This time series reveals the start of construction, the construction phase and the time of completion with subsequent operation for each object. The derived offshore wind energy infrastructure data set provides the basis for an analysis of the development of the offshore wind energy sector from July 2016 to June 2021. For this analysis, further attributes of the detected offshore wind turbines were derived. The most important of these are the height and installed capacity of a turbine. The turbine height was calculated by a radargrammetric analysis of the previously detected Sentinel-1 signal and then used to statistically model the installed capacity. The results show that in June 2021, 8,885 offshore wind turbines with a total capacity of 40.6 GW were installed worldwide. The largest installed capacities are in the EU (15.2 GW), China (14.1 GW) and the United Kingdom (10.7 GW). From July 2016 to June 2021, China has expanded 13 GW of offshore wind energy infrastructure. The EU has installed 8 GW and the UK 5.8 GW of offshore wind energy infrastructure in the same period. This temporal analysis shows that China was the main driver of the expansion of the offshore wind energy sector in the period under investigation. 
The derived data set for the description of the offshore wind energy sector was made publicly available. It is thus freely accessible to all decision-makers and stakeholders involved in the development of offshore wind energy projects. Especially in the scientific context, it serves as a database that enables a wide range of investigations. Research questions regarding offshore wind turbines themselves as well as the influence of the expansion in the coming decades can be investigated. This supports the imminent and urgently needed expansion of offshore wind energy in order to promote sustainable expansion in addition to the expansion targets that have been set.}, language = {en} } @phdthesis{Griebel2022, author = {Griebel, Matthias}, title = {Applied Deep Learning: from Data to Deployment}, doi = {10.25972/OPUS-27765}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-277650}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Novel deep learning (DL) architectures, better data availability, and a significant increase in computing power have enabled scientists to solve problems that were considered unassailable for many years. A case in point is the "protein folding problem", a 50-year-old grand challenge in biology that was recently solved by the DL-system AlphaFold. Other examples comprise the development of large DL-based language models that, for instance, generate newspaper articles that hardly differ from those written by humans. However, developing unbiased, reliable, and accurate DL models for various practical applications remains a major challenge - and many promising DL projects get stuck in the piloting stage, never to be completed. In light of these observations, this thesis investigates the practical challenges encountered throughout the life cycle of DL projects and proposes solutions to develop and deploy rigorous DL models. The first part of the thesis is concerned with prototyping DL solutions in different domains. First, we conceptualize guidelines for applied image recognition and showcase their application in a biomedical research project. Next, we illustrate the bottom-up development of a DL backend for an augmented intelligence system in the manufacturing sector. We then turn to the fashion domain and present an artificial curation system for individual fashion outfit recommendations that leverages DL techniques and unstructured data from social media and fashion blogs. After that, we showcase how DL solutions can assist fashion designers in the creative process. Finally, we present our award-winning DL solution for the segmentation of glomeruli in human kidney tissue images that was developed for the Kaggle data science competition HuBMAP - Hacking the Kidney. The second part continues the development path of the biomedical research project beyond the prototyping stage. Using data from five laboratories, we show that ground truth estimation from multiple human annotators and training of DL model ensembles help to establish objectivity, reliability, and validity in DL-based bioimage analyses. In the third part, we present deepflash2, a DL solution that addresses the typical challenges encountered during training, evaluation, and application of DL models in bioimaging. The tool facilitates the objective and reliable segmentation of ambiguous bioimages through multi-expert annotations and integrated quality assurance. 
It is embedded in an easy-to-use graphical user interface and offers best-in-class predictive performance for semantic and instance segmentation under economical usage of computational resources.}, language = {en} } @article{VollmerVollmerLangetal.2022, author = {Vollmer, Andreas and Vollmer, Michael and Lang, Gernot and Straub, Anton and K{\"u}bler, Alexander and Gubik, Sebastian and Brands, Roman C. and Hartmann, Stefan and Saravi, Babak}, title = {Performance analysis of supervised machine learning algorithms for automatized radiographical classification of maxillary third molar impaction}, series = {Applied Sciences}, volume = {12}, journal = {Applied Sciences}, number = {13}, issn = {2076-3417}, doi = {10.3390/app12136740}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281662}, year = {2022}, abstract = {Background: Oro-antral communication (OAC) is a common complication following the extraction of upper molar teeth. The Archer and the Root Sinus (RS) systems can be used to classify impacted teeth in panoramic radiographs. The Archer classes B-D and the Root Sinus classes III, IV have been associated with an increased risk of OAC following tooth extraction in the upper molar region. In our previous study, we found that panoramic radiographs are not reliable for predicting OAC. This study aimed to (1) determine the feasibility of automating the classification (Archer/RS classes) of impacted teeth from panoramic radiographs, (2) determine the distribution of OAC stratified by classification system classes for the purposes of decision tree construction, and (3) determine the feasibility of automating the prediction of OAC utilizing the mentioned classification systems. Methods: We utilized multiple supervised pre-trained machine learning models (VGG16, ResNet50, Inceptionv3, EfficientNet, MobileNetV2), one custom-made convolutional neural network (CNN) model, and a Bag of Visual Words (BoVW) technique to evaluate their performance in predicting the clinical classification systems RS and Archer from panoramic radiographs (Aim 1). We then used Chi-square Automatic Interaction Detectors (CHAID) to determine the distribution of OAC stratified by the Archer/RS classes to introduce a decision tree for simple use in clinics (Aim 2). Lastly, we tested the ability of a multilayer perceptron artificial neural network (MLP) and a radial basis function neural network (RBNN) to predict OAC based on the high-risk classes RS III, IV, and Archer B-D (Aim 3). Results: We achieved accuracies of up to 0.771 for EfficientNet and MobileNetV2 when examining the Archer classification. For the AUC, we obtained values of up to 0.902 for our custom-made CNN. In comparison, the detection of the RS classification achieved accuracies of up to 0.792 for the BoVW and an AUC of up to 0.716 for our custom-made CNN. Overall, the Archer classification was detected more reliably than the RS classification when considering all algorithms. CHAID predicted 77.4\% correctness for the Archer classification and 81.4\% for the RS classification. MLP (AUC: 0.590) and RBNN (AUC: 0.590) for the Archer classification as well as MLP (AUC: 0.638) and RBNN (AUC: 0.630) for the RS classification did not show sufficient predictive capability for OAC. Conclusions: The results reveal that impacted teeth can be classified using panoramic radiographs (best AUC: 0.902), and the classification systems can be stratified according to their relationship to OAC (81.4\% correct for RS classification).
However, the Archer and RS classes did not achieve satisfactory AUCs for predicting OAC (best AUC: 0.638). Additional research is needed to validate the results externally and to develop a reliable risk stratification tool based on the present findings.}, language = {en} } @article{VollmerSaraviVollmeretal.2022, author = {Vollmer, Andreas and Saravi, Babak and Vollmer, Michael and Lang, Gernot Michael and Straub, Anton and Brands, Roman C. and K{\"u}bler, Alexander and Gubik, Sebastian and Hartmann, Stefan}, title = {Artificial intelligence-based prediction of oroantral communication after tooth extraction utilizing preoperative panoramic radiography}, series = {Diagnostics}, volume = {12}, journal = {Diagnostics}, number = {6}, issn = {2075-4418}, doi = {10.3390/diagnostics12061406}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-278814}, year = {2022}, abstract = {Oroantral communication (OAC) is a common complication after tooth extraction of upper molars. Profound preoperative panoramic radiography analysis might potentially help predict OAC following tooth extraction. In this exploratory study, we evaluated n = 300 consecutive cases (100 OAC and 200 controls) and trained five machine learning algorithms (VGG16, InceptionV3, MobileNetV2, EfficientNet, and ResNet50) to predict OAC versus non-OAC (binary classification task) from the input images. Further, four oral and maxillofacial experts evaluated the respective panoramic radiographs and determined performance metrics (accuracy, area under the curve (AUC), precision, recall, F1-score, and receiver operating characteristics curve) of all diagnostic approaches. Cohen's kappa was used to evaluate the agreement between expert evaluations. The deep learning algorithms reached high specificity (highest specificity 100\% for InceptionV3) but low sensitivity (highest sensitivity 42.86\% for MobileNetV2). The AUCs from VGG16, InceptionV3, MobileNetV2, EfficientNet, and ResNet50 were 0.53, 0.60, 0.67, 0.51, and 0.56, respectively. Experts 1-4 reached AUCs of 0.550, 0.629, 0.500, and 0.579, respectively. The specificity of the expert evaluations ranged from 51.74\% to 95.02\%, whereas sensitivity ranged from 14.14\% to 59.60\%. Cohen's kappa revealed a poor agreement for the oral and maxillofacial expert evaluations (Cohen's kappa: 0.1285). Overall, present data indicate that OAC cannot be sufficiently predicted from preoperative panoramic radiography. The false-negative rate, i.e., the rate of positive cases (OAC) missed by the deep learning algorithms, ranged from 57.14\% to 95.24\%. Surgeons should not solely rely on panoramic radiography when evaluating the probability of OAC occurrence. Clinical testing of OAC is warranted after each upper-molar tooth extraction.}, language = {en} } @article{VollmerVollmerLangetal.2022, author = {Vollmer, Andreas and Vollmer, Michael and Lang, Gernot and Straub, Anton and Shavlokhova, Veronika and K{\"u}bler, Alexander and Gubik, Sebastian and Brands, Roman and Hartmann, Stefan and Saravi, Babak}, title = {Associations between periodontitis and COPD: An artificial intelligence-based analysis of NHANES III}, series = {Journal of Clinical Medicine}, volume = {11}, journal = {Journal of Clinical Medicine}, number = {23}, issn = {2077-0383}, doi = {10.3390/jcm11237210}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-312713}, year = {2022}, abstract = {A number of cross-sectional epidemiological studies suggest that poor oral health is associated with respiratory diseases.
However, the number of cases within the studies was limited, and the studies had different measurement conditions. By analyzing data from the National Health and Nutrition Examination Survey III (NHANES III), this study aimed to investigate possible associations between chronic obstructive pulmonary disease (COPD) and periodontitis in the general population. COPD was diagnosed in cases where the FEV1/FVC ratio was below 70\% (non-COPD versus COPD; binary classification task). We used unsupervised learning utilizing k-means clustering to identify clusters in the data. COPD classes were predicted with logistic regression, a random forest classifier, a stochastic gradient descent (SGD) classifier, k-nearest neighbors, a decision tree classifier, Gaussian naive Bayes (GaussianNB), support vector machines (SVM), a custom-made convolutional neural network (CNN), a multilayer perceptron artificial neural network (MLP), and a radial basis function neural network (RBNN) in Python. We calculated the accuracy of the prediction and the area under the curve (AUC). The most important predictors were determined using feature importance analysis. Results: Overall, 15,868 participants and 19 feature variables were included. Based on k-means clustering, the data were separated into two clusters that identified two risk characteristic groups of patients. The algorithms reached AUCs between 0.608 (decision tree classifier) and 0.953 (CNN) for the classification of COPD classes. Feature importance analysis of deep learning algorithms indicated that age and mean attachment loss were the most important features in predicting COPD. Conclusions: Data analysis of a large population showed that machine learning and deep learning algorithms could predict COPD cases based on demographics and oral health feature variables. This study indicates that periodontitis might be an important predictor of COPD. Further prospective studies examining the association between periodontitis and COPD are warranted to validate the present results.}, language = {en} } @article{WannerHermHeinrichetal.2022, author = {Wanner, Jonas and Herm, Lukas-Valentin and Heinrich, Kai and Janiesch, Christian}, title = {The effect of transparency and trust on intelligent system acceptance: evidence from a user-based study}, series = {Electronic Markets}, volume = {32}, journal = {Electronic Markets}, number = {4}, issn = {1019-6781}, doi = {10.1007/s12525-022-00593-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323829}, pages = {2079-2102}, year = {2022}, abstract = {Contemporary decision support systems are increasingly relying on artificial intelligence technology such as machine learning algorithms to form intelligent systems. These systems have human-like decision capacity for selected applications based on a decision rationale which cannot be looked up conveniently and constitutes a black box. As a consequence, acceptance by end-users remains somewhat hesitant. While lacking transparency has been said to hinder trust and enforce aversion towards these systems, studies that connect user trust to transparency and subsequently acceptance are scarce. In response, our research is concerned with the development of a theoretical model that explains end-user acceptance of intelligent systems. We utilize the unified theory of acceptance and use of technology as well as explanation theory and related theories on initial trust and user trust in information systems.
The proposed model is tested in an industrial maintenance workplace scenario using maintenance experts as participants to represent the user group. Results show that acceptance is performance-driven at first sight. However, transparency plays an important indirect role in regulating trust and the perception of performance.}, language = {en} }