@article{BeierlePryssAizawa2023, author = {Beierle, Felix and Pryss, R{\"u}diger and Aizawa, Akiko}, title = {Sentiments about mental health on Twitter — before and during the COVID-19 pandemic}, series = {Healthcare}, volume = {11}, journal = {Healthcare}, number = {21}, issn = {2227-9032}, doi = {10.3390/healthcare11212893}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-355192}, year = {2023}, abstract = {During the COVID-19 pandemic, the novel coronavirus had an impact not only on public health but also on the mental health of the population. Public sentiment on mental health and depression is often captured only in small, survey-based studies, while work based on Twitter data often only looks at the period during the pandemic and does not make comparisons with the pre-pandemic situation. We collected tweets that included the hashtags \#MentalHealth and \#Depression from before and during the pandemic (8.5 months each). We used LDA (Latent Dirichlet Allocation) for topic modeling and LIWC, VADER, and NRC for sentiment analysis. We used three machine-learning classifiers to seek evidence regarding an automatically detectable change in tweets before vs. during the pandemic: (1) based on TF-IDF values, (2) based on the values from the sentiment libraries, (3) based on tweet content (deep-learning BERT classifier). Topic modeling revealed that Twitter users who explicitly used the hashtags \#Depression and especially \#MentalHealth did so to raise awareness. We observed an overall positive sentiment, and in tough times such as during the COVID-19 pandemic, tweets with \#MentalHealth were often associated with gratitude. Among the three classification approaches, the BERT classifier showed the best performance, with an accuracy of 81\% for \#MentalHealth and 79\% for \#Depression. Although the data may have come from users familiar with mental health, these findings can help gauge public sentiment on the topic. The combination of (1) sentiment analysis, (2) topic modeling, and (3) tweet classification with machine learning proved useful in gaining comprehensive insight into public sentiment and could be applied to other data sources and topics.}, language = {en} } @article{GriebelSegebarthSteinetal.2023, author = {Griebel, Matthias and Segebarth, Dennis and Stein, Nikolai and Schukraft, Nina and Tovote, Philip and Blum, Robert and Flath, Christoph M.}, title = {Deep learning-enabled segmentation of ambiguous bioimages with deepflash2}, series = {Nature Communications}, volume = {14}, journal = {Nature Communications}, doi = {10.1038/s41467-023-36960-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357286}, year = {2023}, abstract = {Bioimages frequently exhibit low signal-to-noise ratios due to experimental conditions, specimen characteristics, and imaging trade-offs. Reliable segmentation of such ambiguous images is difficult and laborious. Here we introduce deepflash2, a deep learning-enabled segmentation tool for bioimage analysis. The tool addresses typical challenges that may arise during the training, evaluation, and application of deep learning models on ambiguous data. The tool's training and evaluation pipeline uses multiple expert annotations and deep model ensembles to achieve accurate results. The application pipeline supports various use-cases for expert annotations and includes a quality assurance mechanism in the form of uncertainty measures. 
Benchmarked against other tools, deepflash2 offers both high predictive accuracy and efficient computational resource usage. The tool is built upon established deep learning libraries and enables sharing of trained model ensembles with the research community. deepflash2 aims to simplify the integration of deep learning into bioimage analysis projects while improving accuracy and reliability.}, language = {en} } @article{VollmerNaglerHoerneretal.2023, author = {Vollmer, Andreas and Nagler, Simon and H{\"o}rner, Marius and Hartmann, Stefan and Brands, Roman C. and Breitenb{\"u}cher, Niko and Straub, Anton and K{\"u}bler, Alexander and Vollmer, Michael and Gubik, Sebastian and Lang, Gernot and Wollborn, Jakob and Saravi, Babak}, title = {Performance of artificial intelligence-based algorithms to predict prolonged length of stay after head and neck cancer surgery}, series = {Heliyon}, volume = {9}, journal = {Heliyon}, number = {11}, issn = {2405-8440}, doi = {10.1016/j.heliyon.2023.e20752}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-350416}, year = {2023}, abstract = {Background Medical resource management can be improved by assessing the likelihood of prolonged length of stay (LOS) for head and neck cancer surgery patients. The objective of this study was to develop predictive models that could be used to determine whether a patient's LOS after cancer surgery falls within the normal range of the cohort. Methods We conducted a retrospective analysis of a dataset consisting of 300 consecutive patients who underwent head and neck cancer surgery between 2017 and 2022 at a single university medical center. Prolonged LOS was defined as LOS exceeding the 75th percentile of the cohort. Feature importance analysis was performed to evaluate the most important predictors for prolonged LOS. We then constructed 7 machine learning and deep learning algorithms for the prediction modeling of prolonged LOS. Results The algorithms reached accuracy values of 75.40 (radial basis function neural network) to 97.92 (Random Trees) for the training set and 64.90 (multilayer perceptron neural network) to 84.14 (Random Trees) for the testing set. The leading parameters predicting prolonged LOS were operation time, ischemia time, the graft used, the ASA score, the intensive care stay, and the pathological stages. The results revealed that patients who had a higher number of harvested lymph nodes (LN) had a lower probability of recurrence but also a greater LOS. However, patients with prolonged LOS were also at greater risk of recurrence, particularly when fewer LN were extracted. Further, LOS was more strongly correlated with the overall number of extracted lymph nodes than with the number of positive lymph nodes or the ratio of positive to overall extracted lymph nodes, indicating that unnecessary lymph node extraction in particular might be associated with prolonged LOS. Conclusions The results emphasize the need for a closer follow-up of patients who experience prolonged LOS.
Prospective trials are warranted to validate the present results.}, language = {en} } @article{CaliskanCaliskanRasbachetal.2023, author = {Caliskan, Aylin and Caliskan, Deniz and Rasbach, Lauritz and Yu, Weimeng and Dandekar, Thomas and Breitenbach, Tim}, title = {Optimized cell type signatures revealed from single-cell data by combining principal feature analysis, mutual information, and machine learning}, series = {Computational and Structural Biotechnology Journal}, volume = {21}, journal = {Computational and Structural Biotechnology Journal}, issn = {2001-0370}, doi = {10.1016/j.csbj.2023.06.002}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349989}, pages = {3293-3314}, year = {2023}, abstract = {Machine learning techniques are excellent for analyzing expression data from single cells. These techniques impact all fields ranging from cell annotation and clustering to signature identification. The presented framework evaluates how well selected gene sets separate defined phenotypes or cell groups. This innovation overcomes the present limitation of objectively and correctly identifying a small gene set of high information content for separating phenotypes; corresponding code scripts are provided. The small but meaningful subset of the original genes (or feature space) facilitates human interpretation of the differences between the phenotypes, including those found by machine learning, and may even turn correlations between genes and phenotypes into a causal explanation. For the feature selection task, principal feature analysis is utilized, which reduces redundant information while selecting genes that carry the information for separating the phenotypes. In this context, the presented framework demonstrates the explainability of unsupervised learning as it reveals cell-type specific signatures. Apart from a Seurat preprocessing tool and the PFA script, the pipeline uses mutual information to balance accuracy and size of the gene set if desired. A validation part to evaluate the gene selection for its information content regarding the separation of the phenotypes is provided as well; binary and multiclass classification of 3 or 4 groups are studied. Results from different single-cell data are presented. In each, only about ten out of more than 30,000 genes are identified as carrying the relevant information. The code is provided in a GitHub repository at https://github.com/AC-PHD/Seurat_PFA_pipeline.}, language = {en} } @article{DresiaKurudzijaDeekenetal.2023, author = {Dresia, Kai and Kurudzija, Eldin and Deeken, Jan and Waxenegger-Wilfing, G{\"u}nther}, title = {Improved wall temperature prediction for the LUMEN rocket combustion chamber with neural networks}, series = {Aerospace}, volume = {10}, journal = {Aerospace}, number = {5}, issn = {2226-4310}, doi = {10.3390/aerospace10050450}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319169}, year = {2023}, abstract = {Accurate calculations of the heat transfer and the resulting maximum wall temperature are essential for the optimal design of reliable and efficient regenerative cooling systems. However, predicting the heat transfer of supercritical methane flowing in cooling channels of a regeneratively cooled rocket combustor presents a significant challenge. High-fidelity CFD calculations provide sufficient accuracy but are computationally too expensive to be used within elaborate design optimization routines.
In a previous work, it was shown that a surrogate model based on neural networks is able to predict the maximum wall temperature along straight cooling channels with convincing precision when trained with data from CFD simulations for simple cooling channel segments. In this paper, the methodology is extended to cooling channels with curvature. The predictions of the extended model are tested against CFD simulations with different boundary conditions for the representative LUMEN combustor contour with varying geometries and heat flux densities. The high accuracy of the extended model's predictions suggests that it will be a valuable tool for designing and analyzing regenerative cooling systems with greater efficiency and effectiveness.}, language = {en} } @phdthesis{Weigand2024, author = {Weigand, Matthias Johann}, title = {Fernerkundung und maschinelles Lernen zur Erfassung von urbanem Gr{\"u}n - Eine Analyse am Beispiel der Verteilungsgerechtigkeit in Deutschland}, doi = {10.25972/OPUS-34961}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349610}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Green spaces are among the most important environmental influences in people's residential environments. On the one hand, they have a positive effect on people's physical and mental health; on the other hand, green spaces can also mitigate the negative effects of other factors, such as the heat events that are becoming more frequent in the course of climate change. Nevertheless, green spaces are not equally accessible to the entire population. Existing research in the context of environmental justice (EJ) has shown that different socio-economic and demographic groups of the German population have different levels of access to green spaces. Existing analyses of environmental influences in the EJ context have been criticized because geographic data are often evaluated at too aggregated a level, so that locally specific exposures are no longer represented accurately. This applies in particular to large-scale studies. Important spatial information is thus lost. Yet modern Earth observation and geodata are more detailed than ever, and machine learning methods enable their efficient processing into higher-value information. Using green spaces in Germany as an example, the overarching goal of this work is to demonstrate and carry out the methodological steps for systematically transforming comprehensive geodata into relevant geoinformation for the large-scale, high-resolution analysis of environmental characteristics. At the interface of the disciplines of remote sensing, geoinformatics, social geography, and environmental justice research, the potential of modern methods for improving the spatial and semantic resolution of geoinformation is explored. To this end, machine learning methods are employed to map land cover and land use at the national level. These developments are intended to help close existing data gaps and shed light on the distributional equity of green spaces. This dissertation is structured into three conceptual parts.
In the first part of the study, Earth observation data from the Sentinel-2 satellites are used for the Germany-wide classification of land cover information. A machine learning method is trained in combination with point-based reference data on land cover and land use from the Europe-wide Land Use and Coverage Area Frame Survey (LUCAS). In this context, different preprocessing steps of the LUCAS data and their influence on the classification accuracy are examined. The classification method is able to derive land cover information with high accuracy even in complex urban areas. One result of this part of the study is a Germany-wide land cover classification with an overall accuracy of 93.07\%, which is used in the further course of the work to spatially quantify green land cover (GLC). The second conceptual part of the work focuses on a differentiated view of green spaces, using the example of public green spaces (PGS), which are a frequent subject of EJ research. However, a frequently used source of spatial data on public green spaces, the European Urban Atlas (EUA), has so far not been compiled for the whole of Germany. This part of the study pursues a data-driven approach to determine the availability of public green space at the spatial level of neighborhoods for all of Germany. Areas already covered by the EUA serve as a reference. Using a combination of Earth observation data and information from the OpenStreetMap project, a deep learning-based fusion network is created that quantifies the available area of public green space. The result of this step is a model that is used to estimate the amount of public green space in a neighborhood (R\(^2\) = 0.952). The third part of the study takes up the results of the first two parts and examines the distribution of green spaces in Germany, adding georeferenced population data. This exemplary analysis distinguishes between two types of green spaces: GLC and PGS. First, descriptive statistics are used to examine the general distribution of green spaces across the German population. The distributional equity is then determined using common equity metrics. Finally, the relationships between the demographic composition of a neighborhood and the available amount of green space are examined for three exemplary sociodemographic groups. The analysis shows strong differences in the availability of PGS between urban and rural areas. A higher percentage of the urban population has access to the minimum amount of PGS recommended by the World Health Organization. The results also show a clear difference in distributional equity between GLC and PGS and underline the relevance of distinguishing between types of green spaces for such studies. The concluding examination of different population groups elaborates differences at the sociodemographic level.
Taken together, this work demonstrates how modern geodata and machine learning methods can be used to overcome previous limitations of spatial datasets. Using the example of green spaces in the residential environment of the German population, it is shown that nationwide environmental justice analyses can be enriched by high-resolution, locally fine-grained geographic information. This work illustrates how the methods of Earth observation and geoinformatics can make an important contribution to identifying inequalities in people's residential environments and, ultimately, to supporting and monitoring sustainable settlement development with objective information.}, subject = {Geografie}, language = {de} } @article{HaufeIsaiasPellegrinietal.2023, author = {Haufe, Stefan and Isaias, Ioannis U. and Pellegrini, Franziska and Palmisano, Chiara}, title = {Gait event prediction using surface electromyography in parkinsonian patients}, series = {Bioengineering}, volume = {10}, journal = {Bioengineering}, number = {2}, issn = {2306-5354}, doi = {10.3390/bioengineering10020212}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-304380}, year = {2023}, abstract = {Gait disturbances are common manifestations of Parkinson's disease (PD), with unmet therapeutic needs. Inertial measurement units (IMUs) are capable of monitoring gait, but they lack neurophysiological information that may be crucial for studying gait disturbances in these patients. Here, we present a machine learning approach to approximate IMU angular velocity profiles and subsequently gait events using electromyographic (EMG) channels during overground walking in patients with PD. We recorded six parkinsonian patients while they walked for at least three minutes. Patient-agnostic regression models were trained on temporally embedded EMG time series of different combinations of up to five leg muscles bilaterally (i.e., tibialis anterior, soleus, gastrocnemius medialis, gastrocnemius lateralis, and vastus lateralis). Gait events could be detected with high temporal precision (median displacement of <50 ms), low numbers of missed events (<2\%), and next to no false-positive event detections (<0.1\%). Swing and stance phases could thus be determined with high fidelity (median F1-score of ~0.9). Interestingly, the best performance was obtained using as few as two EMG probes placed on the left and right vastus lateralis. Our results demonstrate the practical utility of the proposed EMG-based system for gait event prediction, which allows electromyographic signals to be acquired simultaneously.
This gait analysis approach has the potential to make additional measurement devices such as IMUs and force plates less essential, thereby reducing financial and preparation overheads and discomfort factors in gait studies.}, language = {en} } @article{KunzStellzigEisenhauerBoldt2023, author = {Kunz, Felix and Stellzig-Eisenhauer, Angelika and Boldt, Julian}, title = {Applications of artificial intelligence in orthodontics — an overview and perspective based on the current state of the art}, series = {Applied Sciences}, volume = {13}, journal = {Applied Sciences}, number = {6}, issn = {2076-3417}, doi = {10.3390/app13063850}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-310940}, year = {2023}, abstract = {Artificial intelligence (AI) has already arrived in many areas of our lives and, because of the increasing availability of computing power, can now be used for complex tasks in medicine and dentistry. This is reflected by an exponential increase in scientific publications aiming to integrate AI into everyday clinical routines. Applications of AI in orthodontics are already manifold and range from the identification of anatomical/pathological structures or reference points in imaging to the support of complex decision-making in orthodontic treatment planning. The aim of this article is to give the reader an overview of the current state of the art regarding applications of AI in orthodontics and to provide a perspective for the use of such AI solutions in clinical routine. For this purpose, we present various use cases for AI in orthodontics, for which research is already available. Considering the current scientific progress, it is not unreasonable to assume that AI will become an integral part of orthodontic diagnostics and treatment planning in the near future. Although AI will likely not be able to replace the knowledge and experience of human experts in the not-too-distant future, it probably will be able to support practitioners, thus serving as a quality-assuring component in orthodontic patient care.}, language = {en} } @article{HenckertMalorgioSchweigeretal.2023, author = {Henckert, David and Malorgio, Amos and Schweiger, Giovanna and Raimann, Florian J. and Piekarski, Florian and Zacharowski, Kai and Hottenrott, Sebastian and Meybohm, Patrick and Tscholl, David W. and Spahn, Donat R. and Roche, Tadzio R.}, title = {Attitudes of anesthesiologists toward artificial intelligence in anesthesia: a multicenter, mixed qualitative-quantitative study}, series = {Journal of Clinical Medicine}, volume = {12}, journal = {Journal of Clinical Medicine}, number = {6}, issn = {2077-0383}, doi = {10.3390/jcm12062096}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-311189}, year = {2023}, abstract = {Artificial intelligence (AI) is predicted to play an increasingly important role in perioperative medicine in the very near future. However, little is known about what anesthesiologists know and think about AI in this context. This is important because the successful introduction of new technologies depends on the understanding and cooperation of end users. We sought to investigate how much anesthesiologists know about AI and what they think about the introduction of AI-based technologies into the clinical setting. In order to better understand what anesthesiologists think of AI, we recruited 21 anesthesiologists from 2 university hospitals for face-to-face structured interviews.
The interview transcripts were subdivided sentence-by-sentence into discrete statements, and statements were then grouped into key themes. Subsequently, a survey of closed questions based on these themes was sent to 70 anesthesiologists from 3 university hospitals for rating. In the interviews, the base level of knowledge of AI was good at 86 of 90 statements (96\%), although awareness of the potential applications of AI in anesthesia was poor at only 7 of 42 statements (17\%). Regarding the implementation of AI in anesthesia, statements were split roughly evenly between pros (46 of 105, 44\%) and cons (59 of 105, 56\%). Interviewees considered that AI could usefully be used in diverse tasks such as risk stratification, the prediction of vital sign changes, or as a treatment guide. The validity of these themes was probed in a follow-up survey of 70 anesthesiologists with a response rate of 70\%, which confirmed an overall positive view of AI in this group. Anesthesiologists hold a range of opinions, both positive and negative, regarding the application of AI in their field of work. Survey-based studies do not always uncover the full breadth of nuance of opinion amongst clinicians. Engagement with specific concerns, both technical and ethical, will prove important as this technology moves from research to the clinic.}, language = {en} } @article{OberdorfSchaschekWeinzierletal.2023, author = {Oberdorf, Felix and Schaschek, Myriam and Weinzierl, Sven and Stein, Nikolai and Matzner, Martin and Flath, Christoph M.}, title = {Predictive end-to-end enterprise process network monitoring}, series = {Business \& Information Systems Engineering}, volume = {65}, journal = {Business \& Information Systems Engineering}, number = {1}, issn = {2363-7005}, doi = {10.1007/s12599-022-00778-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323814}, pages = {49-64}, year = {2023}, abstract = {Ever-growing data availability combined with rapid progress in analytics has laid the foundation for the emergence of business process analytics. Organizations strive to leverage predictive process analytics to obtain insights. However, current implementations are designed to deal with homogeneous data. Consequently, there is limited practical use in an organization with heterogeneous data sources. The paper proposes a method for predictive end-to-end enterprise process network monitoring leveraging multi-headed deep neural networks to overcome this limitation. A case study performed with a medium-sized German manufacturing company highlights the method's utility for organizations.}, language = {en} } @article{HermJanieschFuchs2022, author = {Herm, Lukas-Valentin and Janiesch, Christian and Fuchs, Patrick}, title = {Der Einfluss von menschlichen Denkmustern auf k{\"u}nstliche Intelligenz - eine strukturierte Untersuchung von kognitiven Verzerrungen}, series = {HMD Praxis der Wirtschaftsinformatik}, volume = {59}, journal = {HMD Praxis der Wirtschaftsinformatik}, number = {2}, issn = {1436-3011}, doi = {10.1365/s40702-022-00844-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323787}, pages = {556-571}, year = {2022}, abstract = {Artificial intelligence (AI) is increasingly entering sensitive areas of everyday human life. Intelligent systems no longer make only simple decisions, but increasingly complex ones as well. For example, intelligent systems decide whether or not applicants should be hired by a company.
The underlying decision-making process is often difficult to trace, and unjustified decisions can therefore go undetected, which is why the implementation of such an AI is often referred to as a black box. Consequently, the threat of being disadvantaged by unfair and discriminatory AI decisions is growing. If these distortions result from human actions and thought patterns, they are referred to as cognitive biases. Owing to the novelty of this topic, however, it is not yet clear which different cognitive biases can occur within an AI project. The aim of this article is to provide a holistic overview on the basis of a structured literature analysis. The findings are organized and classified using the Cross-Industry Standard Process for Data Mining (CRISP-DM) model, which is widely used in practice. This examination shows that human influence on an AI is present in every development phase of the model and that it is therefore important to explicitly investigate "human-like" bias in an AI.}, language = {de} } @phdthesis{Kleineisel2024, author = {Kleineisel, Jonas}, title = {Variational networks in magnetic resonance imaging - Application to spiral cardiac MRI and investigations on image quality}, doi = {10.25972/OPUS-34737}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-347370}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Acceleration is a central aim of clinical and technical research in magnetic resonance imaging (MRI) today, with the potential to increase robustness, accessibility and patient comfort, reduce cost, and enable entirely new kinds of examinations. A key component in this endeavor is image reconstruction, as most modern approaches build on advanced signal and image processing. Here, deep learning (DL)-based methods have recently shown considerable potential, with numerous publications demonstrating benefits for MRI reconstruction. However, these methods often come at the cost of an increased risk for subtle yet critical errors. Therefore, the aim of this thesis is to advance DL-based MRI reconstruction, while ensuring high quality and fidelity with measured data. A network architecture specifically suited for this purpose is the variational network (VN). To investigate the benefits these can bring to non-Cartesian cardiac imaging, the first part presents an application of VNs, which were specifically adapted to the reconstruction of accelerated spiral acquisitions. The proposed method is compared to a segmented exam, a U-Net and a compressed sensing (CS) model using qualitative and quantitative measures. While the U-Net performed poorly, the VN as well as the CS reconstruction showed good output quality. In functional cardiac imaging, the proposed real-time method with VN reconstruction substantially accelerates examinations over the gold-standard, from over 10 to just 1 minute. Clinical parameters agreed on average. Generally in MRI reconstruction, the assessment of image quality is complex, in particular for modern non-linear methods. Therefore, advanced techniques for precise evaluation of quality were subsequently demonstrated. With two distinct methods, resolution and amplification or suppression of noise are quantified locally in each pixel of a reconstruction.
Using these, local maps of resolution and noise in parallel imaging (GRAPPA), CS, U-Net and VN reconstructions were determined for MR images of the brain. In the tested images, GRAPPA delivers uniform and ideal resolution, but amplifies noise noticeably. The other methods adapt their behavior to image structure, where different levels of local blurring were observed at edges compared to homogeneous areas, and noise was suppressed except at edges. Overall, VNs were found to combine a number of advantageous properties, including a good trade-off between resolution and noise, fast reconstruction times, and high overall image quality and fidelity of the produced output. Therefore, this network architecture seems highly promising for MRI reconstruction.}, subject = {Kernspintomografie}, language = {en} } @article{MarquardtHartrampfKollmannsbergeretal.2023, author = {Marquardt, Andr{\´e} and Hartrampf, Philipp and Kollmannsberger, Philip and Solimando, Antonio G. and Meierjohann, Svenja and K{\"u}bler, Hubert and Bargou, Ralf and Schilling, Bastian and Serfling, Sebastian E. and Buck, Andreas and Werner, Rudolf A. and Lapa, Constantin and Krebs, Markus}, title = {Predicting microenvironment in CXCR4- and FAP-positive solid tumors — a pan-cancer machine learning workflow for theranostic target structures}, series = {Cancers}, volume = {15}, journal = {Cancers}, number = {2}, issn = {2072-6694}, doi = {10.3390/cancers15020392}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305036}, year = {2023}, abstract = {(1) Background: C-X-C Motif Chemokine Receptor 4 (CXCR4) and Fibroblast Activation Protein Alpha (FAP) are promising theranostic targets. However, it is unclear whether CXCR4 and FAP positivity mark distinct microenvironments, especially in solid tumors. (2) Methods: Using Random Forest (RF) analysis, we searched for entity-independent mRNA and microRNA signatures related to CXCR4 and FAP overexpression in our pan-cancer cohort from The Cancer Genome Atlas (TCGA) database — representing n = 9242 specimens from 29 tumor entities. CXCR4- and FAP-positive samples were assessed via StringDB cluster analysis, EnrichR, Metascape, and Gene Set Enrichment Analysis (GSEA). Findings were validated via correlation analyses in n = 1541 tumor samples. TIMER2.0 analyzed the association of CXCR4 / FAP expression and infiltration levels of immune-related cells. (3) Results: We identified entity-independent CXCR4 and FAP gene signatures representative for the majority of solid cancers. While CXCR4 positivity marked an immune-related microenvironment, FAP overexpression highlighted an angiogenesis-associated niche. TIMER2.0 analysis confirmed characteristic infiltration levels of CD8+ cells for CXCR4-positive tumors and endothelial cells for FAP-positive tumors. (4) Conclusions: CXCR4- and FAP-directed PET imaging could provide a non-invasive decision aid for entity-agnostic treatment of microenvironment in solid malignancies. 
Moreover, this machine learning workflow can easily be transferred to other theranostic targets.}, language = {en} } @article{SchaffarczykKoehnOggianoetal.2022, author = {Schaffarczyk, Alois and Koehn, Silas and Oggiano, Luca and Schaffarczyk, Kai}, title = {Aerodynamic benefits by optimizing cycling posture}, series = {Applied Sciences}, volume = {12}, journal = {Applied Sciences}, number = {17}, issn = {2076-3417}, doi = {10.3390/app12178475}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-285942}, year = {2022}, abstract = {An approach to aerodynamically optimizing cycling posture and reducing drag in an Ironman (IM) event was elaborated. To this end, four commonly used positions in cycling were investigated and simulated for a flow velocity of 10 m/s and yaw angles of 0-20° using the OpenFOAM-based Nabla Flow CFD simulation software. A cyclist was scanned using an iPhone 12, and the meshing software Blender was used. Significant differences were observed by changing and optimizing the cyclist's posture. Aerodynamic drag coefficient (CdA) varies by more than a factor of 2, ranging from 0.214 to 0.450. Within a position, the CdA tends to increase slightly at yaw angles of 5-10° and decrease at higher yaw angles compared to a straight head wind, except for the time trial (TT) position. The results were applied to the IM Hawaii bike course (180 km), estimating a constant power output of 300 W. Including the wind distributions, two different bike split models for performance prediction were applied. A significant time saving of roughly 1 h was found. Finally, a machine learning approach to deduce 3D triangulation for specific body shapes from 2D pictures was tested.}, language = {en} } @article{KoehlerBauerDietzetal.2022, author = {Koehler, Jonas and Bauer, Andr{\´e} and Dietz, Andreas J. and Kuenzer, Claudia}, title = {Towards forecasting future snow cover dynamics in the European Alps — the potential of long optical remote-sensing time series}, series = {Remote Sensing}, volume = {14}, journal = {Remote Sensing}, number = {18}, issn = {2072-4292}, doi = {10.3390/rs14184461}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-288338}, year = {2022}, abstract = {Snow is a vital environmental parameter and dynamically responsive to climate change, particularly in mountainous regions. Snow cover can be monitored at variable spatial scales using Earth Observation (EO) data. Long-lasting remote sensing missions enable the generation of multi-decadal time series and thus the detection of long-term trends. However, there have been few attempts to use these to model future snow cover dynamics. In this study, we, therefore, explore the potential of such time series to forecast the Snow Line Elevation (SLE) in the European Alps. We generate monthly SLE time series from the entire Landsat archive (1985-2021) in 43 Alpine catchments. Positive long-term SLE change rates are detected, with the highest rates (5-8 m/y) in the Western and Central Alps. We utilize this SLE dataset to implement and evaluate seven uni-variate time series modeling and forecasting approaches. The best results were achieved by Random Forests, with a Nash-Sutcliffe efficiency (NSE) of 0.79 and a Mean Absolute Error (MAE) of 258 m, Telescope (0.76, 268 m), and seasonal ARIMA (0.75, 270 m). Since the model performance varies strongly with the input data, we developed a combined forecast based on the best-performing methods in each catchment.
This approach was then used to forecast the SLE for the years 2022-2029. In the majority of the catchments, the shift of the forecast median SLE level retained the sign of the long-term trend. In cases where a deviating SLE dynamic is forecast, a discussion based on the unique properties of the catchment and past SLE dynamics is required. In the future, we expect major improvements in our SLE forecasting efforts by including external predictor variables in a multi-variate modeling approach.}, language = {en} } @article{ReelReelErlicetal.2022, author = {Reel, Smarti and Reel, Parminder S. and Erlic, Zoran and Amar, Laurence and Pecori, Alessio and Larsen, Casper K. and Tetti, Martina and Pamporaki, Christina and Prehn, Cornelia and Adamski, Jerzy and Prejbisz, Aleksander and Ceccato, Filippo and Scaroni, Carla and Kroiss, Matthias and Dennedy, Michael C. and Deinum, Jaap and Eisenhofer, Graeme and Langton, Katharina and Mulatero, Paolo and Reincke, Martin and Rossi, Gian Paolo and Lenzini, Livia and Davies, Eleanor and Gimenez-Roqueplo, Anne-Paule and Assi{\´e}, Guillaume and Blanchard, Anne and Zennaro, Maria-Christina and Beuschlein, Felix and Jefferson, Emily R.}, title = {Predicting hypertension subtypes with machine learning using targeted metabolites and their ratios}, series = {Metabolites}, volume = {12}, journal = {Metabolites}, number = {8}, issn = {2218-1989}, doi = {10.3390/metabo12080755}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-286161}, year = {2022}, abstract = {Hypertension is a major global health problem with high prevalence and complex associated health risks. Primary hypertension (PHT) is the most common form, and the reasons behind it are largely unknown. Endocrine hypertension (EHT) is another complex form of hypertension with an estimated prevalence varying from 3 to 20\% depending on the population studied. It occurs due to underlying conditions associated with hormonal excess, mainly related to adrenal tumours, and is sub-categorised into primary aldosteronism (PA), Cushing's syndrome (CS), and pheochromocytoma or functional paraganglioma (PPGL). Endocrine hypertension is often misdiagnosed as primary hypertension, causing delays in treatment for the underlying condition, reduced quality of life, and costly antihypertensive treatment that is often ineffective. This study systematically used targeted metabolomics and high-throughput machine learning methods to predict the key biomarkers in classifying and distinguishing the various subtypes of endocrine and primary hypertension. The trained models successfully classified CS from PHT and EHT from PHT with 92\% specificity on the test set. The most prominent targeted metabolites and metabolite ratios for hypertension identification for different disease comparisons were C18:1, C18:2, and Orn/Arg. Sex was identified as an important feature in CS vs.
PHT classification.}, language = {en} } @article{WangBachoferKoehleretal.2022, author = {Wang, Zhiyuan and Bachofer, Felix and Koehler, Jonas and Huth, Juliane and Hoeser, Thorsten and Marconcini, Mattia and Esch, Thomas and Kuenzer, Claudia}, title = {Spatial modelling and prediction with the spatio-temporal matrix: a study on predicting future settlement growth}, series = {Land}, volume = {11}, journal = {Land}, number = {8}, issn = {2073-445X}, doi = {10.3390/land11081174}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281856}, year = {2022}, abstract = {In the past decades, various Earth observation-based time series products have emerged, which have enabled studies and analysis of global change processes. Besides their contribution to understanding past processes, time series datasets hold enormous potential for predictive modeling and thereby meet the demands of decision makers on future scenarios. In order to further exploit these data, a novel pixel-based approach has been introduced, which is the spatio-temporal matrix (STM). The approach integrates the historical characteristics of a specific land cover at a high temporal frequency in order to interpret the spatial and temporal information for the neighborhood of a given target pixel. The provided information can be exploited with common predictive models and algorithms. In this study, this approach was utilized and evaluated for the prediction of future urban/built-settlement growth. Random forest and multi-layer perceptron were employed for the prediction. The tests have been carried out with training strategies based on a one-year and a ten-year time span for the urban agglomerations of Surat (India), Ho-Chi-Minh City (Vietnam), and Abidjan (Ivory Coast). The slope, land use, exclusion, urban, transportation, hillshade (SLEUTH) model was selected as a baseline indicator for the performance evaluation. The statistical results from the receiver operating characteristic curve (ROC) demonstrate a good ability of the STM to facilitate the prediction of future settlement growth and its transferability to different cities, with area under the curve (AUC) values greater than 0.85. Compared with SLEUTH, the STM-based model achieved higher AUC in all of the test cases, while being independent of the additional datasets for the restricted and the preferential development areas.}, language = {en} } @article{FisserKhorsandiWegmannetal.2022, author = {Fisser, Henrik and Khorsandi, Ehsan and Wegmann, Martin and Baier, Frank}, title = {Detecting moving trucks on roads using Sentinel-2 data}, series = {Remote Sensing}, volume = {14}, journal = {Remote Sensing}, number = {7}, issn = {2072-4292}, doi = {10.3390/rs14071595}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-267174}, year = {2022}, abstract = {In most countries, freight is predominantly transported by road cargo trucks. We present a new satellite remote sensing method for detecting moving trucks on roads using Sentinel-2 data. The method exploits a temporal sensing offset of the Sentinel-2 multispectral instrument, causing spatially and spectrally distorted signatures of moving objects. A random forest classifier was trained (overall accuracy: 84\%) on visual-near-infrared-spectra of 2500 globally labelled targets. Based on the classification, the target objects were extracted using a developed recursive neighbourhood search. The speed and the heading of the objects were approximated. 
Detections were validated by employing 350 globally labelled target boxes (mean F\(_1\) score: 0.74). The lowest F\(_1\) score was achieved in Kenya (0.36), the highest in Poland (0.88). Furthermore, validated at 26 traffic count stations in Germany on a total of 390 dates, the truck detections correlate spatio-temporally with station figures (Pearson r-value: 0.82, RMSE: 43.7). Absolute counts were underestimated on 81\% of the dates. The detection performance may differ by season and road condition. Hence, the method is only suitable for approximating the relative truck traffic abundance rather than providing accurate absolute counts. However, existing road cargo monitoring methods that rely on traffic count stations or very high resolution remote sensing data have limited global availability. The proposed moving truck detection method could fill this gap, particularly where other information on road cargo traffic is sparse, by employing globally and freely available Sentinel-2 data. It is inferior to station counts in terms of accuracy and temporal detail, but superior in terms of spatial coverage.}, language = {en} } @article{WernerHiguchiNoseetal.2022, author = {Werner, Rudolf A. and Higuchi, Takahiro and Nose, Naoko and Toriumi, Fujio and Matsusaka, Yohji and Kuji, Ichiei and Kazuhiro, Koshino}, title = {Generative adversarial network-created brain SPECTs of cerebral ischemia are indistinguishable to scans from real patients}, series = {Scientific reports}, volume = {12}, journal = {Scientific reports}, doi = {10.1038/s41598-022-23325-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300757}, year = {2022}, abstract = {Deep convolutional generative adversarial networks (GAN) allow for creating images from existing databases. We applied a modified light-weight GAN (FastGAN) algorithm to cerebral blood flow SPECTs and aimed to evaluate whether this technology can generate images close to those of real patients. Investigating three anatomical levels (cerebellum, CER; basal ganglia, BG; cortex, COR), 551 normal (248 CER, 174 BG, 129 COR) and 387 pathological brain SPECTs using N-isopropyl p-I-123-iodoamphetamine (123I-IMP) were included. For the latter scans, cerebral ischemic disease comprised 291 uni- (66 CER, 116 BG, 109 COR) and 96 bilateral defect patterns (44 BG, 52 COR). Our model was trained using a three-compartment anatomical input (dataset 'A'; including CER, BG, and COR), while for dataset 'B', only one anatomical region (COR) was included. Quantitative analyses provided mean counts (MC) and left/right (LR) hemisphere ratios, which were then compared to quantification from real images. For MC, 'B' was significantly different for normal and bilateral defect patterns (P < 0.0001, respectively), but not for unilateral ischemia (P = 0.77). Comparable results were recorded for LR, as normal and ischemia scans were significantly different relative to images acquired from real patients (P ≤ 0.01, respectively). Images provided by 'A', however, revealed comparable quantitative results when compared to real images, including normal (P = 0.8) and pathological scans (unilateral, P = 0.99; bilateral, P = 0.68) for MC. For LR, only uni- (P = 0.03), but not normal or bilateral defect scans (P ≥ 0.08) reached significance relative to images of real patients. With a minimum of only three anatomical compartments serving as stimuli, created cerebral SPECTs are indistinguishable from images of real patients.
The applied FastGAN algorithm may make it possible to provide sufficient scan numbers in various clinical scenarios, e.g., for "data-hungry" deep learning technologies or in the context of orphan diseases.}, language = {en} } @article{LoefflerWirthKreuzHoppetal.2019, author = {Loeffler-Wirth, Henry and Kreuz, Markus and Hopp, Lydia and Arakelyan, Arsen and Haake, Andrea and Cogliatti, Sergio B. and Feller, Alfred C. and Hansmann, Martin-Leo and Lenze, Dido and M{\"o}ller, Peter and M{\"u}ller-Hermelink, Hans Konrad and Fortenbacher, Erik and Willscher, Edith and Ott, German and Rosenwald, Andreas and Pott, Christiane and Schwaenen, Carsten and Trautmann, Heiko and Wessendorf, Swen and Stein, Harald and Szczepanowski, Monika and Tr{\"u}mper, Lorenz and Hummel, Michael and Klapper, Wolfram and Siebert, Reiner and Loeffler, Markus and Binder, Hans}, title = {A modular transcriptome map of mature B cell lymphomas}, series = {Genome Medicine}, volume = {11}, journal = {Genome Medicine}, doi = {10.1186/s13073-019-0637-7}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-237262}, year = {2019}, abstract = {Background Germinal center-derived B cell lymphomas are tumors of the lymphoid tissues representing one of the most heterogeneous malignancies. Here we characterize the variety of transcriptomic phenotypes of this disease based on 873 biopsy specimens collected in the German Cancer Aid MMML (Molecular Mechanisms in Malignant Lymphoma) consortium. They include diffuse large B cell lymphoma (DLBCL), follicular lymphoma (FL), Burkitt's lymphoma, mixed FL/DLBCL lymphomas, primary mediastinal large B cell lymphoma, multiple myeloma, IRF4-rearranged large cell lymphoma, MYC-negative Burkitt-like lymphoma with chr. 11q aberration and mantle cell lymphoma. Methods We apply self-organizing map (SOM) machine learning to microarray-derived expression data to generate a holistic view of the transcriptome landscape of lymphomas, to describe the multidimensional nature of gene regulation and to pursue a modular view on co-expression. Expression data were complemented by pathological, genetic and clinical characteristics. Results We present a transcriptome map of B cell lymphomas that allows visual comparison between the SOM portraits of different lymphoma strata and individual cases. It decomposes into one dozen modules of co-expressed genes related to different functional categories, to genetic defects and to the pathogenesis of lymphomas. On a molecular level, this disease forms a continuum of expression states rather than clearly separated phenotypes. We introduced the concept of combinatorial pattern types (PATs) that stratifies the lymphomas into nine PAT groups and, on a coarser level, into five prominent cancer hallmark types with proliferation, inflammation and stroma signatures. Inflammation signatures in combination with healthy B cell and tonsil characteristics associate with better overall survival rates, while proliferation in combination with inflammation and plasma cell characteristics worsens survival. A phenotypic similarity tree is presented that reveals possible progression paths along the transcriptional dimensions. Our analysis provided a novel look at the transition range between FL and DLBCL, at DLBCL with poor prognosis showing expression patterns resembling that of Burkitt's lymphoma and particularly at 'double-hit' MYC and BCL2 transformed lymphomas.
Conclusions The transcriptome map provides a tool that aggregates, refines and visualizes the data collected in the MMML study and interprets them in the light of previous knowledge to provide orientation and support in current and future studies on lymphomas and on other cancer entities.}, language = {en} } @article{VollmerVollmerLangetal.2022, author = {Vollmer, Andreas and Vollmer, Michael and Lang, Gernot and Straub, Anton and Shavlokhova, Veronika and K{\"u}bler, Alexander and Gubik, Sebastian and Brands, Roman and Hartmann, Stefan and Saravi, Babak}, title = {Associations between periodontitis and COPD: An artificial intelligence-based analysis of NHANES III}, series = {Journal of Clinical Medicine}, volume = {11}, journal = {Journal of Clinical Medicine}, number = {23}, issn = {2077-0383}, doi = {10.3390/jcm11237210}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-312713}, year = {2022}, abstract = {A number of cross-sectional epidemiological studies suggest that poor oral health is associated with respiratory diseases. However, the number of cases within the studies was limited, and the studies had different measurement conditions. By analyzing data from the National Health and Nutrition Examination Survey III (NHANES III), this study aimed to investigate possible associations between chronic obstructive pulmonary disease (COPD) and periodontitis in the general population. COPD was diagnosed in cases where the FEV\(_1\)/FVC ratio was below 70\% (non-COPD versus COPD; binary classification task). We used unsupervised learning utilizing k-means clustering to identify clusters in the data. COPD classes were predicted with logistic regression, a random forest classifier, a stochastic gradient descent (SGD) classifier, k-nearest neighbors, a decision tree classifier, Gaussian naive Bayes (GaussianNB), support vector machines (SVM), a custom-made convolutional neural network (CNN), a multilayer perceptron artificial neural network (MLP), and a radial basis function neural network (RBNN) in Python. We calculated the accuracy of the prediction and the area under the curve (AUC). The most important predictors were determined using feature importance analysis. Results: Overall, 15,868 participants and 19 feature variables were included. Based on k-means clustering, the data were separated into two clusters that identified two risk characteristic groups of patients. The algorithms reached AUCs between 0.608 (DTC) and 0.953 (CNN) for the classification of COPD classes. Feature importance analysis of deep learning algorithms indicated that age and mean attachment loss were the most important features in predicting COPD. Conclusions: Data analysis of a large population showed that machine learning and deep learning algorithms could predict COPD cases based on demographics and oral health feature variables. This study indicates that periodontitis might be an important predictor of COPD. Further prospective studies examining the association between periodontitis and COPD are warranted to validate the present results.}, language = {en} } @article{KrenzerBanckMakowskietal.2023, author = {Krenzer, Adrian and Banck, Michael and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Sudarevic, Boban and Zoller, Wolfgang G.
and Hann, Alexander and Puppe, Frank}, title = {A real-time polyp-detection system with clinical application in colonoscopy using deep convolutional neural networks}, series = {Journal of Imaging}, volume = {9}, journal = {Journal of Imaging}, number = {2}, issn = {2313-433X}, doi = {10.3390/jimaging9020026}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-304454}, year = {2023}, abstract = {Colorectal cancer (CRC) is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is a colonoscopy. During this procedure, the gastroenterologist searches for polyps. However, there is a potential risk of polyps being missed by the gastroenterologist. Automated detection of polyps helps to assist the gastroenterologist during a colonoscopy. There are already publications examining the problem of polyp detection in the literature. Nevertheless, most of these systems are only used in the research context and are not implemented for clinical application. Therefore, we introduce the first fully open-source automated polyp-detection system that scores best on current benchmark data and is implemented ready for clinical application. To create the polyp-detection system (ENDOMIND-Advanced), we combined our own collected data from different hospitals and practices in Germany with open-source datasets to create a dataset with over 500,000 annotated images. ENDOMIND-Advanced leverages a post-processing technique based on video detection to work in real-time with a stream of images. It is integrated into a prototype ready for application in clinical interventions. We achieve better performance compared to the best system in the literature and achieve an F1-score of 90.24\% on the open-source CVC-VideoClinicDB benchmark.}, language = {en} } @article{WoznickiLaquaBleyetal.2022, author = {Woznicki, Piotr and Laqua, Fabian and Bley, Thorsten and Baeßler, Bettina}, title = {AutoRadiomics: a framework for reproducible radiomics research}, series = {Frontiers in Radiology}, volume = {2}, journal = {Frontiers in Radiology}, issn = {2673-8740}, doi = {10.3389/fradi.2022.919133}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284813}, year = {2022}, abstract = {Purpose Machine learning based on radiomics features has seen huge success in a variety of clinical applications. However, the need for standardization and reproducibility has been increasingly recognized as a necessary step for future clinical translation. We developed a novel, intuitive open-source framework to facilitate all data analysis steps of a radiomics workflow in an easy and reproducible manner and evaluated it by reproducing classification results in eight available open-source datasets from different clinical entities. Methods The framework performs image preprocessing, feature extraction, feature selection, modeling, and model evaluation, and can automatically choose the optimal parameters for a given task. All analysis steps can be reproduced with a web application, which offers an interactive user interface and does not require programming skills. We evaluated our method in seven different clinical applications using eight public datasets: six datasets from the recently published WORC database, and two prostate MRI datasets—Prostate MRI and Ultrasound With Pathology and Coordinates of Tracked Biopsy (Prostate-UCLA) and PROSTATEx. Results In the analyzed datasets, AutoRadiomics successfully created and optimized models using radiomics features.
For WORC datasets, we achieved AUCs ranging from 0.56 for lung melanoma metastases detection to 0.93 for liposarcoma detection and thereby managed to replicate the previously reported results. No significant overfitting between training and test sets was observed. For the prostate cancer detection task, results were better in the PROSTATEx dataset (AUC = 0.73 for prostate and 0.72 for lesion mask) than in the Prostate-UCLA dataset (AUC = 0.61 for prostate and 0.65 for lesion mask), with external validation results varying from AUC = 0.51 to AUC = 0.77. Conclusion AutoRadiomics is a robust tool for radiomic studies, which can be used as a comprehensive solution, one of the analysis steps, or an exploratory tool. Its wide applicability was confirmed by the results obtained in the diverse analyzed datasets. The framework, as well as code for this analysis, are publicly available under https://github.com/pwoznicki/AutoRadiomics.}, language = {en} } @article{KrenzerMakowskiHekaloetal.2022, author = {Krenzer, Adrian and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Fast machine learning annotation in the medical domain: a semi-automated video annotation tool for gastroenterologists}, series = {BioMedical Engineering OnLine}, volume = {21}, journal = {BioMedical Engineering OnLine}, number = {1}, doi = {10.1186/s12938-022-01001-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300231}, year = {2022}, abstract = {Background Machine learning, especially deep learning, is becoming more and more relevant in research and development in the medical domain. For all the supervised deep learning applications, data is the most critical factor in securing successful implementation and sustaining the progress of the machine learning model. Gastroenterological data in particular, which often involve endoscopic videos, are cumbersome to annotate. Domain experts are needed to interpret and annotate the videos. To support those domain experts, we generated a framework. With this framework, instead of annotating every frame in the video sequence, experts just perform key annotations at the beginning and the end of sequences with pathologies, e.g., visible polyps. Subsequently, non-expert annotators supported by machine learning add the missing annotations for the frames in-between. Methods In our framework, an expert reviews the video and annotates a few video frames to verify the object's annotations for the non-expert. In a second step, a non-expert has visual confirmation of the given object and can annotate all following and preceding frames with AI assistance. After the expert has finished, relevant frames will be selected and passed on to an AI model. This information allows the AI model to detect and mark the desired object on all following and preceding frames with an annotation. Therefore, the non-expert can adjust and modify the AI predictions and export the results, which can then be used to train the AI model. Results Using this framework, we were able to reduce the workload of domain experts on average by a factor of 20 on our data. This is primarily due to the structure of the framework, which is designed to minimize the workload of the domain expert. Pairing this framework with a state-of-the-art semi-automated AI model enhances the annotation speed further.
Through a prospective study with 10 participants, we show that semi-automated annotation using our tool doubles the annotation speed of non-expert annotators compared to a well-known state-of-the-art annotation tool. Conclusion We introduce a framework for fast expert annotation for gastroenterologists, which reduces the workload of the domain expert considerably while maintaining a very high annotation quality. The framework incorporates a semi-automated annotation system utilizing trained object detection models. The software and framework are open-source.}, language = {en} } @article{VollmerVollmerLangetal.2022, author = {Vollmer, Andreas and Vollmer, Michael and Lang, Gernot and Straub, Anton and K{\"u}bler, Alexander and Gubik, Sebastian and Brands, Roman C. and Hartmann, Stefan and Saravi, Babak}, title = {Performance analysis of supervised machine learning algorithms for automatized radiographical classification of maxillary third molar impaction}, series = {Applied Sciences}, volume = {12}, journal = {Applied Sciences}, number = {13}, issn = {2076-3417}, doi = {10.3390/app12136740}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281662}, year = {2022}, abstract = {Background: Oro-antral communication (OAC) is a common complication following the extraction of upper molar teeth. The Archer and the Root Sinus (RS) systems can be used to classify impacted teeth in panoramic radiographs. The Archer classes B-D and the Root Sinus classes III and IV have been associated with an increased risk of OAC following tooth extraction in the upper molar region. In our previous study, we found that panoramic radiographs are not reliable for predicting OAC. This study aimed to (1) determine the feasibility of automating the classification (Archer/RS classes) of impacted teeth from panoramic radiographs, (2) determine the distribution of OAC stratified by classification system classes for the purposes of decision tree construction, and (3) determine the feasibility of automating the prediction of OAC utilizing the mentioned classification systems. Methods: We utilized multiple supervised pre-trained machine learning models (VGG16, ResNet50, Inceptionv3, EfficientNet, MobileNetV2), one custom-made convolutional neural network (CNN) model, and a Bag of Visual Words (BoVW) technique to evaluate the performance in predicting the clinical classification systems RS and Archer from panoramic radiographs (Aim 1). We then used Chi-square Automatic Interaction Detectors (CHAID) to determine the distribution of OAC stratified by the Archer/RS classes to introduce a decision tree for simple use in clinics (Aim 2). Lastly, we tested the ability of a multilayer perceptron artificial neural network (MLP) and a radial basis function neural network (RBNN) to predict OAC based on the high-risk classes RS III, IV, and Archer B-D (Aim 3). Results: We achieved accuracies of up to 0.771 for EfficientNet and MobileNetV2 when examining the Archer classification. For the AUC, we obtained values of up to 0.902 for our custom-made CNN. In comparison, the detection of the RS classification achieved accuracies of up to 0.792 for the BoVW and an AUC of up to 0.716 for our custom-made CNN. Overall, the Archer classification was detected more reliably than the RS classification when considering all algorithms. CHAID achieved a correct classification rate of 77.4\% for the Archer classification and 81.4\% for the RS classification.
MLP (AUC: 0.590) and RBNN (AUC: 0.590) for the Archer classification as well as MLP (AUC: 0.638) and RBNN (AUC: 0.630) for the RS classification did not show sufficient predictive capability for OAC. Conclusions: The results reveal that impacted teeth can be classified using panoramic radiographs (best AUC: 0.902), and the classification systems can be stratified according to their relationship to OAC (81.4\% correct for the RS classification). However, the Archer and RS classes did not achieve satisfactory AUCs for predicting OAC (best AUC: 0.638). Additional research is needed to validate the results externally and to develop a reliable risk stratification tool based on the present findings.}, language = {en} } @article{AllgaierSchleeProbstetal.2022, author = {Allgaier, Johannes and Schlee, Winfried and Probst, Thomas and Pryss, R{\"u}diger}, title = {Prediction of tinnitus perception based on daily life mHealth data using country origin and season}, series = {Journal of Clinical Medicine}, volume = {11}, journal = {Journal of Clinical Medicine}, number = {15}, issn = {2077-0383}, doi = {10.3390/jcm11154270}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281812}, year = {2022}, abstract = {Tinnitus is an auditory phantom perception without external sound stimuli. This chronic perception can severely affect quality of life. Because tinnitus symptoms are highly heterogeneous, multimodal data analyses are increasingly used to gain new insights. mHealth data sources, with their particular focus on country- and season-specific differences, can provide a promising avenue for such insights. Therefore, we examined data from the TrackYourTinnitus (TYT) mHealth platform to create symptom profiles of TYT users. We used gradient boosting engines to classify momentary tinnitus and regress tinnitus loudness, using country of origin and season as features. At the daily assessment level, tinnitus loudness can be regressed with a mean absolute error of 7.9 percentage points. In turn, momentary tinnitus can be classified with an F1 score of 93.79\%. Both results indicate differences in the tinnitus of TYT users with respect to season and country of origin. The significance of the features was evaluated using statistical and explainable machine learning methods. It was further shown that tinnitus varies with temperature in certain countries. The results presented show that season and country of origin appear to be valuable features when combined with longitudinal mHealth data at the level of daily assessment.}, language = {en} } @article{DhillonDahmsKuebertFlocketal.2023, author = {Dhillon, Maninder Singh and Dahms, Thorsten and Kuebert-Flock, Carina and Rummler, Thomas and Arnault, Joel and Steffan-Dewenter, Ingolf and Ullmann, Tobias}, title = {Integrating random forest and crop modeling improves the crop yield prediction of winter wheat and oil seed rape}, series = {Frontiers in Remote Sensing}, volume = {3}, journal = {Frontiers in Remote Sensing}, issn = {2673-6187}, doi = {10.3389/frsen.2022.1010978}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301462}, year = {2023}, abstract = {Fast and accurate yield estimates remain a goal for precision agriculture and food security, even with the increasing availability and variety of global satellite products and the rapid development of new algorithms. However, the consistency and reliability of suitable methodologies that provide accurate crop yield outcomes still need to be explored.
The study investigates the coupling of crop modeling and machine learning (ML) to improve the yield prediction of winter wheat (WW) and oil seed rape (OSR) and provides examples for the Free State of Bavaria (70,550 km\(^2\)), Germany, in 2019. The main objective is to determine whether a coupling approach [Light Use Efficiency (LUE) + Random Forest (RF)] results in better and more accurate yield predictions than models not using the LUE. Four different RF models [RF1 (input: Normalized Difference Vegetation Index (NDVI)), RF2 (input: climate variables), RF3 (input: NDVI + climate variables), RF4 (input: LUE-generated biomass + climate variables)], and one semi-empiric LUE model were designed with different input requirements to find the best predictors of crop monitoring. The results indicate that the individual use of the NDVI (in RF1) and the climate variables (in RF2) was not the most accurate, reliable, and precise solution for crop monitoring; however, their combined use (in RF3) resulted in higher accuracies. Notably, the study suggests that coupling the LUE model variables to the RF4 model can reduce the relative root mean square error (RRMSE) by 8\% (WW) and 1.6\% (OSR) and increase the R\(^2\) by 14.3\% (for both WW and OSR), compared to results relying on the LUE model alone. Moreover, the research compares the models' yield outputs for three different spatial inputs: Sentinel-2(S)-MOD13Q1 (10 m), Landsat (L)-MOD13Q1 (30 m), and MOD13Q1 (MODIS) (250 m). The S-MOD13Q1 data improved the performance of the models, with a higher mean R\(^2\) [0.80 (WW), 0.69 (OSR)] and lower RRMSE (\%) (9.18, 10.21) compared to L-MOD13Q1 (30 m) and MOD13Q1 (250 m). Satellite-based crop biomass, solar radiation, and temperature are found to be the most influential variables in the yield prediction of both crops.}, language = {en} } @article{DirscherlDietzKneiseletal.2021, author = {Dirscherl, Mariel and Dietz, Andreas J. and Kneisel, Christof and Kuenzer, Claudia}, title = {A novel method for automated supraglacial lake mapping in Antarctica using Sentinel-1 SAR imagery and deep learning}, series = {Remote Sensing}, volume = {13}, journal = {Remote Sensing}, number = {2}, issn = {2072-4292}, doi = {10.3390/rs13020197}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-222998}, year = {2021}, abstract = {Supraglacial meltwater accumulation on ice sheets can be a main driver for accelerated ice discharge, mass loss, and global sea-level rise. With further increasing surface air temperatures, meltwater-induced hydrofracturing, basal sliding, or surface thinning will accumulate and most likely trigger unprecedented ice mass loss on the Greenland and Antarctic ice sheets. While the Greenland surface hydrological network as well as its impacts on ice dynamics and mass balance has been studied in much detail, Antarctic supraglacial lakes remain understudied, with a circum-Antarctic record of their spatio-temporal development entirely lacking. This study provides the first automated supraglacial lake extent mapping method using Sentinel-1 synthetic aperture radar (SAR) imagery over Antarctica and complements the optical Sentinel-2 supraglacial lake detection algorithm presented in our companion paper. In detail, we propose the use of a modified U-Net for semantic segmentation of supraglacial lakes in single-polarized Sentinel-1 imagery.
The convolutional neural network (CNN) is implemented with residual connections for optimized performance as well as an Atrous Spatial Pyramid Pooling (ASPP) module for multiscale feature extraction. The algorithm is trained on 21,200 Sentinel-1 image patches and evaluated on ten spatially or temporally independent test acquisitions. In addition, George VI Ice Shelf is analyzed for intra-annual lake dynamics throughout austral summer 2019/2020, and a decision-level fused Sentinel-1 and Sentinel-2 maximum lake extent mapping product is presented for January 2020, revealing a more complete supraglacial lake coverage (~770 km\(^2\)) than the individual single-sensor products. Classification results confirm the reliability of the proposed workflow with an average Kappa coefficient of 0.925 and an F\(_1\)-score of 93.0\% for the supraglacial water class across all test regions. Furthermore, the algorithm is applied in an additional test region covering supraglacial lakes on the Greenland ice sheet, which further highlights the potential for spatio-temporal transferability. Future work involves the integration of more training data as well as intra-annual analyses of supraglacial lake occurrence across the whole continent, with a focus on supraglacial lake development throughout a summer melt season and into Antarctic winter.}, language = {en} } @article{KoehlerKuenzer2020, author = {Koehler, Jonas and Kuenzer, Claudia}, title = {Forecasting spatio-temporal dynamics on the land surface using Earth Observation data — a review}, series = {Remote Sensing}, volume = {12}, journal = {Remote Sensing}, number = {21}, issn = {2072-4292}, doi = {10.3390/rs12213513}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-216285}, year = {2020}, abstract = {Reliable forecasts on the impacts of global change on the land surface are vital to inform the actions of policy and decision makers to mitigate consequences and secure livelihoods. Geospatial Earth Observation (EO) data from remote sensing satellites have been collected continuously for 40 years and have the potential to facilitate the spatio-temporal forecasting of land surface dynamics. In this review, we compiled 143 papers on EO-based forecasting of all aspects of the land surface published in 16 high-ranking remote sensing journals within the past decade. We analyzed the literature regarding research focus, the spatial scope of the study, the forecasting method applied, as well as the temporal and technical properties of the input data. We categorized the identified forecasting methods according to their temporal forecasting mechanism and the type of input data. Time-lagged regressions, which are predominantly used for crop yield forecasting, and approaches based on Markov chains for future land use and land cover simulation are the most established methods. The use of external climate projections allows the forecasting of numerical land surface parameters up to one hundred years into the future, while auto-regressive time series modeling can account for intra-annual variances. Machine learning methods have been increasingly used in all categories, and multivariate modeling that integrates multiple data sources appears to be more popular than univariate auto-regressive modeling despite the availability of continuously expanding time series data.
Regardless of the method, reliable EO-based forecasting requires high-level remote sensing data products, and the resulting computational demand appears to be the main reason that most forecasts are conducted only on a local scale. In the upcoming years, however, we expect this to change with further advances in the field of machine learning, the publication of new global datasets, and the further establishment of cloud computing for data processing.}, language = {en} } @article{HoeserBachoferKuenzer2020, author = {Hoeser, Thorsten and Bachofer, Felix and Kuenzer, Claudia}, title = {Object detection and image segmentation with deep learning on Earth Observation data: a review — part II: applications}, series = {Remote Sensing}, volume = {12}, journal = {Remote Sensing}, number = {18}, issn = {2072-4292}, doi = {10.3390/rs12183053}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-213152}, year = {2020}, abstract = {In Earth observation (EO), large-scale land-surface dynamics are traditionally analyzed by investigating aggregated classes. The increase in data with a very high spatial resolution enables investigations on a fine-grained feature level, which can help us to better understand the dynamics of land surfaces by taking object dynamics into account. To extract fine-grained features and objects, the most popular deep-learning model for image analysis is commonly used: the convolutional neural network (CNN). In this review, we provide a comprehensive overview of the impact of deep learning on EO applications by reviewing 429 studies on image segmentation and object detection with CNNs. We extensively examine the spatial distribution of study sites, employed sensors, used datasets, and CNN architectures, and give a thorough overview of applications in EO that used CNNs. Our main finding is that CNNs are in an advanced transition phase from computer vision to EO. Building on this, we argue that in the near future, investigations that analyze object dynamics with CNNs will have a significant impact on EO research. With a focus on EO applications in this Part II, we complete the methodological review provided in Part I.}, language = {en} } @article{JanieschZschechHeinrich2021, author = {Janiesch, Christian and Zschech, Patrick and Heinrich, Kai}, title = {Machine learning and deep learning}, series = {Electronic Markets}, volume = {31}, journal = {Electronic Markets}, number = {3}, issn = {1422-8890}, doi = {10.1007/s12525-021-00475-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-270155}, pages = {685-695}, year = {2021}, abstract = {Today, intelligent systems that offer artificial intelligence capabilities often rely on machine learning. Machine learning describes the capacity of systems to learn from problem-specific training data to automate the process of analytical model building and solve associated tasks. Deep learning is a machine learning concept based on artificial neural networks. For many applications, deep learning models outperform shallow machine learning models and traditional data analysis approaches. In this article, we summarize the fundamentals of machine learning and deep learning to generate a broader understanding of the methodical underpinning of current intelligent systems.
In particular, we provide a conceptual distinction between relevant terms and concepts, explain the process of automated analytical model building through machine learning and deep learning, and discuss the challenges that arise when implementing such intelligent systems in the field of electronic markets and networked business. These naturally go beyond technological aspects and highlight issues in human-machine interaction and artificial intelligence servitization.}, language = {en} } @article{HoeserKuenzer2020, author = {Hoeser, Thorsten and Kuenzer, Claudia}, title = {Object detection and image segmentation with deep learning on Earth observation data: a review — part I: evolution and recent trends}, series = {Remote Sensing}, volume = {12}, journal = {Remote Sensing}, number = {10}, issn = {2072-4292}, doi = {10.3390/rs12101667}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205918}, year = {2020}, abstract = {Deep learning (DL) has had a great influence on large parts of science and has increasingly established itself as an adaptive method for new challenges in the field of Earth observation (EO). Nevertheless, the entry barriers for EO researchers are high due to the dense and rapidly developing field mainly driven by advances in computer vision (CV). To lower the barriers for researchers in EO, this review gives an overview of the evolution of DL with a focus on image segmentation and object detection in convolutional neural networks (CNN). The survey starts in 2012, when a CNN set new standards in image recognition, and lasts until late 2019. Thereby, we highlight the connections between the most important CNN architectures and cornerstones coming from CV in order to facilitate the evaluation of modern DL models. Furthermore, we briefly outline the evolution of the most popular DL frameworks and provide a summary of datasets in EO. By discussing well-performing DL architectures on these datasets as well as reflecting on advances made in CV and their impact on future research in EO, we narrow the gap between the reviewed, theoretical concepts from CV and practical application in EO.}, language = {en} } @article{DirscherlDietzKneiseletal.2020, author = {Dirscherl, Mariel and Dietz, Andreas J. and Kneisel, Christof and Kuenzer, Claudia}, title = {Automated mapping of Antarctic supraglacial lakes using a Machine Learning approach}, series = {Remote Sensing}, volume = {12}, journal = {Remote Sensing}, number = {7}, issn = {2072-4292}, doi = {10.3390/rs12071203}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-203735}, year = {2020}, abstract = {Supraglacial lakes can have a considerable impact on ice sheet mass balance and global sea-level rise through ice shelf fracturing and subsequent glacier speedup. In Antarctica, the distribution and temporal development of supraglacial lakes as well as their potential contribution to increased ice mass loss remain largely unknown, requiring a detailed mapping of the Antarctic surface hydrological network. In this study, we employ a Machine Learning algorithm trained on Sentinel-2 and auxiliary TanDEM-X topographic data for automated mapping of Antarctic supraglacial lakes. To ensure the spatio-temporal transferability of our method, a Random Forest was trained on 14 training regions and applied over eight spatially independent test regions distributed across the whole Antarctic continent.
In addition, we employed our workflow for large-scale application over Amery Ice Shelf, where we calculated interannual supraglacial lake dynamics between 2017 and 2020 at full ice shelf coverage. To validate our supraglacial lake detection algorithm, we randomly created point samples over our classification results and compared them to Sentinel-2 imagery. The point comparisons were evaluated using a confusion matrix for calculation of selected accuracy metrics. Our analysis revealed widespread supraglacial lake occurrence in all three Antarctic regions. For the first time, we identified supraglacial meltwater features on Abbott, Hull and Cosgrove Ice Shelves in West Antarctica as well as on the entire Amery Ice Shelf for the years 2017-2020. Over Amery Ice Shelf, maximum lake extent varied strongly between the years, with the 2019 melt season characterized by the largest areal coverage of supraglacial lakes (~763 km\(^2\)). The accuracy assessment over the test regions revealed an average Kappa coefficient of 0.86, with the largest Kappa value reaching 0.98 over George VI Ice Shelf. Future developments will involve the generation of circum-Antarctic supraglacial lake mapping products as well as their use for further methodological developments using Sentinel-1 SAR data in order to characterize intra-annual supraglacial meltwater dynamics, including during polar night and independently of meteorological conditions. In summary, the implementation of the Random Forest classifier enabled the development of the first automated mapping method applied to Sentinel-2 data distributed across all three Antarctic regions.}, language = {en} } @article{AllgaierSchleeLangguthetal.2021, author = {Allgaier, Johannes and Schlee, Winfried and Langguth, Berthold and Probst, Thomas and Pryss, R{\"u}diger}, title = {Predicting the Gender of Individuals with Tinnitus based on Daily Life Data of the TrackYourTinnitus mHealth Platform}, series = {Scientific Reports}, volume = {11}, journal = {Scientific Reports}, number = {1}, doi = {10.1038/s41598-021-96731-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-261753}, year = {2021}, abstract = {Tinnitus is an auditory phantom perception in the absence of external sound stimulation. People with tinnitus often report severe constraints in their daily life. Interestingly, indications exist of gender differences between women and men, both in the symptom profile and in the response to specific tinnitus treatments. In this paper, data of the TrackYourTinnitus platform (TYT) were analyzed to investigate whether the gender of users can be predicted. In general, the TYT mobile health crowdsensing platform was developed to demystify the daily and momentary variations of tinnitus symptoms over time. The goal of the presented investigation is a better understanding of gender-related differences in the symptom profiles of users from TYT. Based on two questionnaires of TYT, four machine-learning-based classifiers were trained and analyzed. With respect to the provided daily answers, the gender of TYT users can be predicted with an accuracy of 81.7\%. In this context, worries, difficulties in concentration, and irritability towards the family are the three most important characteristics for predicting the gender. Note that, in contrast to existing studies on TYT, daily answers to the worst-symptom question were investigated in more detail for the first time. It was found that the results of this question significantly contribute to the prediction of the gender of TYT users.
Overall, our findings indicate gender-related differences in tinnitus and tinnitus-related symptoms. Based on evidence that gender impacts the development of tinnitus, the gathered insights can be considered relevant and justify further investigations in this direction.}, language = {en} } @article{VeyKapsnerFuchsetal.2019, author = {Vey, Johannes and Kapsner, Lorenz A. and Fuchs, Maximilian and Unberath, Philipp and Veronesi, Giulia and Kunz, Meik}, title = {A toolbox for functional analysis and the systematic identification of diagnostic and prognostic gene expression signatures combining meta-analysis and machine learning}, series = {Cancers}, volume = {11}, journal = {Cancers}, number = {10}, issn = {2072-6694}, doi = {10.3390/cancers11101606}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193240}, year = {2019}, abstract = {The identification of biomarker signatures is important for cancer diagnosis and prognosis. However, the detection of clinically reliable signatures is influenced by limited data availability, which may restrict statistical power. Moreover, methods for the integration of large sample cohorts and signature identification are limited. We present a step-by-step computational protocol for functional gene expression analysis and the identification of diagnostic and prognostic signatures by combining meta-analysis with machine learning and survival analysis. The novelty of the toolbox lies in its all-in-one functionality, generic design, and modularity. It is exemplified for lung cancer, including a comprehensive evaluation using different validation strategies. However, the protocol is not restricted to specific disease types and can therefore be used by a broad community. The accompanying R package vignette runs in ~1 h and describes the workflow in detail for use by researchers with limited bioinformatics training.}, language = {en} } @article{MaerzKurlbaumRocheLancasteretal.2021, author = {M{\"a}rz, Juliane and Kurlbaum, Max and Roche-Lancaster, Oisin and Deutschbein, Timo and Peitzsch, Mirko and Prehn, Cornelia and Weismann, Dirk and Robledo, Mercedes and Adamski, Jerzy and Fassnacht, Martin and Kunz, Meik and Kroiss, Matthias}, title = {Plasma Metabolome Profiling for the Diagnosis of Catecholamine Producing Tumors}, series = {Frontiers in Endocrinology}, volume = {12}, journal = {Frontiers in Endocrinology}, issn = {1664-2392}, doi = {10.3389/fendo.2021.722656}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-245710}, year = {2021}, abstract = {Context Pheochromocytomas and paragangliomas (PPGL) cause catecholamine excess leading to a characteristic clinical phenotype. Intra-individual changes at the metabolome level have been described after surgical PPGL removal. The value of metabolomics for the diagnosis of PPGL has not been studied yet. Objective Evaluation of quantitative metabolomics as a diagnostic tool for PPGL. Design Targeted metabolomics by liquid chromatography-tandem mass spectrometry of plasma specimens and statistical modeling using ML-based feature selection approaches in a clinically well-characterized cohort study. Patients Prospectively enrolled patients (n=36, 17 female) from the Prospective Monoamine-producing Tumor Study (PMT) with hormonally active PPGL and 36 matched controls in whom PPGL was rigorously excluded.
Results Among the 188 measured metabolites, only 4 exhibited statistically significant differences between patients with PPGL and controls, and only without considering the false discovery rate (histidine p=0.004, threonine p=0.008, lyso PC a C28:0 p=0.044, sum of hexoses p=0.018). Weak but significant correlations of histidine, threonine and lyso PC a C28:0 with total urine catecholamine levels were identified. Only the sum of hexoses (reflecting glucose) showed significant correlations with plasma metanephrines. By using ML-based feature selection approaches, we identified diagnostic signatures which all exhibited low accuracy and sensitivity. The best predictive value (sensitivity 87.5\%, accuracy 67.3\%) was obtained by using Gradient Boosting Machine Modelling. Conclusions The diabetogenic effect of catecholamine excess dominates the plasma metabolome in PPGL patients. While curative surgery for PPGL led to normalization of catecholamine-induced alterations of the metabolome in individual patients, plasma metabolomics are not useful for diagnostic purposes, most likely due to inter-individual variability.}, language = {en} } @article{MarquardtSolimandoKerscheretal.2021, author = {Marquardt, Andr{\´e} and Solimando, Antonio Giovanni and Kerscher, Alexander and Bittrich, Max and Kalogirou, Charis and K{\"u}bler, Hubert and Rosenwald, Andreas and Bargou, Ralf and Kollmannsberger, Philip and Schilling, Bastian and Meierjohann, Svenja and Krebs, Markus}, title = {Subgroup-Independent Mapping of Renal Cell Carcinoma — Machine Learning Reveals Prognostic Mitochondrial Gene Signature Beyond Histopathologic Boundaries}, series = {Frontiers in Oncology}, volume = {11}, journal = {Frontiers in Oncology}, issn = {2234-943X}, doi = {10.3389/fonc.2021.621278}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-232107}, year = {2021}, abstract = {Background: Renal cell carcinoma (RCC) is divided into three major histopathologic groups—clear cell (ccRCC), papillary (pRCC) and chromophobe RCC (chRCC). We performed a comprehensive re-analysis of publicly available RCC datasets from the TCGA (The Cancer Genome Atlas) database, thereby combining samples from all three subgroups, for an exploratory transcriptome profiling of RCC subgroups. Materials and Methods: We used FPKM (fragments per kilobase per million) files derived from the ccRCC, pRCC and chRCC cohorts of the TCGA database, representing transcriptomic data of 891 patients. Using principal component analysis, we visualized the datasets as a t-SNE plot for cluster detection. Clusters were characterized by machine learning, and the resulting gene signatures were validated by correlation analyses in the TCGA dataset and three external datasets (ICGC RECA-EU, CPTAC-3-Kidney, and GSE157256). Results: Many RCC samples co-clustered according to histopathology. However, a substantial number of samples clustered independently of histopathologic origin (mixed subgroup)—demonstrating divergence between histopathology and transcriptomic data. Further analyses of the mixed subgroup via machine learning revealed a predominant mitochondrial gene signature—a trait previously known for chRCC—across all histopathologic subgroups. Additionally, ccRCC samples from the mixed subgroup presented an inverse correlation of mitochondrial and angiogenesis-related genes in the TCGA and in three external validation cohorts.
Moreover, mixed subgroup affiliation was associated with a highly significant reduction in overall survival for patients with ccRCC and a highly significant increase in overall survival for chRCC patients. Conclusions: Pan-RCC clustering according to RNA-sequencing data revealed a distinct histology-independent subgroup characterized by strengthened mitochondrial and weakened angiogenesis-related gene signatures. Moreover, affiliation with the mixed subgroup was associated with a significantly shorter overall survival for ccRCC and a longer overall survival for chRCC patients. Further research could enable therapy stratification by specifically addressing the mitochondrial metabolism of such tumors and its microenvironment.}, language = {en} } @article{PookFreudenthalKorteetal.2020, author = {Pook, Torsten and Freudenthal, Jan and Korte, Arthur and Simianer, Henner}, title = {Using Local Convolutional Neural Networks for Genomic Prediction}, series = {Frontiers in Genetics}, volume = {11}, journal = {Frontiers in Genetics}, doi = {10.3389/fgene.2020.561497}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-216436}, year = {2020}, abstract = {The prediction of breeding values and phenotypes is of central importance for both livestock and crop breeding. In this study, we analyze the use of artificial neural networks (ANN) and, in particular, local convolutional neural networks (LCNN) for genomic prediction, as a region-specific filter corresponds much better to our prior knowledge of the genetic architecture of traits than traditional convolutional neural networks. Model performances are evaluated on a simulated maize data panel (n = 10,000; p = 34,595) and real Arabidopsis data (n = 2,039; p = 180,000) for a variety of traits based on their predictive ability. The baseline LCNN, containing one local convolutional layer (kernel size: 10) and two fully connected layers with 64 nodes each, outperforms commonly proposed ANNs (multilayer perceptrons and convolutional neural networks) for basically all considered traits. For traits with high heritability and a large training population, as present in the simulated data, LCNNs even outperform state-of-the-art methods like genomic best linear unbiased prediction (GBLUP), Bayesian models, and extended GBLUP, indicated by an increase in predictive ability of up to 24\%. However, for small training populations, these state-of-the-art methods outperform all considered ANNs. Nevertheless, the LCNN still outperforms all other considered ANNs by around 10\%. Minor improvements to the tested baseline network architecture of the LCNN were obtained by increasing the kernel size and reducing the stride, whereas the number of subsequent fully connected layers and their node sizes had negligible impact. Although gains in predictive ability were obtained for large-scale data sets by using LCNNs, the practical use of ANNs comes with additional problems, such as the need to genotype all considered individuals and the lack of estimates of heritability and reliability. Furthermore, breeding values are additive by design, whereas ANN-based estimates are not. However, ANNs also come with new opportunities, as networks can easily be extended to account for additional inputs (omics, weather, etc.) and outputs (multi-trait models), and computing time increases linearly with the number of individuals.
With advances in high-throughput phenotyping and cheaper genotyping, ANNs can become a valid alternative for genomic prediction.}, language = {en} } @article{MarquardtLandwehrRonchietal.2021, author = {Marquardt, Andr{\´e} and Landwehr, Laura-Sophie and Ronchi, Cristina L. and di Dalmazi, Guido and Riester, Anna and Kollmannsberger, Philip and Altieri, Barbara and Fassnacht, Martin and Sbiera, Silviu}, title = {Identifying New Potential Biomarkers in Adrenocortical Tumors Based on mRNA Expression Data Using Machine Learning}, series = {Cancers}, volume = {13}, journal = {Cancers}, number = {18}, issn = {2072-6694}, doi = {10.3390/cancers13184671}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-246245}, year = {2021}, abstract = {Simple Summary Using a visual-based clustering method on the TCGA RNA sequencing data of a large adrenocortical carcinoma (ACC) cohort, we were able to classify these tumors in two distinct clusters largely overlapping with previously identified ones. As previously shown, the identified clusters also correlated with patient survival. Applying the visual clustering method to a second dataset also including benign adrenocortical samples additionally revealed that one of the ACC clusters is more closely located to the benign samples, providing a possible explanation for the better survival of this ACC cluster. Furthermore, the subsequent use of machine learning identified new possible biomarker genes with prognostic potential for this rare disease, that are significantly differentially expressed in the different survival clusters and should be further evaluated. Abstract Adrenocortical carcinoma (ACC) is a rare disease, associated with poor survival. Several "multiple-omics" studies characterizing ACC on a molecular level identified two different clusters correlating with patient survival (C1A and C1B). We here used the publicly available transcriptome data from the TCGA-ACC dataset (n = 79), applying machine learning (ML) methods to classify the ACC based on expression pattern in an unbiased manner. UMAP (uniform manifold approximation and projection)-based clustering resulted in two distinct groups, ACC-UMAP1 and ACC-UMAP2, that largely overlap with clusters C1B and C1A, respectively. However, subsequent use of random-forest-based learning revealed a set of new possible marker genes showing significant differential expression in the described clusters (e.g., SOAT1, EIF2A1). For validation purposes, we used a secondary dataset based on a previous study from our group, consisting of 4 normal adrenal glands and 52 benign and 7 malignant tumor samples. The results largely confirmed those obtained for the TCGA-ACC cohort. In addition, the ENSAT dataset showed a correlation between benign adrenocortical tumors and the good prognosis ACC cluster ACC-UMAP1/C1B. In conclusion, the use of ML approaches re-identified and redefined known prognostic ACC subgroups. On the other hand, the subsequent use of random-forest-based learning identified new possible prognostic marker genes for ACC.}, language = {en} } @article{LohPoigneeWamseretal.2021, author = {Loh, Frank and Poign{\´e}e, Fabian and Wamser, Florian and Leidinger, Ferdinand and Hoßfeld, Tobias}, title = {Uplink vs. 
Downlink: Machine Learning-Based Quality Prediction for HTTP Adaptive Video Streaming}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {12}, issn = {1424-8220}, doi = {10.3390/s21124172}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241121}, year = {2021}, abstract = {Streaming video is responsible for the bulk of Internet traffic these days. For this reason, Internet providers and network operators try to make predictions and assessments about the streaming quality for an end user. Current monitoring solutions are based on a variety of different machine learning approaches. The challenge for providers and operators nowadays is that existing approaches require large amounts of data. In this work, the most relevant quality-of-experience metrics, i.e., the initial playback delay, the video streaming quality, video quality changes, and video rebuffering events, are examined using a voluminous data set of more than 13,000 YouTube video streaming runs that were collected with the native YouTube mobile app. Three machine learning models are developed and compared to estimate playback behavior based on uplink request information. The main focus has been on developing a lightweight approach using as few features and as little data as possible, while maintaining state-of-the-art performance.}, language = {en} } @article{PryssSchleeHoppenstedtetal.2020, author = {Pryss, R{\"u}diger and Schlee, Winfried and Hoppenstedt, Burkhard and Reichert, Manfred and Spiliopoulou, Myra and Langguth, Berthold and Breitmayer, Marius and Probst, Thomas}, title = {Applying Machine Learning to Daily-Life Data From the TrackYourTinnitus Mobile Health Crowdsensing Platform to Predict the Mobile Operating System Used With High Accuracy: Longitudinal Observational Study}, series = {Journal of Medical Internet Research}, volume = {22}, journal = {Journal of Medical Internet Research}, number = {6}, doi = {10.2196/15547}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-229517}, year = {2020}, abstract = {Background: Tinnitus is often described as the phantom perception of a sound and is experienced by 5.1\% to 42.7\% of the population worldwide at least once during their lifetime. The symptoms often reduce the patient's quality of life. The TrackYourTinnitus (TYT) mobile health (mHealth) crowdsensing platform was developed for two operating systems (OS), Android and iOS, to help patients demystify the daily moment-to-moment variations of their tinnitus symptoms. In all platforms developed for more than one OS, it is important to investigate whether the crowdsensed data predicts the OS that was used, in order to understand the degree to which the OS is a confounder that needs to be considered.}, language = {en} } @article{SchloerRingHotho2020, author = {Schl{\"o}r, Daniel and Ring, Markus and Hotho, Andreas}, title = {iNALU: Improved Neural Arithmetic Logic Unit}, series = {Frontiers in Artificial Intelligence}, volume = {3}, journal = {Frontiers in Artificial Intelligence}, issn = {2624-8212}, doi = {10.3389/frai.2020.00071}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-212301}, year = {2020}, abstract = {Neural networks have to capture mathematical relationships in order to learn various tasks. They approximate these relations implicitly and therefore often do not generalize well.
The recently proposed Neural Arithmetic Logic Unit (NALU) is a novel neural architecture whose units can explicitly represent mathematical relationships and learn operations such as summation, subtraction or multiplication. Although NALUs have been shown to perform well on various downstream tasks, an in-depth analysis reveals practical shortcomings by design, such as the inability to multiply or divide negative input values or training stability issues for deeper networks. We address these issues and propose an improved model architecture. We evaluate our model empirically in various settings, from learning basic arithmetic operations to more complex functions. Our experiments indicate that our model solves the stability issues and outperforms the original NALU model in terms of arithmetic precision and convergence.}, language = {en} } @article{SchwarzmeierLeehrBoehnleinetal.2020, author = {Schwarzmeier, Hanna and Leehr, Elisabeth Johanna and B{\"o}hnlein, Joscha and Seeger, Fabian Reinhard and Roesmann, Kati and Gathmann, Bettina and Herrmann, Martin J. and Siminski, Niklas and Jungh{\"o}fer, Markus and Straube, Thomas and Grotegerd, Dominik and Dannlowski, Udo}, title = {Theranostic markers for personalized therapy of spider phobia: Methods of a bicentric external cross-validation machine learning approach}, series = {International Journal of Methods in Psychiatric Research}, volume = {29}, journal = {International Journal of Methods in Psychiatric Research}, number = {2}, doi = {10.1002/mpr.1812}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-213430}, year = {2020}, abstract = {Objectives Embedded in the Collaborative Research Center "Fear, Anxiety, Anxiety Disorders" (CRC-TRR58), this bicentric clinical study aims to identify biobehavioral markers of treatment (non-)response by applying machine learning methodology with an external cross-validation protocol. We hypothesize that a priori prediction of treatment (non-)response is possible in a second, independent sample based on multimodal markers. Methods One-session virtual reality exposure treatment (VRET) with patients with spider phobia was conducted at two sites. Clinical, neuroimaging, and genetic data were assessed at baseline, post-treatment and after 6 months. The primary and secondary outcomes defining treatment response are as follows: a 30\% reduction in the individual score on the Spider Phobia Questionnaire and a 50\% reduction in the individual distance in the behavioral avoidance test. Results N = 204 patients were included (n = 100 in W{\"u}rzburg, n = 104 in M{\"u}nster). Sample characteristics for both sites are comparable. Discussion This study will offer cross-validated theranostic markers for predicting the individual success of exposure-based therapy. Findings will support clinical decision-making on personalized therapy, bridge the gap between basic and clinical research, and bring stratified therapy into reach.
The study is registered at ClinicalTrials.gov (ID: NCT03208400).}, language = {en} } @article{KammererHoppenstedtPryssetal.2019, author = {Kammerer, Klaus and Hoppenstedt, Burkhard and Pryss, R{\"u}diger and St{\"o}kler, Steffen and Allgaier, Johannes and Reichert, Manfred}, title = {Anomaly Detections for Manufacturing Systems Based on Sensor Data—Insights into Two Challenging Real-World Production Settings}, series = {Sensors}, volume = {19}, journal = {Sensors}, number = {24}, issn = {1424-8220}, doi = {10.3390/s19245370}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193885}, pages = {5370}, year = {2019}, abstract = {To build, run, and maintain reliable manufacturing machines, the condition of their components has to be continuously monitored. When following a fine-grained monitoring of these machines, challenges emerge pertaining to (1) the feeding of large amounts of sensor data to downstream processing components and (2) the meaningful analysis of the produced data. Regarding the latter aspect, manifold purposes are addressed by practitioners and researchers. Two analyses of real-world datasets that were generated in production settings are discussed in this paper. More specifically, the analyses had the goals of (1) detecting sensor data anomalies for further analyses of a pharma packaging scenario and (2) predicting unfavorable temperature values of a 3D printing machine environment. Based on the results of the analyses, it will be shown that proper management of machines and their components in industrial manufacturing environments can be efficiently supported by the detection of anomalies. The latter should support the technical evangelists of the production companies more effectively.}, language = {en} } @article{KaltdorfTheissMarkertetal.2018, author = {Kaltdorf, Kristin Verena and Theiss, Maria and Markert, Sebastian Matthias and Zhen, Mei and Dandekar, Thomas and Stigloher, Christian and Kollmannsberger, Philipp}, title = {Automated classification of synaptic vesicles in electron tomograms of C. elegans using machine learning}, series = {PLoS ONE}, volume = {13}, journal = {PLoS ONE}, number = {10}, doi = {10.1371/journal.pone.0205348}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176831}, pages = {e0205348}, year = {2018}, abstract = {Synaptic vesicles (SVs) are a key component of neuronal signaling and fulfil different roles depending on their composition. In electron micrographs of neurites, two types of vesicles can be distinguished by morphological criteria, the classical "clear core" vesicles (CCV) and the typically larger "dense core" vesicles (DCV), with differences in electron density due to their diverse cargos. Compared to that of CCVs, the precise function of DCVs is less well defined. DCVs are known to store neuropeptides, which function as neuronal messengers and modulators [1]. In C. elegans, they play a role in locomotion, dauer formation, egg-laying, and mechano- and chemosensation [2]. Another type of DCVs, also referred to as granulated vesicles, is known to transport Bassoon, Piccolo and further constituents of the presynaptic density in the center of the active zone (AZ), and therefore is important for synaptogenesis [3]. To better understand the role of different types of SVs, we present here a new automated approach to classify vesicles. We combine machine learning with an extension of our previously developed vesicle segmentation workflow, the ImageJ macro 3D ART VeSElecT.
With this extension, we reliably distinguish CCVs and DCVs in electron tomograms of C. elegans NMJs using image-based features. Analysis of the underlying ground truth data shows an increased fraction of DCVs as well as a higher mean distance between DCVs and AZs in dauer larvae compared to young adult hermaphrodites. Our machine-learning-based tools are adaptable and can be applied to study properties of different synaptic vesicle pools in electron tomograms of diverse model organisms.}, language = {en} } @article{PfitznerMayNuechter2018, author = {Pfitzner, Christian and May, Stefan and N{\"u}chter, Andreas}, title = {Body weight estimation for dose-finding and health monitoring of lying, standing and walking patients based on RGB-D data}, series = {Sensors}, volume = {18}, journal = {Sensors}, number = {5}, doi = {10.3390/s18051311}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176642}, pages = {1311}, year = {2018}, abstract = {This paper describes the estimation of the body weight of a person in front of an RGB-D camera. A survey of different methods for body weight estimation based on depth sensors is given. First, an estimation for people standing in front of a camera is presented. Second, an approach based on a stream of depth images is used to obtain the body weight of a person walking towards a sensor. The algorithm first extracts features from a point cloud and forwards them to an artificial neural network (ANN) to obtain an estimation of body weight. Besides the estimation algorithm, this paper also presents an open-access dataset based on measurements from a trauma room in a hospital as well as data from visitors of a public event. In total, the dataset contains 439 measurements. The article illustrates the efficiency of the approach with experiments on persons lying down in a hospital, standing persons, and walking persons. Applicable scenarios for the presented algorithm include body-weight-related dosing of emergency patients.}, language = {en} } @article{KazuhinoWernerToriumietal.2018, author = {Kazuhino, Koshino and Werner, Rudolf A. and Toriumi, Fuijo and Javadi, Mehrbod S. and Pomper, Martin G. and Solnes, Lilja B. and Verde, Franco and Higuchi, Takahiro and Rowe, Steven P.}, title = {Generative Adversarial Networks for the Creation of Realistic Artificial Brain Magnetic Resonance Images}, series = {Tomography}, volume = {4}, journal = {Tomography}, number = {4}, doi = {10.18383/j.tom.2018.00042}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-172185}, pages = {159-163}, year = {2018}, abstract = {Even as medical data sets become more publicly accessible, most are restricted to specific medical conditions. Thus, data collection for machine learning approaches remains challenging, and synthetic data augmentation, such as with generative adversarial networks (GAN), may overcome this hurdle. In the present quality control study, deep convolutional GAN (DCGAN)-based human brain magnetic resonance (MR) images were validated by blinded radiologists. In total, 96 T1-weighted brain images from 30 healthy individuals and 33 patients with cerebrovascular accident were included. A training data set was generated from the T1-weighted images and DCGAN was applied to generate additional artificial brain images. The likelihood that images were DCGAN-created versus acquired was evaluated by 5 radiologists (2 neuroradiologists [NRs] vs 3 non-neuroradiologists [NNRs]) in a binary fashion to identify real vs created images.
Images were selected randomly from the data set (proportion of created images: 40\%-60\%). None of the investigated images was rated as unknown. Of the created images, the NRs rated 45\% and 71\% as real magnetic resonance images (NNRs, 24\%, 40\%, and 44\%). In contradistinction, 44\% and 70\% of the real images were rated as generated images by NRs (NNRs, 10\%, 17\%, and 27\%). The accuracy for the NRs was 0.55 and 0.30 (NNRs, 0.83, 0.72, and 0.64). DCGAN-created brain MR images are similar enough to acquired MR images to be indistinguishable in some cases. Such an artificial intelligence algorithm may contribute to synthetic data augmentation for "data-hungry" technologies, such as supervised machine learning approaches, in various clinical applications.}, subject = {Magnetresonanztomografie}, language = {en} } @article{HoehneHolzStaigerSaelzeretal.2014, author = {H{\"o}hne, Johannes and Holz, Elisa and Staiger-S{\"a}lzer, Pit and M{\"u}ller, Klaus-Robert and K{\"u}bler, Andrea and Tangermann, Michael}, title = {Motor Imagery for Severely Motor-Impaired Patients: Evidence for Brain-Computer Interfacing as Superior Control Solution}, series = {PLoS ONE}, volume = {9}, journal = {PLoS ONE}, number = {8}, doi = {10.1371/journal.pone.0104854}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-119331}, pages = {e104854}, year = {2014}, abstract = {Brain-Computer Interfaces (BCIs) strive to decode brain signals into control commands for severely handicapped people with no means of muscular control. These potential users of noninvasive BCIs display a large range of physical and mental conditions. Prior studies have shown the general applicability of BCIs with patients, but faced the conflict of either using many training sessions or studying only moderately restricted patients. We present a BCI system designed to establish external control for severely motor-impaired patients within a very short time. Within only six experimental sessions, three out of four patients were able to gain significant control over the BCI, which was based on motor imagery or attempted execution. For the most affected patient, we found evidence that the BCI could outperform the patient's best assistive technology (AT) in terms of control accuracy, reaction time and information transfer rate. We credit this success to the applied user-centered design approach and to a highly flexible technical setup. State-of-the-art machine learning methods allowed the exploitation and combination of multiple relevant features contained in the EEG, which rapidly enabled the patients to gain substantial BCI control. Thus, we could show the feasibility of a flexible and tailorable BCI application in severely disabled users. This can be considered a significant success for two reasons: Firstly, the results were obtained within a short period of time, matching the tight clinical requirements. Secondly, the participating patients showed, compared to most other studies, very severe communication deficits. They were dependent on everyday use of AT, and two patients were in a locked-in state. For the most affected patient, reliable communication was rarely possible with existing AT.}, language = {en} }