@article{LimanMayFetteetal.2023, author = {Liman, Leon and May, Bernd and Fette, Georg and Krebs, Jonathan and Puppe, Frank}, title = {Using a clinical data warehouse to calculate and present key metrics for the radiology department: implementation and performance evaluation}, series = {JMIR Medical Informatics}, volume = {11}, journal = {JMIR Medical Informatics}, issn = {2291-9694}, doi = {10.2196/41808}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349411}, year = {2023}, abstract = {Background: Due to the importance of radiologic examinations, such as X-rays or computed tomography scans, for many clinical diagnoses, the optimal use of the radiology department is 1 of the primary goals of many hospitals. Objective: This study aims to calculate the key metrics of this use by creating a radiology data warehouse solution, where data from radiology information systems (RISs) can be imported and then queried using a query language as well as a graphical user interface (GUI). Methods: Using a simple configuration file, the developed system allowed for the processing of radiology data exported from any kind of RIS into a Microsoft Excel, comma-separated value (CSV), or JavaScript Object Notation (JSON) file. These data were then imported into a clinical data warehouse. Additional values based on the radiology data were calculated during this import process by implementing 1 of several provided interfaces. Afterward, the query language and GUI of the data warehouse were used to configure and calculate reports on these data. For the most common types of requested reports, a web interface was created to view their numbers as graphics. Results: The tool was successfully tested with the data of 4 different German hospitals from 2018 to 2021, with a total of 1,436,111 examinations. The user feedback was good, since all their queries could be answered if the available data were sufficient. The initial processing of the radiology data for using them with the clinical data warehouse took (depending on the amount of data provided by each hospital) between 7 minutes and 1 hour 11 minutes. Calculating 3 reports of different complexities on the data of each hospital was possible in 1-3 seconds for reports with up to 200 individual calculations and in up to 1.5 minutes for reports with up to 8200 individual calculations. Conclusions: A system was developed with the main advantage of being generic concerning the export of different RISs as well as concerning the configuration of queries for various reports. The queries could be configured easily using the GUI of the data warehouse, and their results could be exported into the standard formats Excel and CSV for further processing.}, language = {en} } @article{KrenzerHeilFittingetal., author = {Krenzer, Adrian and Heil, Stefan and Fitting, Daniel and Matti, Safa and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Automated classification of polyps using deep learning architectures and few-shot learning}, series = {BMC Medical Imaging}, volume = {23}, journal = {BMC Medical Imaging}, doi = {10.1186/s12880-023-01007-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357465}, abstract = {Background: Colorectal cancer (CRC) is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is a colonoscopy. However, not all colon polyps have the risk of becoming cancerous. Therefore, polyps are classified using different classification systems. 
After the classification, further treatment and procedures are based on the classification of the polyp. Nevertheless, classification is not easy. Therefore, we suggest two novel automated classification systems assisting gastroenterologists in classifying polyps based on the NICE and Paris classifications. Methods: We build two classification systems. One classifies polyps based on their shape (Paris). The other classifies polyps based on their texture and surface patterns (NICE). A two-step process for the Paris classification is introduced: first, detecting and cropping the polyp in the image, and second, classifying the polyp based on the cropped area with a transformer network. For the NICE classification, we design a few-shot learning algorithm based on the Deep Metric Learning approach. The algorithm creates an embedding space for polyps, which allows classification from a few examples to account for the data scarcity of NICE-annotated images in our database. Results: For the Paris classification, we achieve an accuracy of 89.35\%, surpassing all papers in the literature and establishing a new state-of-the-art and baseline accuracy for other publications on a public data set. For the NICE classification, we achieve a competitive accuracy of 81.13\% and thereby demonstrate the viability of the few-shot learning paradigm in polyp classification in data-scarce environments. Additionally, we show different ablations of the algorithms. Finally, we further elaborate on the explainability of the system by showing heat maps that explain the neural network's activations. Conclusion: Overall, we introduce two polyp classification systems to assist gastroenterologists. We achieve state-of-the-art performance in the Paris classification and demonstrate the viability of the few-shot learning paradigm in the NICE classification, addressing the prevalent data scarcity issues faced in medical machine learning.}, language = {en} } @article{WickHarteltPuppe2019, author = {Wick, Christoph and Hartelt, Alexander and Puppe, Frank}, title = {Staff, symbol and melody detection of Medieval manuscripts written in square notation using deep Fully Convolutional Networks}, series = {Applied Sciences}, volume = {9}, journal = {Applied Sciences}, number = {13}, issn = {2076-3417}, doi = {10.3390/app9132646}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197248}, year = {2019}, abstract = {Even today, the automatic digitisation of scanned documents in general, but especially the automatic optical music recognition (OMR) of historical manuscripts, still remains an enormous challenge, since both handwritten musical symbols and text have to be identified. This paper focuses on the Medieval so-called square notation developed in the 11th-12th century, which is already composed of staff lines, staves, clefs, accidentals, and neumes, which are, roughly speaking, connected single notes. The aim is to develop an algorithm that captures the neumes and, in particular, their melody, which can be used to reconstruct the original writing. Our pipeline is similar to the standard OMR approach and comprises a novel staff line and symbol detection algorithm based on deep Fully Convolutional Networks (FCN), which perform pixel-based predictions for either staff lines or symbols and their respective types. Then, the staff line detection combines the extracted lines into staves and yields an F\(_1\)-score of over 99\% for detecting both lines and complete staves. 
For the music symbol detection, we choose a novel approach that skips the step of identifying neumes and instead directly predicts note components (NCs) and their respective affiliation to a neume. Furthermore, the algorithm detects clefs and accidentals. Our algorithm predicts the symbol sequence of a staff with a diplomatic symbol accuracy rate (dSAR) of about 87\%, which includes symbol type and location. If only the NCs (without their respective connection to a neume), clefs, and accidentals are of interest, the algorithm reaches a harmonic symbol accuracy rate (hSAR) of approximately 90\%. In general, the algorithm recognises a symbol in the manuscript with an F\(_1\)-score of over 96\%.}, language = {en} } @article{DjebkoPuppeKayal2019, author = {Djebko, Kirill and Puppe, Frank and Kayal, Hakan}, title = {Model-based fault detection and diagnosis for spacecraft with an application for the SONATE triple cube nano-satellite}, series = {Aerospace}, volume = {6}, journal = {Aerospace}, number = {10}, issn = {2226-4310}, doi = {10.3390/aerospace6100105}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-198836}, pages = {105}, year = {2019}, abstract = {The correct behavior of spacecraft components is the foundation of unhindered mission operation. However, no technical system is free of wear and degradation. A malfunction of one single component might significantly alter the behavior of the whole spacecraft and may even lead to a complete mission failure. Therefore, abnormal component behavior must be detected early in order to be able to perform countermeasures. A dedicated fault detection system can be employed, as opposed to classical health monitoring performed by human operators, to decrease the response time to a malfunction. In this paper, we present a generic model-based diagnosis system, which detects faults by analyzing the spacecraft's housekeeping data. The observed behavior of the spacecraft components, given by the housekeeping data, is compared to their expected behavior, obtained through simulation. Each discrepancy between the observed and the expected behavior of a component generates a so-called symptom. Given the symptoms, the diagnoses are derived by computing sets of components whose malfunction might cause the observed discrepancies. We demonstrate the applicability of the diagnosis system by using modified housekeeping data of the qualification model of an actual spacecraft and outline the advantages and drawbacks of our approach.}, language = {en} } @article{LodaKrebsDanhofetal.2019, author = {Loda, Sophia and Krebs, Jonathan and Danhof, Sophia and Schreder, Martin and Solimando, Antonio G. and Strifler, Susanne and Rasche, Leo and Kort{\"u}m, Martin and Kerscher, Alexander and Knop, Stefan and Puppe, Frank and Einsele, Hermann and Bittrich, Max}, title = {Exploration of artificial intelligence use with ARIES in multiple myeloma research}, series = {Journal of Clinical Medicine}, volume = {8}, journal = {Journal of Clinical Medicine}, number = {7}, issn = {2077-0383}, doi = {10.3390/jcm8070999}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197231}, pages = {999}, year = {2019}, abstract = {Background: Natural language processing (NLP) is a powerful tool supporting the generation of Real-World Evidence (RWE). There is no NLP system that enables the extensive querying of parameters specific to multiple myeloma (MM) from unstructured medical reports. 
We therefore created an MM-specific ontology to accelerate the information extraction (IE) from unstructured text. Methods: Our MM ontology consists of extensive MM-specific and hierarchically structured attributes and values. We implemented "A Rule-based Information Extraction System" (ARIES) that uses this ontology. We evaluated ARIES on 200 randomly selected medical reports of patients diagnosed with MM. Results: Our system achieved a high F1-score of 0.92 on the evaluation dataset, with a precision of 0.87 and a recall of 0.98. Conclusions: Our rule-based IE system enables the comprehensive querying of medical reports. The IE accelerates the extraction of data and enables clinicians to generate RWE on hematological issues faster. RWE helps clinicians to make decisions in an evidence-based manner. Our tool thus accelerates the integration of research evidence into everyday clinical practice.}, language = {en} }