@article{HoernleinMandelIflandetal.2011, author = {H{\"o}rnlein, Alexander and Mandel, Alexander and Ifland, Marianus and L{\"u}neberg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Akzeptanz medizinischer Trainingsf{\"a}lle als Erg{\"a}nzung zu Vorlesungen}, series = {GMS Zeitschrift f{\"u}r Medizinische Ausbildung}, volume = {28}, journal = {GMS Zeitschrift f{\"u}r Medizinische Ausbildung}, number = {3}, doi = {10.3205/zma000754}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-133569}, pages = {Doc42}, year = {2011}, abstract = {Introduction: Medical training cases (virtual patients) are in widespread use for student education. Most publications report on development and experiences in one course with training cases. In this paper we compare the acceptance of different training case courses with different usage patterns, deployed as a supplement to lectures at the medical faculty of Wuerzburg University during a period of three semesters. Methods: The training cases were developed with the authoring tool CaseTrain and are available for students via the Moodle-based eLearning platform WueCampus at Wuerzburg University. Various data on usage and acceptance are collected automatically. Results: From WS (winter semester) 08/09 until WS 09/10, 19 courses with about 200 cases were available. In each semester, about 550 different medical students from W{\"u}rzburg and 50 students from other universities processed about 12000 training cases and filled in about 2000 evaluation forms. In different courses, the usage varied between less than 50 and more than 5000 processed cases. Discussion: Although students demand training cases as a supplement to all lectures, the data show that the usage does not primarily depend on the quality of the available training cases. Instead, the training cases of nearly all case collections were processed extremely often shortly before the examination. This shows that the degree of usage depends primarily on the perceived relevance of the training cases for the examination.}, language = {de} } @article{MandelHoernleinIflandetal.2011, author = {Mandel, Alexander and H{\"o}rnlein, Alexander and Ifland, Marianus and L{\"u}neburg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Aufwandsanalyse f{\"u}r computerunterst{\"u}tzte Multiple-Choice Papierklausuren}, series = {GMS Journal for Medical Education}, volume = {28}, journal = {GMS Journal for Medical Education}, number = {4}, doi = {10.3205/zma000767}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134386}, pages = {1-15, Doc55}, year = {2011}, abstract = {Introduction: Multiple-choice examinations are still fundamental for assessment in medical degree programs. In addition to content-related research, the optimization of the technical procedure is an important question. Medical examiners face three options: paper-based examinations with or without computer support or completely electronic examinations. Critical aspects are the effort for formatting, the logistic effort during the actual examination, quality, promptness and effort of the correction, the time for making the documents available for inspection by the students, and the statistical analysis of the examination results. Methods: For three semesters, a computer program for the input and formatting of MC questions in medical and other paper-based examinations has been used and continuously improved at Wuerzburg University.
In the winter semester (WS) 2009/10 eleven, in the summer semester (SS) 2010 twelve, and in WS 2010/11 thirteen medical examinations were conducted with the program and automatically evaluated. For the last two semesters, the remaining manual workload was recorded. Results: The effort for formatting and the subsequent analysis, including adjustments of the analysis, of an average examination with about 140 participants and about 35 questions was 5-7 hours for exams without complications in the winter semester 2009/2010, about 2 hours in SS 2010, and about 1.5 hours in the winter semester 2010/11. Including exams with complications, the average time was about 3 hours per exam in SS 2010 and 2.67 hours in WS 10/11. Discussion: For conventional multiple-choice exams, the computer-based formatting and evaluation of paper-based exams offers a significant time reduction for lecturers in comparison with the manual correction of paper-based exams; compared to purely electronically conducted exams, it needs a much simpler technological infrastructure and fewer staff during the exam.}, language = {de} } @inproceedings{JannidisRegerWeimeretal.2015, author = {Jannidis, Fotis and Reger, Isabella and Weimer, Lukas and Krug, Markus and Puppe, Frank}, title = {Automatische Erkennung von Figuren in deutschsprachigen Romanen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-143332}, pages = {7}, year = {2015}, abstract = {An important basis for the quantitative analysis of narrative texts, for instance a network analysis of the character constellation, is the automatic recognition of references to characters in narrative texts, a special case of the generic NLP problem of Named Entity Recognition. Existing models trained on newspaper texts are only of limited use for literary texts, since the inclusion of appellatives in the named entity definition and their frequent use in novels lead to poor results. This paper presents an NER component that was adapted to German novels of the 19th century using a manually annotated corpus.}, subject = {Digital Humanities}, language = {de} } @article{ToepferCorovicFetteetal.2015, author = {Toepfer, Martin and Corovic, Hamo and Fette, Georg and Kl{\"u}gl, Peter and St{\"o}rk, Stefan and Puppe, Frank}, title = {Fine-grained information extraction from German transthoracic echocardiography reports}, series = {BMC Medical Informatics and Decision Making}, volume = {15}, journal = {BMC Medical Informatics and Decision Making}, number = {91}, doi = {10.1186/s12911-015-0215-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-125509}, year = {2015}, abstract = {Background Information extraction techniques that derive structured representations from unstructured data make a large amount of clinically relevant information about patients accessible for semantic applications. These methods typically rely on standardized terminologies that guide this process. Many languages and clinical domains, however, lack appropriate resources and tools, as well as evaluations of their applications, especially if detailed conceptualizations of the domain are required. For instance, German transthoracic echocardiography reports have not been targeted sufficiently before, despite their importance for clinical trials.
This work therefore aimed at the development and evaluation of an information extraction component with a fine-grained terminology that enables the recognition of almost all relevant information stated in German transthoracic echocardiography reports at the University Hospital of W{\"u}rzburg. Methods A domain expert validated and iteratively refined an automatically inferred base terminology. The terminology was used by an ontology-driven information extraction system that outputs attribute-value pairs. The final component has been mapped to the central elements of a standardized terminology, and it has been evaluated on documents with different layouts. Results The final system achieved state-of-the-art precision (micro average .996) and recall (micro average .961) on 100 test documents that represent more than 90 \% of all reports. In particular, principal aspects as defined in a standardized external terminology were recognized with F\(_1\)=.989 (micro average) and F\(_1\)=.963 (macro average). As a result of keyword matching and restraint concept extraction, the system also obtained high precision on unstructured or exceptionally short documents and documents with uncommon layouts. Conclusions The developed terminology and the proposed information extraction system allow the extraction of fine-grained information from German semi-structured transthoracic echocardiography reports with very high precision and high recall on the majority of documents at the University Hospital of W{\"u}rzburg. Extracted results populate a clinical data warehouse which supports clinical research.}, language = {en} } @article{GehrkeBalbachRauchetal.2019, author = {Gehrke, Alexander and Balbach, Nico and Rauch, Yong-Mi and Degkwitz, Andreas and Puppe, Frank}, title = {Erkennung von handschriftlichen Unterstreichungen in Alten Drucken}, series = {Bibliothek Forschung und Praxis}, volume = {43}, journal = {Bibliothek Forschung und Praxis}, number = {3}, issn = {1865-7648}, doi = {10.1515/bfp-2019-2083}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193377}, pages = {447--452}, year = {2019}, abstract = {The recognition of handwritten artefacts such as underlines in printed books allows conclusions about reception behaviour and provenance history and is also required for OCR. Handwritten underlines have to be distinguished from horizontal lines in the print (e.g. separator lines), since the latter should not be annotated. This paper presents an approach based on a neural network with U-Net architecture trained on underlines, whose results are post-processed with heuristic rules in a second step. The evaluations show that underlines are recognized very well as long as not too many pixels of the underline are lost during binarization of the scans due to low contrast.
Zuk{\"u}nftig sollen die Worte oberhalb der Unterstreichung mit OCR transkribiert werden und auch andere Artefakte wie handschriftliche Notizen in alten Drucken erkannt werden.}, language = {de} } @article{WickHarteltPuppe2019, author = {Wick, Christoph and Hartelt, Alexander and Puppe, Frank}, title = {Staff, symbol and melody detection of Medieval manuscripts written in square notation using deep Fully Convolutional Networks}, series = {Applied Sciences}, volume = {9}, journal = {Applied Sciences}, number = {13}, issn = {2076-3417}, doi = {10.3390/app9132646}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197248}, year = {2019}, abstract = {Even today, the automatic digitisation of scanned documents in general, but especially the automatic optical music recognition (OMR) of historical manuscripts, still remains an enormous challenge, since both handwritten musical symbols and text have to be identified. This paper focuses on the Medieval so-called square notation developed in the 11th-12th century, which is already composed of staff lines, staves, clefs, accidentals, and neumes that are roughly spoken connected single notes. The aim is to develop an algorithm that captures both the neumes, and in particular its melody, which can be used to reconstruct the original writing. Our pipeline is similar to the standard OMR approach and comprises a novel staff line and symbol detection algorithm based on deep Fully Convolutional Networks (FCN), which perform pixel-based predictions for either staff lines or symbols and their respective types. Then, the staff line detection combines the extracted lines to staves and yields an F\(_1\) -score of over 99\% for both detecting lines and complete staves. For the music symbol detection, we choose a novel approach that skips the step to identify neumes and instead directly predicts note components (NCs) and their respective affiliation to a neume. Furthermore, the algorithm detects clefs and accidentals. Our algorithm predicts the symbol sequence of a staff with a diplomatic symbol accuracy rate (dSAR) of about 87\%, which includes symbol type and location. If only the NCs without their respective connection to a neume, all clefs and accidentals are of interest, the algorithm reaches an harmonic symbol accuracy rate (hSAR) of approximately 90\%. In general, the algorithm recognises a symbol in the manuscript with an F\(_1\) -score of over 96\%.}, language = {en} } @article{ReulChristHarteltetal.2019, author = {Reul, Christian and Christ, Dennis and Hartelt, Alexander and Balbach, Nico and Wehner, Maximilian and Springmann, Uwe and Wick, Christoph and Grundig, Christine and B{\"u}ttner, Andreas and Puppe, Frank}, title = {OCR4all—An open-source tool providing a (semi-)automatic OCR workflow for historical printings}, series = {Applied Sciences}, volume = {9}, journal = {Applied Sciences}, number = {22}, issn = {2076-3417}, doi = {10.3390/app9224853}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193103}, pages = {4853}, year = {2019}, abstract = {Optical Character Recognition (OCR) on historical printings is a challenging task mainly due to the complexity of the layout and the highly variant typography. Nevertheless, in the last few years, great progress has been made in the area of historical OCR, resulting in several powerful open-source tools for preprocessing, layout analysis and segmentation, character recognition, and post-processing. 
The drawback of these tools is often their limited applicability for non-technical users like humanist scholars, in particular when several tools have to be combined in a workflow. In this paper, we present an open-source OCR software called OCR4all, which combines state-of-the-art OCR components and continuous model training into a comprehensive workflow. While a variety of materials can already be processed fully automatically, books with more complex layouts require manual intervention by the users. This is mostly due to the fact that the ground truth required for training stronger mixed models (for segmentation as well as text recognition) is not yet available in the desired quantity or quality. To deal with this issue in the short run, OCR4all offers a comfortable GUI that allows error corrections not only in the final output, but already in early stages to minimize error propagation. In the long run, this constant manual correction produces large quantities of valuable, high-quality training material, which can be used to improve fully automatic approaches. Furthermore, extensive configuration capabilities are provided to set the degree of automation of the workflow and to make adaptations to the carefully selected default parameters for specific printings, if necessary. During experiments, the fully automated application on 19th-century novels showed that OCR4all can considerably outperform the commercial state-of-the-art tool ABBYY Finereader on moderate layouts if suitably pretrained mixed OCR models are available. Furthermore, on very complex early printed books, even users with minimal or no experience were able to capture the text with manageable effort and great quality, achieving excellent Character Error Rates (CERs) below 0.5\%. The architecture of OCR4all allows the easy integration (or substitution) of newly developed tools for its main components via standardized interfaces like PageXML, thus aiming at continually higher automation for historical printings.}, language = {en} } @article{DjebkoPuppeKayal2019, author = {Djebko, Kirill and Puppe, Frank and Kayal, Hakan}, title = {Model-based fault detection and diagnosis for spacecraft with an application for the SONATE triple cube nano-satellite}, series = {Aerospace}, volume = {6}, journal = {Aerospace}, number = {10}, issn = {2226-4310}, doi = {10.3390/aerospace6100105}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-198836}, pages = {105}, year = {2019}, abstract = {The correct behavior of spacecraft components is the foundation of unhindered mission operation. However, no technical system is free of wear and degradation. A malfunction of one single component might significantly alter the behavior of the whole spacecraft and may even lead to a complete mission failure. Therefore, abnormal component behavior must be detected early in order to be able to perform countermeasures. A dedicated fault detection system can be employed, as opposed to classical health monitoring performed by human operators, to decrease the response time to a malfunction. In this paper, we present a generic model-based diagnosis system, which detects faults by analyzing the spacecraft's housekeeping data. The observed behavior of the spacecraft components, given by the housekeeping data, is compared to their expected behavior, obtained through simulation. Each discrepancy between the observed and the expected behavior of a component generates a so-called symptom.
Given the symptoms, the diagnoses are derived by computing sets of components whose malfunction might cause the observed discrepancies. We demonstrate the applicability of the diagnosis system by using modified housekeeping data of the qualification model of an actual spacecraft and outline the advantages and drawbacks of our approach.}, language = {en} } @article{DietrichKrebsLimanetal.2019, author = {Dietrich, Georg and Krebs, Jonathan and Liman, Leon and Fette, Georg and Ertl, Maximilian and Kaspar, Mathias and St{\"o}rk, Stefan and Puppe, Frank}, title = {Replicating medication trend studies using ad hoc information extraction in a clinical data warehouse}, series = {BMC Medical Informatics and Decision Making}, volume = {19}, journal = {BMC Medical Informatics and Decision Making}, doi = {10.1186/s12911-018-0729-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200409}, pages = {15}, year = {2019}, abstract = {Background Medication trend studies show the changes in medication over the years and may be replicated using a clinical Data Warehouse (CDW). Even nowadays, a lot of the patient information in the EHR, like medication data, is stored as free text. As the conventional approach of information extraction (IE) demands a high developmental effort, we used ad hoc IE instead. This technique queries information and extracts it on the fly from texts contained in the CDW. Methods We present a generalizable approach to ad hoc IE for pharmacotherapy (medications and their daily dosage) presented in hospital discharge letters. We added import and query features to the CDW system, like error-tolerant queries to deal with misspellings and proximity search for the extraction of the daily dosage. During the data integration process in the CDW, negated, historical and non-patient context data are filtered. For the replication studies, we used a drug list grouped by ATC (Anatomical Therapeutic Chemical Classification System) codes as input for queries to the CDW. Results We achieve an F1 score of 0.983 (precision 0.997, recall 0.970) for extracting medication from discharge letters and an F1 score of 0.974 (precision 0.977, recall 0.972) for extracting the dosage. We replicated three published medical trend studies for hypertension, atrial fibrillation and chronic kidney disease. Overall, 93\% of the main findings could be replicated, 68\% of sub-findings, and 75\% of all findings. One study could be completely replicated with all main and sub-findings. Conclusion A novel approach for ad hoc IE is presented. It is very suitable for basic medical texts like discharge letters and finding reports. Ad hoc IE is by definition more limited than conventional IE and does not claim to replace it, but it substantially exceeds the search capabilities of many CDWs and makes it convenient to conduct replication studies quickly and with high quality.}, language = {en} } @article{LodaKrebsDanhofetal.2019, author = {Loda, Sophia and Krebs, Jonathan and Danhof, Sophia and Schreder, Martin and Solimando, Antonio G.
and Strifler, Susanne and Rasche, Leo and Kort{\"u}m, Martin and Kerscher, Alexander and Knop, Stefan and Puppe, Frank and Einsele, Hermann and Bittrich, Max}, title = {Exploration of artificial intelligence use with ARIES in multiple myeloma research}, series = {Journal of Clinical Medicine}, volume = {8}, journal = {Journal of Clinical Medicine}, number = {7}, issn = {2077-0383}, doi = {10.3390/jcm8070999}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197231}, pages = {999}, year = {2019}, abstract = {Background: Natural language processing (NLP) is a powerful tool supporting the generation of Real-World Evidence (RWE). There is no NLP system that enables the extensive querying of parameters specific to multiple myeloma (MM) from unstructured medical reports. We therefore created an MM-specific ontology to accelerate the information extraction (IE) from unstructured text. Methods: Our MM ontology consists of extensive MM-specific and hierarchically structured attributes and values. We implemented "A Rule-based Information Extraction System" (ARIES) that uses this ontology. We evaluated ARIES on 200 randomly selected medical reports of patients diagnosed with MM. Results: Our system achieved a high F1 score of 0.92 on the evaluation dataset with a precision of 0.87 and recall of 0.98. Conclusions: Our rule-based IE system enables the comprehensive querying of medical reports. The IE accelerates the extraction of data and enables clinicians to generate RWE on hematological issues faster. RWE helps clinicians to make decisions in an evidence-based manner. Our tool accelerates the integration of research evidence into everyday clinical practice.}, language = {en} }