@misc{Reger2016, type = {Master Thesis}, author = {Reger, Isabella}, title = {Figurennetzwerke als {\"A}hnlichkeitsmaß}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-149106}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Die vorliegende Arbeit l{\"a}sst sich dem Bereich der quantitativen Literaturanalyse zuordnen und verfolgt das Ziel, mittels computergest{\"u}tzter Verfahren zu untersuchen, inwieweit sich Romane hinsichtlich ihrer Figurenkonstellation {\"a}hneln. Dazu wird die Figurenkonstellation, als wichtiges strukturgebendes Ordnungsprinzip eines Romans, als soziales Netzwerk der Figuren operationalisiert. Solche Netzwerke k{\"o}nnen unter Anwendung von Verfahren des Natural Language Processing automatisch aus dem Text erstellt werden. Als Datengrundlage dient ein Korpus von deutschsprachigen Romanen aus dem 19. Jahrhundert, das mit automatischen Verfahren zur Figurenerkennung und Koreferenzaufl{\"o}sung prozessiert und manuell nachkorrigiert wurde, um eine m{\"o}glichst saubere Datenbasis zu schaffen. Ausgehend von der intensiven vergleichenden Betrachtung der Figurenkonstellationen von Fontanes "Effi Briest" und Flauberts "Madame Bovary" wurde in einer manuell erstellten Distanzmatrix die menschliche Intuition solcher {\"A}hnlichkeit zwischen allen Romanen des Korpus festgehalten, basierend auf der Lekt{\"u}re von Zusammenfassungen der Romane. Diese Daten werden als Evaluationsgrundlage genutzt. Mit Hilfe von Methoden der sozialen Netzwerkanalyse k{\"o}nnen strukturelle Eigenschaften dieser Netzwerke als Features erhoben werden. Diese wurden anschließend zur Berechnung der Kosinusdistanz zwischen den Romanen verwendet. Obwohl die automatisch erstellten Netzwerke die Figurenkonstellationen der Romane im Allgemeinen gut widerspiegeln und die Netzwerkfeatures sinnvoll interpretierbar sind, war die Korrelation mit der Evaluationsgrundlage niedrig. 
Dies legt die Vermutung nahe, dass neben der Struktur der Figurenkonstellation auch wiederkehrende Themen und Motive die Erstellung der Evaluationsgrundlage unterbewusst beeinflusst haben. Daher wurde Topic Modeling angewendet, um wichtige zwischenmenschliche Motive zu modellieren, die f{\"u}r die Figurenkonstellation von Bedeutung sein k{\"o}nnen. Die Netzwerkfeatures und die Topic-Verteilung wurden in Kombination zur Distanzberechnung herangezogen. Außerdem wurde versucht, jeder Kante des Figurennetzwerks ein Topic zuzuordnen, das diese Kante inhaltlich beschreibt. Hier zeigte sich, dass einerseits Topics, die sehr spezifisch f{\"u}r bestimmte Texte sind, und andererseits Topics, die {\"u}ber alle Texte hinweg stark vertreten sind, das Ergebnis bestimmen, sodass wiederum keine, bzw. nur eine sehr schwache Korrelation mit der Evaluationsgrundlage gefunden werden konnte. Der Umstand, dass keine Verbindung zwischen den berechneten Distanzen und der Evaluationsgrundlage gefunden werden konnte, obwohl die einzelnen Features sinnvoll interpretierbar sind, l{\"a}sst Zweifel an der Evaluationsmatrix aufkommen. Diese scheint st{\"a}rker als zu Beginn angenommen unterbewusst von thematischen und motivischen {\"A}hnlichkeiten zwischen den Romanen beeinflusst zu sein. Auch die Qualit{\"a}t der jeweiligen Zusammenfassung hat hier einen nicht unwesentlichen Einfluss. Daher w{\"a}re eine weniger subjektiv gepr{\"a}gte M{\"o}glichkeit der Auswertung von N{\"o}ten, beispielsweise durch die parallele Einsch{\"a}tzung mehrerer Annotatoren. 
Auch die weitere Verbesserung von NLP-Verfahren f{\"u}r literarische Texte in deutscher Sprache ist ein Desideratum f{\"u}r ankn{\"u}pfende Forschungsans{\"a}tze.}, subject = {Digital Humanities}, language = {de} } @article{ReulChristHarteltetal.2019, author = {Reul, Christian and Christ, Dennis and Hartelt, Alexander and Balbach, Nico and Wehner, Maximilian and Springmann, Uwe and Wick, Christoph and Grundig, Christine and B{\"u}ttner, Andreas and Puppe, Frank}, title = {OCR4all—An open-source tool providing a (semi-)automatic OCR workflow for historical printings}, series = {Applied Sciences}, volume = {9}, journal = {Applied Sciences}, number = {22}, issn = {2076-3417}, doi = {10.3390/app9224853}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193103}, pages = {4853}, year = {2019}, abstract = {Optical Character Recognition (OCR) on historical printings is a challenging task mainly due to the complexity of the layout and the highly variant typography. Nevertheless, in the last few years, great progress has been made in the area of historical OCR, resulting in several powerful open-source tools for preprocessing, layout analysis and segmentation, character recognition, and post-processing. The drawback of these tools often is their limited applicability by non-technical users like humanist scholars and in particular the combined use of several tools in a workflow. In this paper, we present an open-source OCR software called OCR4all, which combines state-of-the-art OCR components and continuous model training into a comprehensive workflow. While a variety of materials can already be processed fully automatically, books with more complex layouts require manual intervention by the users. This is mostly due to the fact that the required ground truth for training stronger mixed models (for segmentation, as well as text recognition) is not available, yet, neither in the desired quantity nor quality. 
To deal with this issue in the short run, OCR4all offers a comfortable GUI that allows error corrections not only in the final output, but already in early stages to minimize error propagations. In the long run, this constant manual correction produces large quantities of valuable, high quality training material, which can be used to improve fully automatic approaches. Further on, extensive configuration capabilities are provided to set the degree of automation of the workflow and to make adaptations to the carefully selected default parameters for specific printings, if necessary. During experiments, the fully automated application on 19th Century novels showed that OCR4all can considerably outperform the commercial state-of-the-art tool ABBYY Finereader on moderate layouts if suitably pretrained mixed OCR models are available. Furthermore, on very complex early printed books, even users with minimal or no experience were able to capture the text with manageable effort and great quality, achieving excellent Character Error Rates (CERs) below 0.5\%. The architecture of OCR4all allows the easy integration (or substitution) of newly developed tools for its main components by standardized interfaces like PageXML, thus aiming at continual higher automation for historical printings.}, language = {en} } @phdthesis{Sauer2023, author = {Sauer, Christian}, title = {Development, Simulation and Evaluation of Mobile Wireless Networks in Industrial Applications}, doi = {10.25972/OPUS-29923}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-299238}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Many industrial automation solutions use wireless communication and rely on the availability and quality of the wireless channel. At the same time the wireless medium is highly congested and guaranteeing the availability of wireless channels is becoming increasingly difficult. 
In this work we show, that ad-hoc networking solutions can be used to provide new communication channels and improve the performance of mobile automation systems. These ad-hoc networking solutions describe different communication strategies, but avoid relying on network infrastructure by utilizing the Peer-to-Peer (P2P) channel between communicating entities. This work is a step towards the effective implementation of low-range communication technologies (e.g. Visible Light Communication (VLC), radar communication, mmWave communication) to the industrial application. Implementing infrastructure networks with these technologies is unrealistic, since the low communication range would necessitate a high number of Access Points (APs) to yield full coverage. However, ad-hoc networks do not require any network infrastructure. In this work different ad-hoc networking solutions for the industrial use case are presented and tools and models for their examination are proposed. The main use case investigated in this work are Automated Guided Vehicles (AGVs) for industrial applications. These mobile devices drive throughout the factory transporting crates, goods or tools or assisting workers. In most implementations they must exchange data with a Central Control Unit (CCU) and between one another. Predicting if a certain communication technology is suitable for an application is very challenging since the applications and the resulting requirements are very heterogeneous. The proposed models and simulation tools enable the simulation of the complex interaction of mobile robotic clients and a wireless communication network. The goal is to predict the characteristics of a networked AGV fleet. The proposed tools were used to implement, test and examine different ad-hoc networking solutions for industrial applications using AGVs. These communication solutions handle time-critical and delay-tolerant communication. 
Additionally a control method for the AGVs is proposed, which optimizes the communication and in turn increases the transport performance of the AGV fleet. Therefore, this work provides not only tools for the further research of industrial ad-hoc systems, but also first implementations of ad-hoc systems which address many of the most pressing issues in industrial applications.}, subject = {Industrie}, language = {en} } @article{SteiningerAbelZiegleretal.2023, author = {Steininger, Michael and Abel, Daniel and Ziegler, Katrin and Krause, Anna and Paeth, Heiko and Hotho, Andreas}, title = {ConvMOS: climate model output statistics with deep learning}, series = {Data Mining and Knowledge Discovery}, volume = {37}, journal = {Data Mining and Knowledge Discovery}, number = {1}, issn = {1384-5810}, doi = {10.1007/s10618-022-00877-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324213}, pages = {136--166}, year = {2023}, abstract = {Climate models are the tool of choice for scientists researching climate change. Like all models they suffer from errors, particularly systematic and location-specific representation errors. One way to reduce these errors is model output statistics (MOS) where the model output is fitted to observational data with machine learning. In this work, we assess the use of convolutional Deep Learning climate MOS approaches and present the ConvMOS architecture which is specifically designed based on the observation that there are systematic and location-specific errors in the precipitation estimates of climate models. We apply ConvMOS models to the simulated precipitation of the regional climate model REMO, showing that a combination of per-location model parameters for reducing location-specific errors and global model parameters for reducing systematic errors is indeed beneficial for MOS performance. 
We find that ConvMOS models can reduce errors considerably and perform significantly better than three commonly used MOS approaches and plain ResNet and U-Net models in most cases. Our results show that non-linear MOS models underestimate the number of extreme precipitation events, which we alleviate by training models specialized towards extreme precipitation events with the imbalanced regression method DenseLoss. While we consider climate MOS, we argue that aspects of ConvMOS may also be beneficial in other domains with geospatial data, such as air pollution modeling or weather forecasts.}, subject = {Klima}, language = {en} } @phdthesis{Tzschichholz2014, author = {Tzschichholz, Tristan}, title = {Relative pose estimation of known rigid objects using a novel approach to high-level PMD-/CCD- sensor data fusion with regard to applications in space}, isbn = {978-3-923959-95-2}, issn = {1868-7474}, doi = {10.25972/OPUS-10391}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-103918}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {In this work, a novel method for estimating the relative pose of a known object is presented, which relies on an application-specific data fusion process. A PMD-sensor in conjunction with a CCD-sensor is used to perform the pose estimation. Furthermore, the work provides a method for extending the measurement range of the PMD sensor along with the necessary calibration methodology. Finally, extensive measurements on a very accurate Rendezvous and Docking testbed are made to evaluate the performance, what includes a detailed discussion of lighting conditions.}, subject = {Bildverarbeitung}, language = {en} } @article{VollmerVollmerLangetal.2022, author = {Vollmer, Andreas and Vollmer, Michael and Lang, Gernot and Straub, Anton and K{\"u}bler, Alexander and Gubik, Sebastian and Brands, Roman C. 
and Hartmann, Stefan and Saravi, Babak}, title = {Performance analysis of supervised machine learning algorithms for automatized radiographical classification of maxillary third molar impaction}, series = {Applied Sciences}, volume = {12}, journal = {Applied Sciences}, number = {13}, issn = {2076-3417}, doi = {10.3390/app12136740}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281662}, year = {2022}, abstract = {Background: Oro-antral communication (OAC) is a common complication following the extraction of upper molar teeth. The Archer and the Root Sinus (RS) systems can be used to classify impacted teeth in panoramic radiographs. The Archer classes B-D and the Root Sinus classes III, IV have been associated with an increased risk of OAC following tooth extraction in the upper molar region. In our previous study, we found that panoramic radiographs are not reliable for predicting OAC. This study aimed to (1) determine the feasibility of automating the classification (Archer/RS classes) of impacted teeth from panoramic radiographs, (2) determine the distribution of OAC stratified by classification system classes for the purposes of decision tree construction, and (3) determine the feasibility of automating the prediction of OAC utilizing the mentioned classification systems. Methods: We utilized multiple supervised pre-trained machine learning models (VGG16, ResNet50, Inceptionv3, EfficientNet, MobileNetV2), one custom-made convolutional neural network (CNN) model, and a Bag of Visual Words (BoVW) technique to evaluate the performance to predict the clinical classification systems RS and Archer from panoramic radiographs (Aim 1). We then used Chi-square Automatic Interaction Detectors (CHAID) to determine the distribution of OAC stratified by the Archer/RS classes to introduce a decision tree for simple use in clinics (Aim 2). 
Lastly, we tested the ability of a multilayer perceptron artificial neural network (MLP) and a radial basis function neural network (RBNN) to predict OAC based on the high-risk classes RS III, IV, and Archer B-D (Aim 3). Results: We achieved accuracies of up to 0.771 for EfficientNet and MobileNetV2 when examining the Archer classification. For the AUC, we obtained values of up to 0.902 for our custom-made CNN. In comparison, the detection of the RS classification achieved accuracies of up to 0.792 for the BoVW and an AUC of up to 0.716 for our custom-made CNN. Overall, the Archer classification was detected more reliably than the RS classification when considering all algorithms. CHAID predicted 77.4\% correctness for the Archer classification and 81.4\% for the RS classification. MLP (AUC: 0.590) and RBNN (AUC: 0.590) for the Archer classification as well as MLP (AUC: 0.638) and RBNN (AUC: 0.630) for the RS classification did not show sufficient predictive capability for OAC. Conclusions: The results reveal that impacted teeth can be classified using panoramic radiographs (best AUC: 0.902), and the classification systems can be stratified according to their relationship to OAC (81.4\% correct for RS classification). However, the Archer and RS classes did not achieve satisfactory AUCs for predicting OAC (best AUC: 0.638). 
Additional research is needed to validate the results externally and to develop a reliable risk stratification tool based on the present findings.}, language = {en} } @phdthesis{Wiebusch2016, author = {Wiebusch, Dennis}, title = {Reusability for Intelligent Realtime Interactive Systems}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-040-5 (print)}, doi = {10.25972/WUP-978-3-95826-041-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-121869}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {260}, year = {2016}, abstract = {Software frameworks for Realtime Interactive Systems (RIS), e.g., in the areas of Virtual, Augmented, and Mixed Reality (VR, AR, and MR) or computer games, facilitate a multitude of functionalities by coupling diverse software modules. In this context, no uniform methodology for coupling these modules does exist; instead various purpose-built solutions have been proposed. As a consequence, important software qualities, such as maintainability, reusability, and adaptability, are impeded. Many modern systems provide additional support for the integration of Artificial Intelligence (AI) methods to create so called intelligent virtual environments. These methods exacerbate the above-mentioned problem of coupling software modules in the thus created Intelligent Realtime Interactive Systems (IRIS) even more. This, on the one hand, is due to the commonly applied specialized data structures and asynchronous execution schemes, and the requirement for high consistency regarding content-wise coupled but functionally decoupled forms of data representation on the other. This work proposes an approach to decoupling software modules in IRIS, which is based on the abstraction of architecture elements using a semantic Knowledge Representation Layer (KRL). 
The layer facilitates decoupling the required modules, provides a means for ensuring interface compatibility and consistency, and in the end constitutes an interface for symbolic AI methods.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @article{WienrichCarolusRothIsigkeitetal.2022, author = {Wienrich, Carolin and Carolus, Astrid and Roth-Isigkeit, David and Hotho, Andreas}, title = {Inhibitors and enablers to explainable AI success: a systematic examination of explanation complexity and individual characteristics}, series = {Multimodal Technologies and Interaction}, volume = {6}, journal = {Multimodal Technologies and Interaction}, number = {12}, issn = {2414-4088}, doi = {10.3390/mti6120106}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-297288}, year = {2022}, abstract = {With the increasing adaptability and complexity of advisory artificial intelligence (AI)-based agents, the topics of explainable AI and human-centered AI are moving close together. Variations in the explanation itself have been widely studied, with some contradictory results. These could be due to users' individual differences, which have rarely been systematically studied regarding their inhibiting or enabling effect on the fulfillment of explanation objectives (such as trust, understanding, or workload). This paper aims to shed light on the significance of human dimensions (gender, age, trust disposition, need for cognition, affinity for technology, self-efficacy, attitudes, and mind attribution) as well as their interplay with different explanation modes (no, simple, or complex explanation). Participants played the game Deal or No Deal while interacting with an AI-based agent. The agent gave advice to the participants on whether they should accept or reject the deals offered to them. As expected, giving an explanation had a positive influence on the explanation objectives. 
However, the users' individual characteristics particularly reinforced the fulfillment of the objectives. The strongest predictor of objective fulfillment was the degree of attribution of human characteristics. The more human characteristics were attributed, the more trust was placed in the agent, advice was more likely to be accepted and understood, and important needs were satisfied during the interaction. Thus, the current work contributes to a better understanding of the design of explanations of an AI-based agent system that takes into account individual characteristics and meets the demand for both explainable and human-centered agent systems.}, language = {en} } @inproceedings{OPUS4-31720, title = {Abstracts of the Wuertual Reality XR Meeting 2023}, editor = {Neumann, Isabel and Gado, Sabrina and K{\"a}thner, Ivo and Hildebrandt, Lea and Andreatta, Marta}, edition = {korrigierte Auflage}, doi = {10.25972/OPUS-31720}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-317203}, pages = {76}, year = {2023}, abstract = {The Wuertual Reality XR Meeting 2023 was initiated to bring together researchers from many fields who use VR/AR/XR. There was a focus on applied XR and social VR. In this conference band, you can find the abstracts of the two keynotes, the 34 posters and poster pitches, the 29 talks and the four workshops.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @inproceedings{OPUS4-31528, title = {Abstracts of the Wuertual Reality XR Meeting 2023}, editor = {Neumann, Isabel and Gado, Sabrina and K{\"a}thner, Ivo and Hildebrandt, Lea and Andreatta, Marta}, doi = {10.25972/OPUS-31528}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-315285}, pages = {76}, year = {2023}, abstract = {The Wuertual Reality XR Meeting 2023 was initiated to bring together researchers from many fields who use VR/AR/XR. There was a focus on applied XR and social VR. 
In this conference band, you can find the abstracts of the two keynotes, the 34 posters and poster pitches, the 29 talks and the four workshops.}, subject = {Virtuelle Realit{\"a}t}, language = {en} }