@phdthesis{Tzschichholz2014, author = {Tzschichholz, Tristan}, title = {Relative pose estimation of known rigid objects using a novel approach to high-level PMD-/CCD- sensor data fusion with regard to applications in space}, isbn = {978-3-923959-95-2}, issn = {1868-7474}, doi = {10.25972/OPUS-10391}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-103918}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {In this work, a novel method for estimating the relative pose of a known object is presented, which relies on an application-specific data fusion process. A PMD-sensor in conjunction with a CCD-sensor is used to perform the pose estimation. Furthermore, the work provides a method for extending the measurement range of the PMD sensor along with the necessary calibration methodology. Finally, extensive measurements on a very accurate Rendezvous and Docking testbed are made to evaluate the performance, what includes a detailed discussion of lighting conditions.}, subject = {Bildverarbeitung}, language = {en} } @phdthesis{Wiebusch2016, author = {Wiebusch, Dennis}, title = {Reusability for Intelligent Realtime Interactive Systems}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-040-5}, doi = {10.25972/WUP-978-3-95826-041-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-121869}, school = {Universit{\"a}t W{\"u}rzburg}, internal-note = {NOTE(review): school previously duplicated the publisher ({W{\"u}rzburg University Press}); set to the degree-granting university -- confirm. ISBN print-edition qualifier removed from isbn field.}, pages = {260}, year = {2016}, abstract = {Software frameworks for Realtime Interactive Systems (RIS), e.g., in the areas of Virtual, Augmented, and Mixed Reality (VR, AR, and MR) or computer games, facilitate a multitude of functionalities by coupling diverse software modules. In this context, no uniform methodology for coupling these modules does exist; instead various purpose-built solutions have been proposed. As a consequence, important software qualities, such as maintainability, reusability, and adaptability, are impeded. 
Many modern systems provide additional support for the integration of Artificial Intelligence (AI) methods to create so called intelligent virtual environments. These methods exacerbate the above-mentioned problem of coupling software modules in the thus created Intelligent Realtime Interactive Systems (IRIS) even more. This, on the one hand, is due to the commonly applied specialized data structures and asynchronous execution schemes, and the requirement for high consistency regarding content-wise coupled but functionally decoupled forms of data representation on the other. This work proposes an approach to decoupling software modules in IRIS, which is based on the abstraction of architecture elements using a semantic Knowledge Representation Layer (KRL). The layer facilitates decoupling the required modules, provides a means for ensuring interface compatibility and consistency, and in the end constitutes an interface for symbolic AI methods.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @proceedings{OPUS4-31720, title = {Abstracts of the Wuertual Reality XR Meeting 2023}, editor = {Neumann, Isabel and Gado, Sabrina and K{\"a}thner, Ivo and Hildebrandt, Lea and Andreatta, Marta}, edition = {korrigierte Auflage}, doi = {10.25972/OPUS-31720}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-317203}, pages = {76}, year = {2023}, abstract = {The Wuertual Reality XR Meeting 2023 was initiated to bring together researchers from many fields who use VR/AR/XR. There was a focus on applied XR and social VR. 
In this conference band, you can find the abstracts of the two keynotes, the 34 posters and poster pitches, the 29 talks and the four workshops.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @proceedings{OPUS4-31528, title = {Abstracts of the Wuertual Reality XR Meeting 2023}, editor = {Neumann, Isabel and Gado, Sabrina and K{\"a}thner, Ivo and Hildebrandt, Lea and Andreatta, Marta}, doi = {10.25972/OPUS-31528}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-315285}, pages = {76}, year = {2023}, internal-note = {NOTE(review): same work as OPUS4-31720, which is the corrected edition (korrigierte Auflage); likely duplicate -- confirm and deduplicate}, abstract = {The Wuertual Reality XR Meeting 2023 was initiated to bring together researchers from many fields who use VR/AR/XR. There was a focus on applied XR and social VR. In this conference band, you can find the abstracts of the two keynotes, the 34 posters and poster pitches, the 29 talks and the four workshops.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @article{ReulChristHarteltetal.2019, author = {Reul, Christian and Christ, Dennis and Hartelt, Alexander and Balbach, Nico and Wehner, Maximilian and Springmann, Uwe and Wick, Christoph and Grundig, Christine and B{\"u}ttner, Andreas and Puppe, Frank}, title = {{OCR4all}—An open-source tool providing a (semi-)automatic {OCR} workflow for historical printings}, series = {Applied Sciences}, volume = {9}, journal = {Applied Sciences}, number = {22}, issn = {2076-3417}, doi = {10.3390/app9224853}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193103}, pages = {4853}, year = {2019}, abstract = {Optical Character Recognition (OCR) on historical printings is a challenging task mainly due to the complexity of the layout and the highly variant typography. Nevertheless, in the last few years, great progress has been made in the area of historical OCR, resulting in several powerful open-source tools for preprocessing, layout analysis and segmentation, character recognition, and post-processing. 
The drawback of these tools often is their limited applicability by non-technical users like humanist scholars and in particular the combined use of several tools in a workflow. In this paper, we present an open-source OCR software called OCR4all, which combines state-of-the-art OCR components and continuous model training into a comprehensive workflow. While a variety of materials can already be processed fully automatically, books with more complex layouts require manual intervention by the users. This is mostly due to the fact that the required ground truth for training stronger mixed models (for segmentation, as well as text recognition) is not available, yet, neither in the desired quantity nor quality. To deal with this issue in the short run, OCR4all offers a comfortable GUI that allows error corrections not only in the final output, but already in early stages to minimize error propagations. In the long run, this constant manual correction produces large quantities of valuable, high quality training material, which can be used to improve fully automatic approaches. Further on, extensive configuration capabilities are provided to set the degree of automation of the workflow and to make adaptations to the carefully selected default parameters for specific printings, if necessary. During experiments, the fully automated application on 19th Century novels showed that OCR4all can considerably outperform the commercial state-of-the-art tool ABBYY Finereader on moderate layouts if suitably pretrained mixed OCR models are available. Furthermore, on very complex early printed books, even users with minimal or no experience were able to capture the text with manageable effort and great quality, achieving excellent Character Error Rates (CERs) below 0.5\%. 
The architecture of OCR4all allows the easy integration (or substitution) of newly developed tools for its main components by standardized interfaces like PageXML, thus aiming at continual higher automation for historical printings.}, language = {en} } @article{OberdoerferSchraudtLatoschik2022, author = {Oberd{\"o}rfer, Sebastian and Schraudt, David and Latoschik, Marc Erich}, title = {Embodied gambling — investigating the influence of level of embodiment, avatar appearance, and virtual environment design on an online VR slot machine}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.828553}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284662}, year = {2022}, abstract = {Slot machines are one of the most played games by players suffering from gambling disorder. New technologies like immersive Virtual Reality (VR) offer more possibilities to exploit erroneous beliefs in the context of gambling. Recent research indicates a higher risk potential when playing a slot machine in VR than on desktop. To continue this investigation, we evaluate the effects of providing different degrees of embodiment, i.e., minimal and full embodiment. The avatars used for the full embodiment further differ in their appearance, i.e., they elicit a high or a low socio-economic status. The virtual environment (VE) design can cause a potential influence on the overall gambling behavior. Thus, we also embed the slot machine in two different VEs that differ in their emotional design: a colorful underwater playground environment and a virtual counterpart of our lab. These design considerations resulted in four different versions of the same VR slot machine: 1) full embodiment with high socio-economic status, 2) full embodiment with low socio-economic status, 3) minimal embodiment playground VE, and 4) minimal embodiment laboratory VE. Both full embodiment versions also used the playground VE. 
We determine the risk potential by logging gambling frequency as well as stake size, and measuring harm-inducing factors, i.e., dissociation, urge to gamble, dark flow, and illusion of control, using questionnaires. Following a between groups experimental design, 82 participants played for 20 game rounds one of the four versions. We recruited our sample from the students enrolled at the University of W{\"u}rzburg. Our safety protocol ensured that only participants without any recent gambling activity took part in the experiment. In this comparative user study, we found no effect of the embodiment nor VE design on neither the gambling frequency, stake sizes, nor risk potential. However, our results provide further support for the hypothesis of the higher visual angle on gambling stimuli and hence the increased emotional response being the true cause for the higher risk potential.}, language = {en} } @article{HalbigBabuGatteretal.2022, author = {Halbig, Andreas and Babu, Sooraj K. and Gatter, Shirin and Latoschik, Marc Erich and Brukamp, Kirsten and von Mammen, Sebastian}, title = {Opportunities and challenges of Virtual Reality in healthcare - a domain experts inquiry}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.837616}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284752}, year = {2022}, abstract = {In recent years, the applications and accessibility of Virtual Reality (VR) for the healthcare sector have continued to grow. However, so far, most VR applications are only relevant in research settings. Information about what healthcare professionals would need to independently integrate VR applications into their daily working routines is missing. 
The actual needs and concerns of the people who work in the healthcare sector are often disregarded in the development of VR applications, even though they are the ones who are supposed to use them in practice. By means of this study, we systematically involve health professionals in the development process of VR applications. In particular, we conducted an online survey with 102 healthcare professionals based on a video prototype which demonstrates a software platform that allows them to create and utilise VR experiences on their own. For this study, we adapted and extended the Technology Acceptance Model (TAM). The survey focused on the perceived usefulness and the ease of use of such a platform, as well as the attitude and ethical concerns the users might have. The results show a generally positive attitude toward such a software platform. The users can imagine various use cases in different health domains. However, the perceived usefulness is tied to the actual ease of use of the platform and sufficient support for learning and working with the platform. In the discussion, we explain how these results can be generalized to facilitate the integration of VR in healthcare practice.}, language = {en} } @phdthesis{Sauer2023, author = {Sauer, Christian}, title = {Development, Simulation and Evaluation of Mobile Wireless Networks in Industrial Applications}, doi = {10.25972/OPUS-29923}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-299238}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Many industrial automation solutions use wireless communication and rely on the availability and quality of the wireless channel. At the same time the wireless medium is highly congested and guaranteeing the availability of wireless channels is becoming increasingly difficult. In this work we show, that ad-hoc networking solutions can be used to provide new communication channels and improve the performance of mobile automation systems. 
These ad-hoc networking solutions describe different communication strategies, but avoid relying on network infrastructure by utilizing the Peer-to-Peer (P2P) channel between communicating entities. This work is a step towards the effective implementation of low-range communication technologies (e.g. Visible Light Communication (VLC), radar communication, mmWave communication) to the industrial application. Implementing infrastructure networks with these technologies is unrealistic, since the low communication range would necessitate a high number of Access Points (APs) to yield full coverage. However, ad-hoc networks do not require any network infrastructure. In this work different ad-hoc networking solutions for the industrial use case are presented and tools and models for their examination are proposed. The main use case investigated in this work are Automated Guided Vehicles (AGVs) for industrial applications. These mobile devices drive throughout the factory transporting crates, goods or tools or assisting workers. In most implementations they must exchange data with a Central Control Unit (CCU) and between one another. Predicting if a certain communication technology is suitable for an application is very challenging since the applications and the resulting requirements are very heterogeneous. The proposed models and simulation tools enable the simulation of the complex interaction of mobile robotic clients and a wireless communication network. The goal is to predict the characteristics of a networked AGV fleet. The proposed tools were used to implement, test and examine different ad-hoc networking solutions for industrial applications using AGVs. These communication solutions handle time-critical and delay-tolerant communication. Additionally a control method for the AGVs is proposed, which optimizes the communication and in turn increases the transport performance of the AGV fleet. 
Therefore, this work provides not only tools for the further research of industrial ad-hoc systems, but also first implementations of ad-hoc systems which address many of the most pressing issues in industrial applications.}, subject = {Industrie}, language = {en} } @article{VollmerVollmerLangetal.2022, author = {Vollmer, Andreas and Vollmer, Michael and Lang, Gernot and Straub, Anton and K{\"u}bler, Alexander and Gubik, Sebastian and Brands, Roman C. and Hartmann, Stefan and Saravi, Babak}, title = {Performance analysis of supervised machine learning algorithms for automatized radiographical classification of maxillary third molar impaction}, series = {Applied Sciences}, volume = {12}, journal = {Applied Sciences}, number = {13}, issn = {2076-3417}, doi = {10.3390/app12136740}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281662}, year = {2022}, abstract = {Background: Oro-antral communication (OAC) is a common complication following the extraction of upper molar teeth. The Archer and the Root Sinus (RS) systems can be used to classify impacted teeth in panoramic radiographs. The Archer classes B-D and the Root Sinus classes III, IV have been associated with an increased risk of OAC following tooth extraction in the upper molar region. In our previous study, we found that panoramic radiographs are not reliable for predicting OAC. This study aimed to (1) determine the feasibility of automating the classification (Archer/RS classes) of impacted teeth from panoramic radiographs, (2) determine the distribution of OAC stratified by classification system classes for the purposes of decision tree construction, and (3) determine the feasibility of automating the prediction of OAC utilizing the mentioned classification systems. 
Methods: We utilized multiple supervised pre-trained machine learning models (VGG16, ResNet50, Inceptionv3, EfficientNet, MobileNetV2), one custom-made convolutional neural network (CNN) model, and a Bag of Visual Words (BoVW) technique to evaluate the performance to predict the clinical classification systems RS and Archer from panoramic radiographs (Aim 1). We then used Chi-square Automatic Interaction Detectors (CHAID) to determine the distribution of OAC stratified by the Archer/RS classes to introduce a decision tree for simple use in clinics (Aim 2). Lastly, we tested the ability of a multilayer perceptron artificial neural network (MLP) and a radial basis function neural network (RBNN) to predict OAC based on the high-risk classes RS III, IV, and Archer B-D (Aim 3). Results: We achieved accuracies of up to 0.771 for EfficientNet and MobileNetV2 when examining the Archer classification. For the AUC, we obtained values of up to 0.902 for our custom-made CNN. In comparison, the detection of the RS classification achieved accuracies of up to 0.792 for the BoVW and an AUC of up to 0.716 for our custom-made CNN. Overall, the Archer classification was detected more reliably than the RS classification when considering all algorithms. CHAID predicted 77.4\% correctness for the Archer classification and 81.4\% for the RS classification. MLP (AUC: 0.590) and RBNN (AUC: 0.590) for the Archer classification as well as MLP (AUC: 0.638) and RBNN (AUC: 0.630) for the RS classification did not show sufficient predictive capability for OAC. Conclusions: The results reveal that impacted teeth can be classified using panoramic radiographs (best AUC: 0.902), and the classification systems can be stratified according to their relationship to OAC (81.4\% correct for RS classification). However, the Archer and RS classes did not achieve satisfactory AUCs for predicting OAC (best AUC: 0.638). 
Additional research is needed to validate the results externally and to develop a reliable risk stratification tool based on the present findings.}, language = {en} } @article{DonnermannSchaperLugrin2022, author = {Donnermann, Melissa and Schaper, Philipp and Lugrin, Birgit}, title = {Social robots in applied settings: a long-term study on adaptive robotic tutors in higher education}, series = {Frontiers in Robotics and AI}, volume = {9}, journal = {Frontiers in Robotics and AI}, issn = {2296-9144}, doi = {10.3389/frobt.2022.831633}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-266012}, year = {2022}, abstract = {Learning in higher education scenarios requires self-directed learning and the challenging task of self-motivation while individual support is rare. The integration of social robots to support learners has already shown promise to benefit the learning process in this area. In this paper, we focus on the applicability of an adaptive robotic tutor in a university setting. To this end, we conducted a long-term field study implementing an adaptive robotic tutor to support students with exam preparation over three sessions during one semester. In a mixed design, we compared the effect of an adaptive tutor to a control condition across all learning sessions. With the aim to benefit not only motivation but also academic success and the learning experience in general, we draw from research in adaptive tutoring, social robots in education, as well as our own prior work in this field. Our results show that opting in for the robotic tutoring is beneficial for students. We found significant subjective knowledge gain and increases in intrinsic motivation regarding the content of the course in general. Finally, participation resulted in a significantly better exam grade compared to students not participating. 
However, the extended adaptivity of the robotic tutor in the experimental condition did not seem to enhance learning, as we found no significant differences compared to a non-adaptive version of the robot.}, language = {en} }