@article{ToepferCorovicFetteetal.2015, author = {Toepfer, Martin and Corovic, Hamo and Fette, Georg and Kl{\"u}gl, Peter and St{\"o}rk, Stefan and Puppe, Frank}, title = {Fine-grained information extraction from German transthoracic echocardiography reports}, series = {BMC Medical Informatics and Decision Making}, volume = {15}, journal = {BMC Medical Informatics and Decision Making}, number = {91}, doi = {10.1186/s12911-015-0215-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-125509}, year = {2015}, abstract = {Background: Information extraction techniques that get structured representations out of unstructured data make a large amount of clinically relevant information about patients accessible for semantic applications. These methods typically rely on standardized terminologies that guide this process. Many languages and clinical domains, however, lack appropriate resources and tools, as well as evaluations of their applications, especially if detailed conceptualizations of the domain are required. For instance, German transthoracic echocardiography reports have not been targeted sufficiently before, despite their importance for clinical trials. This work therefore aimed at the development and evaluation of an information extraction component with a fine-grained terminology that enables the recognition of almost all relevant information stated in German transthoracic echocardiography reports at the University Hospital of W{\"u}rzburg. Methods: A domain expert validated and iteratively refined an automatically inferred base terminology. The terminology was used by an ontology-driven information extraction system that outputs attribute value pairs. The final component has been mapped to the central elements of a standardized terminology, and it has been evaluated on documents with different layouts. Results: The final system achieved state-of-the-art precision (micro average .996) and recall (micro average .961) on 100 test documents that represent more than 90 \% of all reports. In particular, principal aspects as defined in a standardized external terminology were recognized with F1 = .989 (micro average) and F1 = .963 (macro average). As a result of keyword matching and restrained concept extraction, the system obtained high precision also on unstructured or exceptionally short documents, and documents with uncommon layout. Conclusions: The developed terminology and the proposed information extraction system allow the extraction of fine-grained information from German semi-structured transthoracic echocardiography reports with very high precision and high recall on the majority of documents at the University Hospital of W{\"u}rzburg. Extracted results populate a clinical data warehouse which supports clinical research.}, language = {en} } @article{KrenzerMakowskiHekaloetal.2022, author = {Krenzer, Adrian and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Fast machine learning annotation in the medical domain: a semi-automated video annotation tool for gastroenterologists}, series = {BioMedical Engineering OnLine}, volume = {21}, journal = {BioMedical Engineering OnLine}, number = {1}, doi = {10.1186/s12938-022-01001-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300231}, year = {2022}, abstract = {Background: Machine learning, especially deep learning, is becoming more and more relevant in research and development in the medical domain.
For all supervised deep learning applications, data is the most critical factor in securing successful implementation and sustaining the progress of the machine learning model. Gastroenterological data in particular, which often involve endoscopic videos, are cumbersome to annotate. Domain experts are needed to interpret and annotate the videos. To support these domain experts, we developed a framework. With this framework, instead of annotating every frame in the video sequence, experts only perform key annotations at the beginning and the end of sequences with pathologies, e.g., visible polyps. Subsequently, non-expert annotators supported by machine learning add the missing annotations for the frames in-between. Methods: In our framework, an expert reviews the video and annotates a few video frames to verify the object's annotations for the non-expert. In a second step, a non-expert has visual confirmation of the given object and can annotate all following and preceding frames with AI assistance. After the expert has finished, relevant frames will be selected and passed on to an AI model. This information allows the AI model to detect and mark the desired object on all following and preceding frames with an annotation. Therefore, the non-expert can adjust and modify the AI predictions and export the results, which can then be used to train the AI model. Results: Using this framework, we were able to reduce the workload of domain experts on average by a factor of 20 on our data. This is primarily due to the structure of the framework, which is designed to minimize the workload of the domain expert. Pairing this framework with a state-of-the-art semi-automated AI model enhances the annotation speed further. Through a prospective study with 10 participants, we show that semi-automated annotation using our tool doubles the annotation speed of non-expert annotators compared to a well-known state-of-the-art annotation tool. Conclusion: In summary, we introduce a framework for fast expert annotation for gastroenterologists, which reduces the workload of the domain expert considerably while maintaining a very high annotation quality. The framework incorporates a semi-automated annotation system utilizing trained object detection models. The software and framework are open-source.}, language = {en} } @article{WienrichLatoschik2021, author = {Wienrich, Carolin and Latoschik, Marc Erich}, title = {eXtended Artificial Intelligence: New Prospects of Human-AI Interaction Research}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.686783}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260296}, year = {2021}, abstract = {Artificial Intelligence (AI) covers a broad spectrum of computational problems and use cases. Many of these raise profound and sometimes intricate questions about how humans interact or should interact with AIs. Moreover, many users or future users have abstract ideas of what AI is, depending significantly on the specific embodiment of AI applications. Human-centered design approaches would suggest evaluating the impact of different embodiments on human perception of and interaction with AI. Such an evaluation is, however, difficult to realize due to the sheer complexity of application fields and embodiments in reality. However, here XR opens new possibilities for researching human-AI interactions.
The article's contribution is twofold: First, it provides a theoretical treatment and model of human-AI interaction based on an XR-AI continuum as a framework for, and a perspective on, different approaches to XR-AI combinations. It motivates XR-AI combinations as a method to learn about the effects of prospective human-AI interfaces and shows why the combination of XR and AI fruitfully contributes to a valid and systematic investigation of human-AI interactions and interfaces. Second, the article provides two exemplary experiments investigating the aforementioned approach for two distinct AI systems. The first experiment reveals an interesting gender effect in human-robot interaction, while the second experiment reveals an Eliza effect of a recommender system. Here the article introduces two paradigmatic implementations of the proposed XR testbed for human-AI interactions and interfaces and shows how a valid and systematic investigation can be conducted. In sum, the article opens new perspectives on how XR benefits human-centered AI design and development.}, language = {en} } @article{LodaKrebsDanhofetal.2019, author = {Loda, Sophia and Krebs, Jonathan and Danhof, Sophia and Schreder, Martin and Solimando, Antonio G. and Strifler, Susanne and Rasche, Leo and Kort{\"u}m, Martin and Kerscher, Alexander and Knop, Stefan and Puppe, Frank and Einsele, Hermann and Bittrich, Max}, title = {Exploration of artificial intelligence use with ARIES in multiple myeloma research}, series = {Journal of Clinical Medicine}, volume = {8}, journal = {Journal of Clinical Medicine}, number = {7}, issn = {2077-0383}, doi = {10.3390/jcm8070999}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197231}, pages = {999}, year = {2019}, abstract = {Background: Natural language processing (NLP) is a powerful tool supporting the generation of Real-World Evidence (RWE). There is no NLP system that enables the extensive querying of parameters specific to multiple myeloma (MM) out of unstructured medical reports. We therefore created an MM-specific ontology to accelerate the information extraction (IE) from unstructured text. Methods: Our MM ontology consists of extensive MM-specific and hierarchically structured attributes and values. We implemented "A Rule-based Information Extraction System" (ARIES) that uses this ontology. We evaluated ARIES on 200 randomly selected medical reports of patients diagnosed with MM. Results: Our system achieved a high F1-Score of 0.92 on the evaluation dataset with a precision of 0.87 and recall of 0.98. Conclusions: Our rule-based IE system enables the comprehensive querying of medical reports. The IE accelerates the extraction of data and enables clinicians to generate RWE on hematological issues faster. RWE helps clinicians to make decisions in an evidence-based manner. Our tool easily accelerates the integration of research evidence into everyday clinical practice.}, language = {en} } @article{AliMontenegro2016, author = {Ali, Qasim and Montenegro, Sergio}, title = {Explicit Model Following Distributed Control Scheme for Formation Flying of Mini UAVs}, series = {IEEE Access}, volume = {4}, journal = {IEEE Access}, pages = {397-406}, doi = {10.1109/ACCESS.2016.2517203}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146061}, year = {2016}, abstract = {A centralized heterogeneous formation flight position control scheme has been formulated using an explicit model following design, based on a Linear Quadratic Regulator Proportional Integral (LQR PI) controller.
The leader quadcopter is a stable reference model with desired dynamics whose output is perfectly tracked by the two wingmen quadcopters. The leader itself is controlled through the pole placement control method with desired stability characteristics, while the two followers are controlled through a robust and adaptive LQR PI control method. The selected 3-D formation geometry and static stability are maintained under a number of possible perturbations. With this control scheme, formation geometry may also be switched to any arbitrary shape during flight, provided a suitable collision avoidance mechanism is incorporated. In case of communication loss between the leader and any of the followers, the other follower provides the data received from the leader to the affected follower. The stability of the closed-loop system has been analyzed using singular values. The proposed approach for the tightly coupled formation flight of mini unmanned aerial vehicles has been validated with the help of extensive simulations using MATLAB/Simulink, which provided promising results.}, language = {en} } @article{GehrkeBalbachRauchetal.2019, author = {Gehrke, Alexander and Balbach, Nico and Rauch, Yong-Mi and Degkwitz, Andreas and Puppe, Frank}, title = {Erkennung von handschriftlichen Unterstreichungen in Alten Drucken}, series = {Bibliothek Forschung und Praxis}, volume = {43}, journal = {Bibliothek Forschung und Praxis}, number = {3}, issn = {1865-7648}, doi = {10.1515/bfp-2019-2083}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193377}, pages = {447 -- 452}, year = {2019}, abstract = {The detection of handwritten artefacts such as underlines in printed books allows conclusions about reception behavior and provenance history and is also required for OCR. Handwritten underlines must be distinguished from horizontal lines that are part of the print (e.g., separator lines), since the latter should not be marked up. The article presents an approach based on a neural network with a U-Net architecture trained on underlines, whose results are post-processed in a second step with heuristic rules. The evaluations show that underlines are recognized very well as long as the binarization of the scans does not lose too many pixels of the underline due to low contrast. In future work, the words above the underlines are to be transcribed with OCR, and other artefacts such as handwritten notes in old prints are to be detected as well.}, language = {de} } @article{OberdoerferHeidrichBirnstieletal.2021, author = {Oberd{\"o}rfer, Sebastian and Heidrich, David and Birnstiel, Sandra and Latoschik, Marc Erich}, title = {Enchanted by Your Surrounding? Measuring the Effects of Immersion and Design of Virtual Environments on Decision-Making}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.679277}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260101}, pages = {679277}, year = {2021}, abstract = {Impaired decision-making leads to the inability to distinguish between advantageous and disadvantageous choices. The impairment of a person's decision-making is a common goal of gambling games. Given the recent trend of gambling using immersive Virtual Reality, it is crucial to investigate the effects of both immersion and the virtual environment (VE) on decision-making.
In a novel user study, we measured decision-making using three virtual versions of the Iowa Gambling Task (IGT). The versions differed with regard to the degree of immersion and design of the virtual environment. Since emotions affect decision-making, we further measured the positive and negative affect of participants. A higher visual angle on a stimulus leads to an increased emotional response. Thus, we kept the visual angle on the Iowa Gambling Task the same between our conditions. Our results revealed no significant impact of immersion or the VE on the IGT. We further found no significant difference between the conditions with regard to positive and negative affect. This suggests that neither the medium used nor the design of the VE causes an impairment of decision-making. However, in combination with a recent study, we provide initial evidence that a higher visual angle on the IGT leads to an impairment effect.}, language = {en} } @article{MadeiraGromerLatoschiketal.2021, author = {Madeira, Octavia and Gromer, Daniel and Latoschik, Marc Erich and Pauli, Paul}, title = {Effects of Acrophobic Fear and Trait Anxiety on Human Behavior in a Virtual Elevated Plus-Maze}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.635048}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258709}, year = {2021}, abstract = {The Elevated Plus-Maze (EPM) is a well-established apparatus to measure anxiety in rodents, i.e., animals exhibiting an increased relative time spent in the closed vs. the open arms are considered anxious. To examine whether such anxiety-modulated behaviors are conserved in humans, we re-translated this paradigm to a human setting using virtual reality in a Cave Automatic Virtual Environment (CAVE) system. In two studies, we examined whether the EPM exploration behavior of humans is modulated by their trait anxiety and also assessed the individuals' levels of acrophobia (fear of heights), claustrophobia (fear of confined spaces), sensation seeking, and the reported anxiety when on the maze. First, we constructed an exact virtual copy of the animal EPM adjusted to human proportions. In analogy to animal EPM studies, participants (N = 30) freely explored the EPM for 5 min. In the second study (N = 61), we redesigned the EPM to make it more human-adapted and to differentiate influences of trait anxiety and acrophobia by introducing various floor textures and lowering the walls of the closed arms to the height of standard handrails. In the first experiment, hierarchical regression analyses of exploration behavior revealed the expected association between open arm avoidance and trait anxiety, and an even stronger association with acrophobic fear. In the second study, results revealed that acrophobia was associated with avoidance of open arms with mesh-floor texture, whereas for trait anxiety, claustrophobia, and sensation seeking, no effect was detected. Also, subjects' fear rating was moderated by all psychometrics but trait anxiety. In sum, both studies consistently indicate that humans show no general open arm avoidance analogous to rodents and that human EPM behavior is modulated most strongly by acrophobic fear, whereas trait anxiety plays a subordinate role. Thus, we conclude that the criteria for cross-species validity are met insufficiently in this case.
Despite their exploratory nature, our studies provide in-depth insights into human exploration behavior on the virtual EPM.}, language = {en} } @article{DumicBjeloperaNuechter2021, author = {Dumic, Emil and Bjelopera, Anamaria and N{\"u}chter, Andreas}, title = {Dynamic point cloud compression based on projections, surface reconstruction and video compression}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {1}, issn = {1424-8220}, doi = {10.3390/s22010197}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-252231}, year = {2021}, abstract = {In this paper we present a new dynamic point cloud compression based on different projection types and bit depths, combined with a surface reconstruction algorithm and video compression for the obtained geometry and texture maps. Texture maps have been compressed after creating Voronoi diagrams. The video compression used is specific to geometry (FFV1) and texture (H.265/HEVC). Decompressed point clouds are reconstructed using a Poisson surface reconstruction algorithm. Comparison with the original point clouds was performed using point-to-point and point-to-plane measures. Comprehensive experiments show better performance for some projection maps: cylindrical, Miller and Mercator projections.}, language = {en} } @article{BuchinBuchinByrkaetal.2012, author = {Buchin, Kevin and Buchin, Maike and Byrka, Jaroslaw and N{\"o}llenburg, Martin and Okamoto, Yoshio and Silveira, Rodrigo I. and Wolff, Alexander}, title = {Drawing (Complete) Binary Tanglegrams}, series = {Algorithmica}, volume = {62}, journal = {Algorithmica}, doi = {10.1007/s00453-010-9456-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-124622}, pages = {309-332}, year = {2012}, abstract = {A binary tanglegram is a drawing of a pair of rooted binary trees whose leaf sets are in one-to-one correspondence; matching leaves are connected by inter-tree edges. For applications, for example, in phylogenetics, it is essential that both trees are drawn without edge crossings and that the inter-tree edges have as few crossings as possible. It is known that finding a tanglegram with the minimum number of crossings is NP-hard and that the problem is fixed-parameter tractable with respect to that number. We prove that under the Unique Games Conjecture there is no constant-factor approximation for binary trees. We show that the problem is NP-hard even if both trees are complete binary trees. For this case we give an O(n^3)-time 2-approximation and a new, simple fixed-parameter algorithm. We show that the maximization version of the dual problem for binary trees can be reduced to a version of MaxCut for which the algorithm of Goemans and Williamson yields a 0.878-approximation.}, language = {en} } @article{SteiningerKobsDavidsonetal.2021, author = {Steininger, Michael and Kobs, Konstantin and Davidson, Padraig and Krause, Anna and Hotho, Andreas}, title = {Density-based weighting for imbalanced regression}, series = {Machine Learning}, volume = {110}, journal = {Machine Learning}, number = {8}, issn = {1573-0565}, doi = {10.1007/s10994-021-06023-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269177}, pages = {2187-2211}, year = {2021}, abstract = {In many real-world settings, imbalanced data impedes model performance of learning algorithms, like neural networks, mostly for rare cases. This is especially problematic for tasks focusing on these rare occurrences.
For example, when estimating precipitation, extreme rainfall events are scarce but important considering their potential consequences. While there are numerous well-studied solutions for classification settings, most of them cannot be applied to regression easily. Of the few solutions for regression tasks, barely any have explored cost-sensitive learning, which is known to have advantages compared to sampling-based methods in classification tasks. In this work, we propose a sample weighting approach for imbalanced regression datasets called DenseWeight and a cost-sensitive learning approach for neural network regression with imbalanced data called DenseLoss based on our weighting scheme. DenseWeight weights data points according to their target value rarities through kernel density estimation (KDE). DenseLoss adjusts each data point's influence on the loss according to DenseWeight, giving rare data points more influence on model training compared to common data points. We show on multiple differently distributed datasets that DenseLoss significantly improves model performance for rare data points through its density-based weighting scheme. Additionally, we compare DenseLoss to the state-of-the-art method SMOGN, finding that our method mostly yields better performance. Our approach provides more control over model training as it enables us to actively decide on the trade-off between focusing on common or rare cases through a single hyperparameter, allowing the training of better models for rare data points.}, language = {en} } @article{SeufertSchroederSeufert2021, author = {Seufert, Anika and Schr{\"o}der, Svenja and Seufert, Michael}, title = {Delivering User Experience over Networks: Towards a Quality of Experience Centered Design Cycle for Improved Design of Networked Applications}, series = {SN Computer Science}, volume = {2}, journal = {SN Computer Science}, number = {6}, issn = {2661-8907}, doi = {10.1007/s42979-021-00851-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-271762}, year = {2021}, abstract = {To deliver the best user experience (UX), the human-centered design cycle (HCDC) serves as a well-established guideline for application developers. However, it does not yet cover network-specific requirements, which become increasingly crucial, as most applications deliver experience over the Internet. The missing network-centric view is provided by Quality of Experience (QoE), which could team up with UX towards an improved overall experience. By considering QoE aspects during the development process, applications can become network-aware by design. In this paper, the Quality of Experience Centered Design Cycle (QoE-CDC) is proposed, which provides guidelines on how to design applications with respect to network-specific requirements and QoE. Its practical value is showcased for popular application types and validated by outlining the design of a new smartphone application.
We show that combining HCDC and QoE-CDC results in an application design that reaches a high UX and avoids QoE degradation.}, language = {en} } @article{MuellerLeppichGeissetal.2023, author = {M{\"u}ller, Konstantin and Leppich, Robert and Geiß, Christian and Borst, Vanessa and Pelizari, Patrick Aravena and Kounev, Samuel and Taubenb{\"o}ck, Hannes}, title = {Deep neural network regression for normalized digital surface model generation with Sentinel-2 imagery}, series = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, volume = {16}, journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, issn = {1939-1404}, doi = {10.1109/JSTARS.2023.3297710}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349424}, pages = {8508-8519}, year = {2023}, abstract = {In recent history, normalized digital surface models (nDSMs) have been constantly gaining importance as a means to solve large-scale geographic problems. High-resolution surface models are precious, as they can provide detailed information for a specific area. However, measurements with a high resolution are time-consuming and costly. Only a few approaches exist to create high-resolution nDSMs for extensive areas. This article explores approaches to extract high-resolution nDSMs from low-resolution Sentinel-2 data, allowing us to derive large-scale models. We thereby utilize the advantages of Sentinel-2 being open access, having global coverage, and providing steady updates through a high repetition rate. Several deep learning models are trained to overcome the gap in producing high-resolution surface maps from low-resolution input data. With U-Net as a base architecture, we extend the capabilities of our model by integrating tailored multiscale encoders with differently sized kernels in the convolution as well as conformed self-attention inside the skip connection gates. Using pixelwise regression, our U-Net base models can achieve a mean height error of approximately 2 m. Moreover, through our enhancements to the model architecture, we reduce the model error by more than 7\%.}, language = {en} } @article{AliMontenegro2016, author = {Ali, Qasim and Montenegro, Sergio}, title = {Decentralized control for scalable quadcopter formations}, series = {International Journal of Aerospace Engineering}, volume = {2016}, journal = {International Journal of Aerospace Engineering}, doi = {10.1155/2016/9108983}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146704}, pages = {9108983}, year = {2016}, abstract = {An innovative framework has been developed for teamwork of two quadcopter formations, each having its specified formation geometry, assigned task, and matching control scheme. Position control for quadcopters in one of the formations has been implemented through a Linear Quadratic Regulator Proportional Integral (LQR PI) control scheme based on an explicit model following scheme. Quadcopters in the other formation are controlled through an LQR PI servomechanism control scheme. These two control schemes are compared in terms of their performance and control effort. Both formations are commanded by respective ground stations through virtual leaders. Quadcopters in formations are able to track desired trajectories as well as hover at desired points for a selected time duration. In case of communication loss between ground station and any of the quadcopters, the neighboring quadcopter provides the command data, received from the ground station, to the affected unit.
The proposed control schemes have been validated through extensive simulations using MATLAB®/Simulink®, which provided favorable results.}, language = {en} } @article{DuLauterbachLietal.2020, author = {Du, Shitong and Lauterbach, Helge A. and Li, Xuyou and Demisse, Girum G. and Borrmann, Dorit and N{\"u}chter, Andreas}, title = {Curvefusion — A Method for Combining Estimated Trajectories with Applications to SLAM and Time-Calibration}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {23}, issn = {1424-8220}, doi = {10.3390/s20236918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219988}, year = {2020}, abstract = {Mapping and localization of mobile robots in an unknown environment are essential for most high-level operations like autonomous navigation or exploration. This paper presents a novel approach for combining estimated trajectories, namely curvefusion. The robot used in the experiments is equipped with a horizontally mounted 2D profiler, a constantly spinning 3D laser scanner and a GPS module. The proposed algorithm first combines trajectories from different sensors to optimize poses of the planar three degrees of freedom (DoF) trajectory, which is then fed into continuous-time simultaneous localization and mapping (SLAM) to further improve the trajectory. While state-of-the-art multi-sensor fusion methods mainly focus on probabilistic methods, our approach instead adopts a deformation-based method to optimize poses. To this end, a similarity metric for curved shapes is introduced into the robotics community to fuse the estimated trajectories. Additionally, a shape-based point correspondence estimation method is applied to the multi-sensor time calibration. Experiments show that the proposed fusion method can achieve relatively better accuracy, even if the error of the trajectory before fusion is large, which demonstrates that our method can still maintain a certain degree of accuracy in an environment where typical pose estimation methods have poor performance. In addition, the proposed time-calibration method also achieves high accuracy in estimating point correspondences.}, language = {en} } @article{AtienzadeCastroCortesetal.2012, author = {Atienza, Nieves and de Castro, Natalia and Cort{\´e}s, Carmen and Garrido, M. {\´A}ngeles and Grima, Clara I. and Hern{\´a}ndez, Gregorio and M{\´a}rquez, Alberto and Moreno-Gonz{\´a}lez, Auxiliadora and N{\"o}llenburg, Martin and Portillo, Jos{\´e} Ram{\´o}n and Reyes, Pedro and Valenzuela, Jes{\´u}s and Trinidad Villar, Maria and Wolff, Alexander}, title = {Cover contact graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78845}, year = {2012}, abstract = {We study problems that arise in the context of covering certain geometric objects called seeds (e.g., points or disks) by a set of other geometric objects called cover (e.g., a set of disks or homothetic triangles). We insist that the interiors of the seeds and the cover elements are pairwise disjoint, respectively, but they can touch. We call the contact graph of a cover a cover contact graph (CCG). We are interested in three types of tasks, both in the general case and in the special case of seeds on a line: (a) deciding whether a given seed set has a connected CCG, (b) deciding whether a given graph has a realization as a CCG on a given seed set, and (c) bounding the sizes of certain classes of CCGs. Concerning (a), we give efficient algorithms for the case that seeds are points and show that the problem becomes hard if seeds and covers are disks.
Concerning (b), we show that this problem is hard even for point seeds and disk covers (given a fixed correspondence between graph vertices and seeds). Concerning (c), we obtain upper and lower bounds on the number of CCGs for point seeds.}, subject = {Informatik}, language = {en} } @article{SteiningerAbelZiegleretal.2023, author = {Steininger, Michael and Abel, Daniel and Ziegler, Katrin and Krause, Anna and Paeth, Heiko and Hotho, Andreas}, title = {ConvMOS: climate model output statistics with deep learning}, series = {Data Mining and Knowledge Discovery}, volume = {37}, journal = {Data Mining and Knowledge Discovery}, number = {1}, issn = {1384-5810}, doi = {10.1007/s10618-022-00877-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324213}, pages = {136-166}, year = {2023}, abstract = {Climate models are the tool of choice for scientists researching climate change. Like all models, they suffer from errors, particularly systematic and location-specific representation errors. One way to reduce these errors is model output statistics (MOS), where the model output is fitted to observational data with machine learning. In this work, we assess the use of convolutional Deep Learning climate MOS approaches and present the ConvMOS architecture, which is specifically designed based on the observation that there are systematic and location-specific errors in the precipitation estimates of climate models. We apply ConvMOS models to the simulated precipitation of the regional climate model REMO, showing that a combination of per-location model parameters for reducing location-specific errors and global model parameters for reducing systematic errors is indeed beneficial for MOS performance. We find that ConvMOS models can reduce errors considerably and perform significantly better than three commonly used MOS approaches and plain ResNet and U-Net models in most cases. Our results show that non-linear MOS models underestimate the number of extreme precipitation events, which we alleviate by training models specialized towards extreme precipitation events with the imbalanced regression method DenseLoss. While we consider climate MOS, we argue that aspects of ConvMOS may also be beneficial in other domains with geospatial data, such as air pollution modeling or weather forecasts.}, subject = {Klima}, language = {en} } @article{GlemarecLugrinBosseretal.2022, author = {Gl{\´e}marec, Yann and Lugrin, Jean-Luc and Bosser, Anne-Gwenn and Buche, C{\´e}dric and Latoschik, Marc Erich}, title = {Controlling the stage: a high-level control system for virtual audiences in Virtual Reality}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.876433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284601}, year = {2022}, abstract = {This article presents a novel method for controlling a virtual audience system (VAS) in a Virtual Reality (VR) application called STAGE, which was originally designed for supervised public speaking training in university seminars dedicated to the preparation and delivery of scientific talks. We are interested in creating pedagogical narratives: narratives encompass affective phenomena, and rather than organizing events that change the course of a training scenario, pedagogical plans using our system focus on organizing the affects it arouses in the trainees.
Efficiently controlling a virtual audience towards a specific training objective while evaluating the speaker's performance presents a challenge for a seminar instructor: controlling the virtual audience, evaluating the speaker's performance, and adjusting the audience so that it quickly reacts to the user's behaviors and interactions place a high level of cognitive and physical demands on the instructor. It is indeed a critical limitation of a number of existing systems that they rely on a Wizard of Oz approach, where the tutor drives the audience in reaction to the user's performance. We address this problem by integrating into the VAS a high-level control component for tutors, which allows using predefined audience behavior rules, defining custom ones, and intervening at run-time for finer control of the unfolding pedagogical plan. At its core, this component offers a tool to program, select, modify and monitor interactive training narratives using a high-level representation. STAGE offers the following features: i) a high-level API to program pedagogical narratives focusing on a specific public speaking situation and training objectives, ii) an interactive visualization interface, iii) computation and visualization of user metrics, iv) a semi-autonomous virtual audience composed of virtual spectators with automatic reactions to the speaker and surrounding spectators while following the pedagogical plan, and v) the possibility for the instructor to embody a virtual spectator to ask questions or guide the speaker from within the Virtual Environment. We present here the design and implementation of the tutoring system and its integration into STAGE, and discuss its reception by end-users.}, language = {en} } @article{LatoschikWienrich2022, author = {Latoschik, Marc Erich and Wienrich, Carolin}, title = {Congruence and plausibility, not presence: pivotal conditions for XR experiences and effects, a novel approach}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.694433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284787}, year = {2022}, abstract = {Presence is often considered the most important quale describing the subjective feeling of being in a computer-generated and/or computer-mediated virtual environment. The identification and separation of orthogonal presence components, i.e., the place illusion and the plausibility illusion, has been an accepted theoretical model describing Virtual Reality (VR) experiences for some time. This perspective article challenges this presence-oriented VR theory. First, we argue that a place illusion cannot be the major construct to describe the much wider scope of virtual, augmented, and mixed reality (VR, AR, MR; or XR for short). Second, we argue that there is no plausibility illusion but merely plausibility, and we derive that the place illusion is caused by the congruent and plausible generation of spatial cues, and similarly for all the current model's so-defined illusions.
Finally, we propose congruence and plausibility to become the central essential conditions in a novel theoretical model describing XR experiences and effects.}, language = {en} } @article{BoehlerCreignouGalotaetal.2012, author = {B{\"o}hler, Elmar and Creignou, Nadia and Galota, Matthias and Reith, Steffen and Schnoor, Henning and Vollmer, Heribert}, title = {Complexity Classifications for Different Equivalence and Audit Problems for Boolean Circuits}, series = {Logical Methods in Computer Science}, volume = {8}, journal = {Logical Methods in Computer Science}, number = {3:27}, doi = {10.2168/LMCS-8(3:27)2012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-131121}, pages = {1 -- 25}, year = {2012}, abstract = {We study Boolean circuits as a representation of Boolean functions and consider different equivalence, audit, and enumeration problems. For a number of restricted sets of gate types (bases), we obtain efficient algorithms, while for all other gate types we show that these problems are at least NP-hard.}, language = {en} }