@article{LohWamserPoigneeetal.2022, author = {Loh, Frank and Wamser, Florian and Poign{\'e}e, Fabian and Geißler, Stefan and Hoßfeld, Tobias}, title = {YouTube Dataset on Mobile Streaming for Internet Traffic Modeling and Streaming Analysis}, series = {Scientific Data}, volume = {9}, journal = {Scientific Data}, number = {1}, doi = {10.1038/s41597-022-01418-y}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300240}, year = {2022}, abstract = {Around 4.9 billion Internet users worldwide watch billions of hours of online video every day. As a result, streaming is by far the predominant type of traffic in communication networks. According to Google statistics, three out of five video views come from mobile devices. Thus, in view of the continuous technological advances in end devices and increasing mobile use, datasets for mobile streaming are indispensable in research but only sparsely dealt with in literature so far. With this public dataset, we provide 1,081 hours of time-synchronous video measurements at network, transport, and application layer with the native YouTube streaming client on mobile devices. The dataset includes 80 network scenarios with 171 different individual bandwidth settings measured in 5,181 runs with limited bandwidth, 1,939 runs with emulated 3G/4G traces, and 4,022 runs with pre-defined bandwidth changes. This corresponds to 332 GB of video payload. We present the most relevant quality indicators for scientific use, i.e., initial playback delay, streaming video quality, adaptive video quality changes, video rebuffering events, and streaming phases.}, language = {en} } @article{GageikStrohmeierMontenegro2013, author = {Gageik, Nils and Strohmeier, Michael and Montenegro, Sergio}, title = {Waypoint flight parameter comparison of an autonomous UAV}, series = {International Journal of Artificial Intelligence \& Applications (IJAIA)}, journal = {International Journal of Artificial Intelligence \& Applications (IJAIA)}, doi = {10.5121/ijaia.2013.4304}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96833}, year = {2013}, abstract = {The present paper compares the effect of different waypoint parameters on the flight performance of a special autonomous indoor UAV (unmanned aerial vehicle) fusing ultrasonic, inertial, pressure and optical sensors for 3D positioning and controlling. The investigated parameters are the acceptance threshold for reaching a waypoint as well as the maximal waypoint step size or block size. The effect of these parameters on the flight time and accuracy of the flight path is investigated. Therefore, the paper addresses how the acceptance threshold and step size influence the speed and accuracy of the autonomous flight and thus influence the performance of the presented autonomous quadrocopter under real indoor navigation circumstances. 
Furthermore the paper demonstrates a drawback of the standard potential field method for navigation of such autonomous quadrocopters and points to an improvement.}, language = {en} } @article{WamserSeufertHalletal.2021, author = {Wamser, Florian and Seufert, Anika and Hall, Andrew and Wunderer, Stefan and Hoßfeld, Tobias}, title = {Valid statements by the crowd: statistical measures for precision in crowdsourced mobile measurements}, series = {Network}, volume = {1}, journal = {Network}, number = {2}, issn = {2673-8732}, doi = {10.3390/network1020013}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284154}, pages = {215 -- 232}, year = {2021}, abstract = {Crowdsourced network measurements (CNMs) are becoming increasingly popular as they assess the performance of a mobile network from the end user's perspective on a large scale. Here, network measurements are performed directly on the end-users' devices, thus taking advantage of the real-world conditions end-users encounter. However, this type of uncontrolled measurement raises questions about its validity and reliability. The problem lies in the nature of this type of data collection. In CNMs, mobile network subscribers are involved to a large extent in the measurement process, and collect data themselves for the operator. The collection of data on user devices in arbitrary locations and at uncontrolled times requires means to ensure validity and reliability. To address this issue, our paper defines concepts and guidelines for analyzing the precision of CNMs; specifically, the number of measurements required to make valid statements. In addition to the formal definition of the aspect, we illustrate the problem and use an extensive sample data set to show possible assessment approaches. This data set consists of more than 20.4 million crowdsourced mobile measurements from across France, measured by a commercial data provider.}, language = {en} } @article{LimanMayFetteetal.2023, author = {Liman, Leon and May, Bernd and Fette, Georg and Krebs, Jonathan and Puppe, Frank}, title = {Using a clinical data warehouse to calculate and present key metrics for the radiology department: implementation and performance evaluation}, series = {JMIR Medical Informatics}, volume = {11}, journal = {JMIR Medical Informatics}, issn = {2291-9694}, doi = {10.2196/41808}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349411}, year = {2023}, abstract = {Background: Due to the importance of radiologic examinations, such as X-rays or computed tomography scans, for many clinical diagnoses, the optimal use of the radiology department is 1 of the primary goals of many hospitals. Objective: This study aims to calculate the key metrics of this use by creating a radiology data warehouse solution, where data from radiology information systems (RISs) can be imported and then queried using a query language as well as a graphical user interface (GUI). Methods: Using a simple configuration file, the developed system allowed for the processing of radiology data exported from any kind of RIS into a Microsoft Excel, comma-separated value (CSV), or JavaScript Object Notation (JSON) file. These data were then imported into a clinical data warehouse. Additional values based on the radiology data were calculated during this import process by implementing 1 of several provided interfaces. Afterward, the query language and GUI of the data warehouse were used to configure and calculate reports on these data. 
For the most common types of requested reports, a web interface was created to view their numbers as graphics. Results: The tool was successfully tested with the data of 4 different German hospitals from 2018 to 2021, with a total of 1,436,111 examinations. The user feedback was good, since all their queries could be answered if the available data were sufficient. The initial processing of the radiology data for using them with the clinical data warehouse took (depending on the amount of data provided by each hospital) between 7 minutes and 1 hour 11 minutes. Calculating 3 reports of different complexities on the data of each hospital was possible in 1-3 seconds for reports with up to 200 individual calculations and in up to 1.5 minutes for reports with up to 8200 individual calculations. Conclusions: A system was developed with the main advantage of being generic concerning the export of different RISs as well as concerning the configuration of queries for various reports. The queries could be configured easily using the GUI of the data warehouse, and their results could be exported into the standard formats Excel and CSV for further processing.}, language = {en} } @article{LohPoigneeWamseretal.2021, author = {Loh, Frank and Poign{\'e}e, Fabian and Wamser, Florian and Leidinger, Ferdinand and Hoßfeld, Tobias}, title = {Uplink vs. Downlink: Machine Learning-Based Quality Prediction for HTTP Adaptive Video Streaming}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {12}, issn = {1424-8220}, doi = {10.3390/s21124172}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241121}, year = {2021}, abstract = {Streaming video is responsible for the bulk of Internet traffic these days. For this reason, Internet providers and network operators try to make predictions and assessments about the streaming quality for an end user. Current monitoring solutions are based on a variety of different machine learning approaches. The challenge for providers and operators nowadays is that existing approaches require large amounts of data. In this work, the most relevant quality of experience metrics, i.e., the initial playback delay, the video streaming quality, video quality changes, and video rebuffering events, are examined using a voluminous data set of more than 13,000 YouTube video streaming runs that were collected with the native YouTube mobile app. Three Machine Learning models are developed and compared to estimate playback behavior based on uplink request information. The main focus has been on developing a lightweight approach using as few features and as little data as possible, while maintaining state-of-the-art performance.}, language = {en} } @article{KaiserLeschRotheetal.2020, author = {Kaiser, Dennis and Lesch, Veronika and Rothe, Julian and Strohmeier, Michael and Spieß, Florian and Krupitzer, Christian and Montenegro, Sergio and Kounev, Samuel}, title = {Towards Self-Aware Multirotor Formations}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010007}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200572}, pages = {7}, year = {2020}, abstract = {In the present day, unmanned aerial vehicles become seemingly more popular every year, but, without regulation of the increasing number of these vehicles, the air space could become chaotic and uncontrollable. In this work, a framework is proposed to combine self-aware computing with multirotor formations to address this problem. 
The self-awareness is envisioned to improve the dynamic behavior of multirotors. The formation scheme that is implemented is called platooning, which arranges vehicles in a string behind the lead vehicle and is proposed to bring order into chaotic air space. Since multirotors define a general category of unmanned aerial vehicles, the focus of this thesis is quadcopters, platforms with four rotors. A modification for the LRA-M self-awareness loop is proposed and named Platooning Awareness. The implemented framework is able to offer two flight modes that enable waypoint following and the self-awareness module to find a path through scenarios, where obstacles are present on the way, onto a goal position. The evaluation of this work shows that the proposed framework is able to use self-awareness to learn about its environment, avoid obstacles, and can successfully move a platoon of drones through multiple scenarios.}, language = {en} } @article{LohMehlingHossfeld2022, author = {Loh, Frank and Mehling, Noah and Hoßfeld, Tobias}, title = {Towards LoRaWAN without data loss: studying the performance of different channel access approaches}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {2}, issn = {1424-8220}, doi = {10.3390/s22020691}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-302418}, year = {2022}, abstract = {The Long Range Wide Area Network (LoRaWAN) is one of the fastest growing Internet of Things (IoT) access protocols. It operates in the license-free 868 MHz band and gives everyone the possibility to create their own small sensor networks. The drawback of this technology is often unscheduled or random channel access, which leads to message collisions and potential data loss. For that reason, recent literature studies alternative approaches for LoRaWAN channel access. In this work, state-of-the-art random channel access is compared with alternative approaches from the literature by means of collision probability. Furthermore, a time scheduled channel access methodology is presented to completely avoid collisions in LoRaWAN. For this approach, an exhaustive simulation study was conducted and the performance was evaluated with random access cross-traffic. In a general theoretical analysis the limits of the time scheduled approach are discussed to comply with duty cycle regulations in LoRaWAN.}, language = {en} } @article{UnruhLandeckOberdoerferetal.2021, author = {Unruh, Fabian and Landeck, Maximilian and Oberd{\"o}rfer, Sebastian and Lugrin, Jean-Luc and Latoschik, Marc Erich}, title = {The Influence of Avatar Embodiment on Time Perception - Towards VR for Time-Based Therapy}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.658509}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259076}, pages = {658509}, year = {2021}, abstract = {Psycho-pathological conditions, such as depression or schizophrenia, are often accompanied by a distorted perception of time. People suffering from these conditions often report that the passage of time slows down considerably and that they are "stuck in time." Virtual Reality (VR) could potentially help to diagnose and maybe treat such mental conditions. However, the conditions in which a VR simulation could correctly diagnose a time perception deviation are still unknown. In this paper, we present an experiment investigating the difference in time experience with and without a virtual body in VR, also known as avatar. 
The process of substituting a person's body with a virtual body is called avatar embodiment. Numerous studies demonstrated interesting perceptual, emotional, behavioral, and psychological effects caused by avatar embodiment. However, the relations between time perception and avatar embodiment are still unclear. Whether or not the presence or absence of an avatar is already influencing time perception is still open to question. Therefore, we conducted a between-subjects design with and without avatar embodiment as well as a real condition (avatar vs. no-avatar vs. real). A group of 105 healthy subjects had to wait for seven and a half minutes in a room without any distractors (e.g., no window, magazine, people, decoration) or time indicators (e.g., clocks, sunlight). The virtual environment replicates the real physical environment. Participants were unaware that they would be asked to estimate their waiting time duration as well as to describe their experience of the passage of time at a later stage. Our main finding shows that the presence of an avatar leads to a significantly faster perceived passage of time. It seems promising to integrate avatar embodiment in future VR time-based therapy applications, as they could potentially modulate a user's perception of the passage of time. We also found no significant difference in time perception between the real and the VR conditions (avatar, no-avatar), but further research is needed to better understand this outcome.}, language = {en} } @article{ObremskiFriedrichHaaketal.2022, author = {Obremski, David and Friedrich, Paula and Haak, Nora and Schaper, Philipp and Lugrin, Birgit}, title = {The impact of mixed-cultural speech on the stereotypical perception of a virtual robot}, series = {Frontiers in Robotics and AI}, volume = {9}, journal = {Frontiers in Robotics and AI}, issn = {2296-9144}, doi = {10.3389/frobt.2022.983955}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-293531}, year = {2022}, abstract = {Despite the fact that mixed-cultural backgrounds become of increasing importance in our daily life, the representation of multiple cultural backgrounds in one entity is still rare in socially interactive agents (SIAs). This paper's contribution is twofold. First, it provides a survey of research on mixed-cultured SIAs. Second, it presents a study investigating how mixed-cultural speech (in this case, non-native accent) influences how a virtual robot is perceived in terms of personality, warmth, competence and credibility. Participants with English or German respectively as their first language watched a video of a virtual robot speaking in either standard English or German-accented English. It was expected that the German-accented speech would be rated more positively by native German participants as well as elicit the German stereotypes credibility and conscientiousness for both German and English participants. Contrary to the expectations, German participants rated the virtual robot lower in terms of competence and credibility when it spoke with a German accent, whereas English participants perceived the virtual robot with a German accent as more credible compared to the version without an accent. Both the native English and native German listeners classified the virtual robot with a German accent as significantly more neurotic than the virtual robot speaking standard English. This work shows that by solely implementing a non-native accent in a virtual robot, stereotypes are partly transferred. 
It also shows that the implementation of a non-native accent leads to differences in the perception of the virtual robot.}, language = {en} } @article{LeschKoenigKounevetal.2022, author = {Lesch, Veronika and K{\"o}nig, Maximilian and Kounev, Samuel and Stein, Anthony and Krupitzer, Christian}, title = {Tackling the rich vehicle routing problem with nature-inspired algorithms}, series = {Applied Intelligence}, volume = {52}, journal = {Applied Intelligence}, issn = {1573-7497}, doi = {10.1007/s10489-021-03035-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-268942}, pages = {9476-9500}, year = {2022}, abstract = {In the last decades, the classical Vehicle Routing Problem (VRP), i.e., assigning a set of orders to vehicles and planning their routes, has been intensively researched. As the assignment of orders to vehicles and the planning of their routes is already an NP-complete problem, the application of these algorithms in practice often fails to take into account the constraints and restrictions that apply in real-world applications, the so-called rich VRP (rVRP), and is limited to single aspects. In this work, we incorporate the main relevant real-world constraints and requirements. We propose a two-stage strategy and a Timeline algorithm for time windows and pause times, and apply a Genetic Algorithm (GA) and Ant Colony Optimization (ACO) individually to the problem to find optimal solutions. Our evaluation of eight different problem instances against four state-of-the-art algorithms shows that our approach handles all given constraints in a reasonable time.}, language = {en} } @article{WickHarteltPuppe2019, author = {Wick, Christoph and Hartelt, Alexander and Puppe, Frank}, title = {Staff, symbol and melody detection of Medieval manuscripts written in square notation using deep Fully Convolutional Networks}, series = {Applied Sciences}, volume = {9}, journal = {Applied Sciences}, number = {13}, issn = {2076-3417}, doi = {10.3390/app9132646}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197248}, year = {2019}, abstract = {Even today, the automatic digitisation of scanned documents in general, but especially the automatic optical music recognition (OMR) of historical manuscripts, still remains an enormous challenge, since both handwritten musical symbols and text have to be identified. This paper focuses on the Medieval so-called square notation developed in the 11th-12th century, which is already composed of staff lines, staves, clefs, accidentals, and neumes, which are, roughly speaking, connected single notes. The aim is to develop an algorithm that captures both the neumes, and in particular its melody, which can be used to reconstruct the original writing. Our pipeline is similar to the standard OMR approach and comprises a novel staff line and symbol detection algorithm based on deep Fully Convolutional Networks (FCN), which perform pixel-based predictions for either staff lines or symbols and their respective types. Then, the staff line detection combines the extracted lines to staves and yields an F\(_1\)-score of over 99\% for both detecting lines and complete staves. For the music symbol detection, we choose a novel approach that skips the step to identify neumes and instead directly predicts note components (NCs) and their respective affiliation to a neume. Furthermore, the algorithm detects clefs and accidentals. Our algorithm predicts the symbol sequence of a staff with a diplomatic symbol accuracy rate (dSAR) of about 87\%, which includes symbol type and location. 
If only the NCs (without their respective connection to a neume), all clefs, and accidentals are of interest, the algorithm reaches a harmonic symbol accuracy rate (hSAR) of approximately 90\%. In general, the algorithm recognises a symbol in the manuscript with an F\(_1\)-score of over 96\%.}, language = {en} } @article{WienrichKommaVogtetal.2021, author = {Wienrich, Carolin and Komma, Philipp and Vogt, Stephanie and Latoschik, Marc E.}, title = {Spatial Presence in Mixed Realities - Considerations About the Concept, Measures, Design, and Experiments}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.694315}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260328}, year = {2021}, abstract = {Plenty of theories, models, measures, and investigations target the understanding of virtual presence, i.e., the sense of presence in immersive Virtual Reality (VR). Other varieties of the so-called eXtended Realities (XR), e.g., Augmented and Mixed Reality (AR and MR) incorporate immersive features to a lesser degree and continuously combine spatial cues from the real physical space and the simulated virtual space. This blurred separation questions the applicability of the accumulated knowledge about the similarities of virtual presence and presence occurring in other varieties of XR, and corresponding outcomes. The present work bridges this gap by analyzing the construct of presence in mixed realities (MR). To achieve this, the following presents (1) a short review of definitions, dimensions, and measurements of presence in VR, and (2) the state-of-the-art views on MR. Additionally, we (3) derived a working definition of MR, extending the Milgram continuum. This definition is based on entities reaching from real to virtual manifestations at one time point. Entities possess different degrees of referential power, determining the selection of the frame of reference. Furthermore, we (4) identified three research desiderata, including research questions about the frame of reference, the corresponding dimension of transportation, and the dimension of realism in MR. Mainly the relationship between the main aspects of virtual presence of immersive VR, i.e., the place-illusion and the plausibility-illusion, and of the referential power of MR entities is discussed regarding the concept, measures, and design of presence in MR. Finally, (5) we suggested an experimental setup to reveal the research heuristic behind experiments investigating presence in MR. The present work contributes to the theories and the meaning of and approaches to simulate and measure presence in MR. 
We hypothesize that research about essential underlying factors determining user experience (UX) in MR simulations and experiences is still in its infancy and hope this article provides an encouraging starting point to tackle related questions.}, language = {en} } @article{DavidsonDuekingZinneretal.2020, author = {Davidson, Padraig and D{\"u}king, Peter and Zinner, Christoph and Sperlich, Billy and Hotho, Andreas}, title = {Smartwatch-Derived Data and Machine Learning Algorithms Estimate Classes of Ratings of Perceived Exertion in Runners: A Pilot Study}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {9}, issn = {1424-8220}, doi = {10.3390/s20092637}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205686}, year = {2020}, abstract = {The rating of perceived exertion (RPE) is a subjective load marker and may assist in individualizing training prescription, particularly by adjusting running intensity. Unfortunately, RPE has shortcomings (e.g., underreporting) and cannot be monitored continuously and automatically throughout a training session. In this pilot study, we aimed to predict two classes of RPE (≤15 "Somewhat hard to hard" on Borg's 6-20 scale vs. RPE >15) in runners by analyzing data recorded by a commercially-available smartwatch with machine learning algorithms. Twelve trained and untrained runners performed long-continuous runs at a constant self-selected pace to volitional exhaustion. Untrained runners reported their RPE each kilometer, whereas trained runners reported every five kilometers. The kinetics of heart rate, step cadence, and running velocity were recorded continuously (1 Hz) with a commercially-available smartwatch (Polar V800). We trained different machine learning algorithms to estimate the two classes of RPE based on the time series sensor data derived from the smartwatch. Predictions were analyzed in different settings: accuracy overall and per runner type; i.e., accuracy for trained and untrained runners independently. We achieved top accuracies of 84.8\% for the whole dataset, 81.8\% for the trained runners, and 86.1\% for the untrained runners. We predict two classes of RPE with high accuracy using machine learning and smartwatch data. This approach might aid in individualizing training prescriptions.}, language = {en} } @article{BaierBaierSaipSchillingetal.2016, author = {Baier, Pablo A. and Baier-Saip, J{\"u}rgen A. and Schilling, Klaus and Oliveira, Jauvane C.}, title = {Simulator for Minimally Invasive Vascular Interventions: Hardware and Software}, series = {Presence}, volume = {25}, journal = {Presence}, number = {2}, issn = {1531-3263}, doi = {10.1162/PRES_a_00250}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-140580}, pages = {108-128}, year = {2016}, abstract = {In the present work, a simulation system is proposed that can be used as an educational tool by physicians in training basic skills of minimally invasive vascular interventions. In order to accomplish this objective, initially the physical model of the wire proposed by Konings has been improved. As a result, a simpler and more stable method was obtained to calculate the equilibrium configuration of the wire. In addition, a geometrical method is developed to perform relaxations. It is particularly useful when the wire is hindered in the physical method because of the boundary conditions. Then a recipe is given to merge the physical and the geometrical methods, resulting in efficient relaxations. 
Moreover, tests have shown that the shape of the virtual wire agrees with the experiment. The proposed algorithm allows real-time executions, and furthermore, the hardware to assemble the simulator has a low cost.}, language = {en} } @article{SeufertPoigneeSeufertetal.2023, author = {Seufert, Anika and Poign{\'e}e, Fabian and Seufert, Michael and Hoßfeld, Tobias}, title = {Share and multiply: modeling communication and generated traffic in private WhatsApp groups}, series = {IEEE Access}, volume = {11}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2023.3254913}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349430}, pages = {25401-25414}, year = {2023}, abstract = {Group-based communication is a highly popular communication paradigm, which is especially prominent in mobile instant messaging (MIM) applications, such as WhatsApp. Chat groups in MIM applications facilitate the sharing of various types of messages (e.g., text, voice, image, video) among a large number of participants. As each message has to be transmitted to every other member of the group, which multiplies the traffic, this has a massive impact on the underlying communication networks. However, most chat groups are private and network operators cannot obtain deep insights into MIM communication via network measurements due to end-to-end encryption. Thus, the generation of traffic is not well understood, given that it depends on sizes of communication groups, speed of communication, and exchanged message types. In this work, we provide a huge data set of 5,956 private WhatsApp chat histories, which contains over 76 million messages from more than 117,000 users. We describe and model the properties of chat groups and users, and the communication within these chat groups, which gives unprecedented insights into private MIM communication. In addition, we conduct exemplary measurements for the most popular message types, which empower the provided models to estimate the traffic over time in a chat group.}, language = {en} } @article{ZimmererFischbachLatoschik2018, author = {Zimmerer, Chris and Fischbach, Martin and Latoschik, Marc Erich}, title = {Semantic Fusion for Natural Multimodal Interfaces using Concurrent Augmented Transition Networks}, series = {Multimodal Technologies and Interaction}, volume = {2}, journal = {Multimodal Technologies and Interaction}, number = {4}, issn = {2414-4088}, doi = {10.3390/mti2040081}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197573}, year = {2018}, abstract = {Semantic fusion is a central requirement of many multimodal interfaces. Procedural methods like finite-state transducers and augmented transition networks have proven to be beneficial to implement semantic fusion. They are compliant with rapid development cycles that are common for the development of user interfaces, in contrast to machine-learning approaches that require time-costly training and optimization. We identify seven fundamental requirements for the implementation of semantic fusion: Action derivation, continuous feedback, context-sensitivity, temporal relation support, access to the interaction context, as well as the support of chronologically unsorted and probabilistic input. A subsequent analysis reveals, however, that there is currently no solution for fulfilling the latter two requirements. As the main contribution of this article, we thus present the Concurrent Cursor concept to compensate these shortcomings. 
In addition, we showcase a reference implementation, the Concurrent Augmented Transition Network (cATN), that validates the concept's feasibility in a series of proof of concept demonstrations as well as through a comparative benchmark. The cATN fulfills all identified requirements and fills the lack amongst previous solutions. It supports the rapid prototyping of multimodal interfaces by means of five concrete traits: Its declarative nature, the recursiveness of the underlying transition network, the network abstraction constructs of its description language, the utilized semantic queries, and an abstraction layer for lexical information. Our reference implementation was and is used in various student projects, theses, as well as master-level courses. It is openly available and showcases that non-experts can effectively implement multimodal interfaces, even for non-trivial applications in mixed and virtual reality.}, language = {en} } @article{KoopmannStubbemannKapaetal.2021, author = {Koopmann, Tobias and Stubbemann, Maximilian and Kapa, Matthias and Paris, Michael and Buenstorf, Guido and Hanika, Tom and Hotho, Andreas and J{\"a}schke, Robert and Stumme, Gerd}, title = {Proximity dimensions and the emergence of collaboration: a HypTrails study on German AI research}, series = {Scientometrics}, volume = {126}, journal = {Scientometrics}, number = {12}, issn = {1588-2861}, doi = {10.1007/s11192-021-03922-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269831}, pages = {9847-9868}, year = {2021}, abstract = {Creation and exchange of knowledge depends on collaboration. Recent work has suggested that the emergence of collaboration frequently relies on geographic proximity. However, being co-located tends to be associated with other dimensions of proximity, such as social ties or a shared organizational environment. To account for such factors, multiple dimensions of proximity have been proposed, including cognitive, institutional, organizational, social and geographical proximity. Since they strongly interrelate, disentangling these dimensions and their respective impact on collaboration is challenging. To address this issue, we propose various methods for measuring different dimensions of proximity. We then present an approach to compare and rank them with respect to the extent to which they indicate co-publications and co-inventions. We adapt the HypTrails approach, which was originally developed to explain human navigation, to co-author and co-inventor graphs. We evaluate this approach on a subset of the German research community, specifically academic authors and inventors active in research on artificial intelligence (AI). 
We find that social proximity and cognitive proximity are more important for the emergence of collaboration than geographic proximity.}, language = {en} } @article{HirthSeufertLangeetal.2021, author = {Hirth, Matthias and Seufert, Michael and Lange, Stanislav and Meixner, Markus and Tran-Gia, Phuoc}, title = {Performance evaluation of hybrid crowdsensing and fixed sensor systems for event detection in urban environments}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {17}, issn = {1424-8220}, doi = {10.3390/s21175880}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-245245}, year = {2021}, abstract = {Crowdsensing offers a cost-effective way to collect large amounts of environmental sensor data; however, the spatial distribution of crowdsensing sensors can hardly be influenced, as the participants carry the sensors, and, additionally, the quality of the crowdsensed data can vary significantly. Hybrid systems that use mobile users in conjunction with fixed sensors might help to overcome these limitations, as such systems allow assessing the quality of the submitted crowdsensed data and provide sensor values where no crowdsensing data are typically available. In this work, we first used a simulation study to analyze a simple crowdsensing system concerning the detection performance of spatial events to highlight the potential and limitations of a pure crowdsourcing system. The results indicate that even if only a small share of inhabitants participate in crowdsensing, events that have locations correlated with the population density can be easily and quickly detected using such a system. On the contrary, events with uniformly randomly distributed locations are much harder to detect using a simple crowdsensing-based approach. A second evaluation shows that hybrid systems improve the detection probability and time. Finally, we illustrate how to compute the minimum number of fixed sensors for the given detection time thresholds in our exemplary scenario.}, language = {en} } @article{SirbuBeckerCaminitietal.2015, author = {S{\^i}rbu, Alina and Becker, Martin and Caminiti, Saverio and De Baets, Bernard and Elen, Bart and Francis, Louise and Gravino, Pietro and Hotho, Andreas and Ingarra, Stefano and Loreto, Vittorio and Molino, Andrea and Mueller, Juergen and Peters, Jan and Ricchiuti, Ferdinando and Saracino, Fabio and Servedio, Vito D.P. and Stumme, Gerd and Theunis, Jan and Tria, Francesca and Van den Bossche, Joris}, title = {Participatory Patterns in an International Air Quality Monitoring Initiative}, series = {PLoS ONE}, volume = {10}, journal = {PLoS ONE}, number = {8}, doi = {10.1371/journal.pone.0136763}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-151379}, pages = {e0136763}, year = {2015}, abstract = {The issue of sustainability is at the top of the political and societal agenda, being considered of extreme importance and urgency. Human individual action impacts the environment both locally (e.g., local air/water quality, noise disturbance) and globally (e.g., climate change, resource use). Urban environments represent a crucial example, with an increasing realization that the most effective way of producing a change is involving the citizens themselves in monitoring campaigns (a citizen science bottom-up approach). This is possible by developing novel technologies and IT infrastructures enabling large citizen participation. 
Here, in the wider framework of one of the first such projects, we show results from an international competition where citizens were involved in mobile air pollution monitoring using low cost sensing devices, combined with a web-based game to monitor perceived levels of pollution. Measures of shift in perceptions over the course of the campaign are provided, together with insights into participatory patterns emerging from this study. Interesting effects related to inertia and to direct involvement in measurement activities rather than indirect information exposure are also highlighted, indicating that direct involvement can enhance learning and environmental awareness. In the future, this could result in better adoption of policies towards decreasing pollution.}, language = {en} } @article{SeufertPoigneeHossfeldetal.2022, author = {Seufert, Anika and Poign{\'e}e, Fabian and Hoßfeld, Tobias and Seufert, Michael}, title = {Pandemic in the digital age: analyzing WhatsApp communication behavior before, during, and after the COVID-19 lockdown}, series = {Humanities and Social Sciences Communications}, volume = {9}, journal = {Humanities and Social Sciences Communications}, doi = {10.1057/s41599-022-01161-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300261}, year = {2022}, abstract = {The strict restrictions introduced by the COVID-19 lockdowns, which started from March 2020, changed people's daily lives and habits on many different levels. In this work, we investigate the impact of the lockdown on the communication behavior in the mobile instant messaging application WhatsApp. Our evaluations are based on a large dataset of 2577 private chat histories with 25,378,093 messages from 51,973 users. The analysis of the one-to-one and group conversations confirms that the lockdown severely altered the communication in WhatsApp chats compared to pre-pandemic time ranges. In particular, we observe short-term effects, which caused an increased message frequency in the first lockdown months and a shifted communication activity during the day in March and April 2020. Moreover, we also see long-term effects of the ongoing pandemic situation until February 2021, which indicate a change of communication behavior towards more regular messaging, as well as a persisting change in activity during the day. The results of our work show that even anonymized chat histories can tell us a lot about people's behavior and especially behavioral changes during the COVID-19 pandemic and thus are of great relevance for behavioral researchers. 
Furthermore, looking at the pandemic from an Internet provider perspective, these insights can be used during the next pandemic, or if the current COVID-19 situation worsens, to adapt communication networks to the changed usage behavior early on and thus avoid network congestion.}, language = {en} } @article{KernKullmannGanaletal.2021, author = {Kern, Florian and Kullmann, Peter and Ganal, Elisabeth and Korwisi, Kristof and Stingl, Ren{\'e} and Niebling, Florian and Latoschik, Marc Erich}, title = {Off-The-Shelf Stylus: Using XR Devices for Handwriting and Sketching on Physically Aligned Virtual Surfaces}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.684498}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260219}, year = {2021}, abstract = {This article introduces the Off-The-Shelf Stylus (OTSS), a framework for 2D interaction (in 3D) as well as for handwriting and sketching with digital pen, ink, and paper on physically aligned virtual surfaces in Virtual, Augmented, and Mixed Reality (VR, AR, MR: XR for short). OTSS supports self-made XR styluses based on consumer-grade six-degrees-of-freedom XR controllers and commercially available styluses. The framework provides separate modules for three basic but vital features: 1) The stylus module provides stylus construction and calibration features. 2) The surface module provides surface calibration and visual feedback features for virtual-physical 2D surface alignment using our so-called 3ViSuAl procedure, and surface interaction features. 3) The evaluation suite provides a comprehensive test bed combining technical measurements for precision, accuracy, and latency with extensive usability evaluations including handwriting and sketching tasks based on established visuomotor, graphomotor, and handwriting research. The framework's development is accompanied by an extensive open source reference implementation targeting the Unity game engine using an Oculus Rift S headset and Oculus Touch controllers. The development compares three low-cost and low-tech options to equip controllers with a tip and includes a web browser-based surface providing support for interacting, handwriting, and sketching. The evaluation of the reference implementation based on the OTSS framework identified an average stylus precision of 0.98 mm (SD = 0.54 mm) and an average surface accuracy of 0.60 mm (SD = 0.32 mm) in a seated VR environment. The time for displaying the stylus movement as digital ink on the web browser surface in VR was 79.40 ms on average (SD = 23.26 ms), including the physical controller's motion-to-photon latency visualized by its virtual representation (M = 42.57 ms, SD = 15.70 ms). The usability evaluation (N = 10) revealed a low task load, high usability, and high user experience. Participants successfully reproduced given shapes and created legible handwriting, indicating that the OTSS and its reference implementation are ready for everyday use. 
We provide source code access to our implementation, including stylus and surface calibration and surface interaction features, making it easy to reuse, extend, adapt and/or replicate previous results (https://go.uniwue.de/hci-otss).}, language = {en} } @article{ObremskiLugrinSchaperetal.2021, author = {Obremski, David and Lugrin, Jean-Luc and Schaper, Philipp and Lugrin, Birgit}, title = {Non-native speaker perception of Intelligent Virtual Agents in two languages: the impact of amount and type of grammatical mistakes}, series = {Journal on Multimodal User Interfaces}, volume = {15}, journal = {Journal on Multimodal User Interfaces}, number = {2}, issn = {1783-8738}, doi = {10.1007/s12193-021-00369-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269984}, pages = {229-238}, year = {2021}, abstract = {Having a mixed-cultural membership becomes increasingly common in our modern society. It is thus beneficial in several ways to create Intelligent Virtual Agents (IVAs) that reflect a mixed-cultural background as well, e.g., for educational settings. For research with such IVAs, it is essential that they are classified as non-native by members of a target culture. In this paper, we focus on variations of IVAs' speech to create the impression of non-native speakers that are identified as such by speakers of two different mother tongues. In particular, we investigate grammatical mistakes and identify thresholds beyond which the agent is clearly categorised as a non-native speaker. Therefore, we conducted two experiments: one for native speakers of German, and one for native speakers of English. Results of the German study indicate that beyond 10\% of word order mistakes and 25\% of infinitive mistakes German-speaking IVAs are perceived as non-native speakers. Results of the English study indicate that beyond 50\% of omission mistakes and 50\% of infinitive mistakes English-speaking IVAs are perceived as non-native speakers. We believe these thresholds constitute helpful guidelines for computational approaches of non-native speaker generation, simplifying research with IVAs in mixed-cultural settings.}, language = {en} } @article{BencurovaShityakovSchaacketal.2022, author = {Bencurova, Elena and Shityakov, Sergey and Schaack, Dominik and Kaltdorf, Martin and Sarukhanyan, Edita and Hilgarth, Alexander and Rath, Christin and Montenegro, Sergio and Roth, G{\"u}nter and Lopez, Daniel and Dandekar, Thomas}, title = {Nanocellulose composites as smart devices with chassis, light-directed DNA Storage, engineered electronic properties, and chip integration}, series = {Frontiers in Bioengineering and Biotechnology}, volume = {10}, journal = {Frontiers in Bioengineering and Biotechnology}, issn = {2296-4185}, doi = {10.3389/fbioe.2022.869111}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-283033}, year = {2022}, abstract = {The rapid development of green and sustainable materials opens up new possibilities in the field of applied research. Such materials include nanocellulose composites that can integrate many components into composites and provide a good chassis for smart devices. In our study, we evaluate four approaches for turning a nanocellulose composite into an information storage or processing device: 1) nanocellulose can be a suitable carrier material and protect information stored in DNA. 
2) Nucleotide-processing enzymes (polymerase and exonuclease) can be controlled by light after fusing them with light-gating domains; nucleotide substrate specificity can be changed by mutation or pH change (read-in and read-out of the information). 3) Semiconductors and electronic capabilities can be achieved: we show that nanocellulose is rendered electronic by iodine treatment replacing silicon including microstructures. Nanocellulose semiconductor properties are measured, and the resulting potential including single-electron transistors (SET) and their properties are modeled. Electric current can also be transported by DNA through G-quadruplex DNA molecules; these as well as classical silicon semiconductors can easily be integrated into the nanocellulose composite. 4) To elaborate upon miniaturization and integration for a smart nanocellulose chip device, we demonstrate pH-sensitive dyes in nanocellulose, nanopore creation, and kinase micropatterning on bacterial membranes as well as digital PCR micro-wells. Future application potential includes nano-3D printing and fast molecular processors (e.g., SETs) integrated with DNA storage and conventional electronics. This would also lead to environment-friendly nanocellulose chips for information processing as well as smart nanocellulose composites for biomedical applications and nano-factories.}, language = {en} } @article{OberdoerferBirnstielLatoschiketal.2021, author = {Oberd{\"o}rfer, Sebastian and Birnstiel, Sandra and Latoschik, Marc Erich and Grafe, Silke}, title = {Mutual Benefits: Interdisciplinary Education of Pre-Service Teachers and HCI Students in VR/AR Learning Environment Design}, series = {Frontiers in Education}, volume = {6}, journal = {Frontiers in Education}, issn = {2504-284X}, doi = {10.3389/feduc.2021.693012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241612}, year = {2021}, abstract = {The successful development and classroom integration of Virtual (VR) and Augmented Reality (AR) learning environments requires competencies and content knowledge with respect to media didactics and the respective technologies. The paper discusses a pedagogical concept specifically aiming at the interdisciplinary education of pre-service teachers in collaboration with human-computer interaction students. The students' overarching goal is the interdisciplinary realization and integration of VR/AR learning environments in teaching and learning concepts. To assist this approach, we developed a specific tutorial guiding the developmental process. We evaluate and validate the effectiveness of the overall pedagogical concept by analyzing the change in attitudes regarding 1) the use of VR/AR for educational purposes and in competencies and content knowledge regarding 2) media didactics and 3) technology. Our results indicate a significant improvement in the knowledge of media didactics and technology. 
We further report on four STEM learning environments that have been developed during the seminar.}, language = {en} } @article{DjebkoPuppeKayal2019, author = {Djebko, Kirill and Puppe, Frank and Kayal, Hakan}, title = {Model-based fault detection and diagnosis for spacecraft with an application for the SONATE triple cube nano-satellite}, series = {Aerospace}, volume = {6}, journal = {Aerospace}, number = {10}, issn = {2226-4310}, doi = {10.3390/aerospace6100105}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-198836}, pages = {105}, year = {2019}, abstract = {The correct behavior of spacecraft components is the foundation of unhindered mission operation. However, no technical system is free of wear and degradation. A malfunction of one single component might significantly alter the behavior of the whole spacecraft and may even lead to a complete mission failure. Therefore, abnormal component behavior must be detected early in order to be able to perform countermeasures. A dedicated fault detection system can be employed, as opposed to classical health monitoring, performed by human operators, to decrease the response time to a malfunction. In this paper, we present a generic model-based diagnosis system, which detects faults by analyzing the spacecraft's housekeeping data. The observed behavior of the spacecraft components, given by the housekeeping data, is compared to their expected behavior, obtained through simulation. Each discrepancy between the observed and the expected behavior of a component generates a so-called symptom. Given the symptoms, the diagnoses are derived by computing sets of components whose malfunction might cause the observed discrepancies. We demonstrate the applicability of the diagnosis system by using modified housekeeping data of the qualification model of an actual spacecraft and outline the advantages and drawbacks of our approach.}, language = {en} } @article{WienrichCarolusMarkusetal.2023, author = {Wienrich, Carolin and Carolus, Astrid and Markus, Andr{\'e} and Augustin, Yannik and Pfister, Jan and Hotho, Andreas}, title = {Long-term effects of perceived friendship with intelligent voice assistants on usage behavior, user experience, and social perceptions}, series = {Computers}, volume = {12}, journal = {Computers}, number = {4}, issn = {2073-431X}, doi = {10.3390/computers12040077}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313552}, year = {2023}, abstract = {Social patterns and roles can develop when users talk to intelligent voice assistants (IVAs) daily. The current study investigates whether users assign different roles to devices and how this affects their usage behavior, user experience, and social perceptions. Since social roles take time to establish, we equipped 106 participants with Alexa or Google assistants and some smart home devices and observed their interactions for nine months. We analyzed diverse subjective (questionnaire) and objective data (interaction data). By combining social science and data science analyses, we identified two distinct clusters—users who assigned a friendship role to IVAs over time and users who did not. Interestingly, these clusters exhibited significant differences in their usage behavior, user experience, and social perceptions of the devices. For example, participants who assigned a role to IVAs attributed more friendship to them, used them more frequently, reported more enjoyment during interactions, and perceived more empathy for IVAs. 
In addition, these users had distinct personal requirements, for example, they reported more loneliness. This study provides valuable insights into the role-specific effects and consequences of voice assistants. Recent developments in conversational language models such as ChatGPT suggest that the findings of this study could make an important contribution to the design of dialogic human-AI interactions.}, language = {en} } @article{KlemzRote2022, author = {Klemz, Boris and Rote, G{\"u}nter}, title = {Linear-Time Algorithms for Maximum-Weight Induced Matchings and Minimum Chain Covers in Convex Bipartite Graphs}, series = {Algorithmica}, volume = {84}, journal = {Algorithmica}, number = {4}, issn = {1432-0541}, doi = {10.1007/s00453-021-00904-w}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-267876}, pages = {1064-1080}, year = {2022}, abstract = {A bipartite graph G=(U,V,E) is convex if the vertices in V can be linearly ordered such that for each vertex u∈U, the neighbors of u are consecutive in the ordering of V. An induced matching H of G is a matching for which no edge of E connects endpoints of two different edges of H. We show that in a convex bipartite graph with n vertices and m weighted edges, an induced matching of maximum total weight can be computed in O(n+m) time. An unweighted convex bipartite graph has a representation of size O(n) that records for each vertex u∈U the first and last neighbor in the ordering of V. Given such a compact representation, we compute an induced matching of maximum cardinality in O(n) time. In convex bipartite graphs, maximum-cardinality induced matchings are dual to minimum chain covers. A chain cover is a covering of the edge set by chain subgraphs, that is, subgraphs that do not contain induced matchings of more than one edge. Given a compact representation, we compute a representation of a minimum chain cover in O(n) time. If no compact representation is given, the cover can be computed in O(n+m) time. All of our algorithms achieve optimal linear running time for the respective problem and model, and they improve and generalize the previous results in several ways: The best algorithms for the unweighted problem versions had a running time of O(n\(^{2}\)) (Brandst{\"a}dt et al. in Theor. Comput. Sci. 381(1-3):260-265, 2007. https://doi.org/10.1016/j.tcs.2007.04.006). The weighted case has not been considered before.}, language = {en} } @article{FischerHarteltPuppe2023, author = {Fischer, Norbert and Hartelt, Alexander and Puppe, Frank}, title = {Line-level layout recognition of historical documents with background knowledge}, series = {Algorithms}, volume = {16}, journal = {Algorithms}, number = {3}, issn = {1999-4893}, doi = {10.3390/a16030136}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-310938}, year = {2023}, abstract = {Digitization and transcription of historic documents offer new research opportunities for humanists and are the topics of many edition projects. However, manual work is still required for the main phases of layout recognition and the subsequent optical character recognition (OCR) of early printed documents. This paper describes and evaluates how deep learning approaches recognize text lines and can be extended to layout recognition using background knowledge. The evaluation was performed on five corpora of early prints from the 15th and 16th Centuries, representing a variety of layout features. 
While the main text with standard layouts could be recognized in the correct reading order with a precision and recall of up to 99.9\%, complex layouts were also recognized at a rate as high as 90\% by using background knowledge, the full potential of which was revealed if many pages of the same source were transcribed.}, language = {en} } @article{OberdoerferLatoschik2019, author = {Oberd{\"o}rfer, Sebastian and Latoschik, Marc Erich}, title = {Knowledge encoding in game mechanics: transfer-oriented knowledge learning in desktop-3D and VR}, series = {International Journal of Computer Games Technology}, volume = {2019}, journal = {International Journal of Computer Games Technology}, doi = {10.1155/2019/7626349}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-201159}, pages = {7626349}, year = {2019}, abstract = {Affine Transformations (ATs) are a complex and abstract learning content. Encoding the AT knowledge in Game Mechanics (GMs) achieves a repetitive knowledge application and audiovisual demonstration. Playing a serious game providing these GMs leads to motivating and effective knowledge learning. Using immersive Virtual Reality (VR) has the potential to even further increase the serious game's learning outcome and learning quality. This paper compares the effectiveness and efficiency of desktop-3D and VR in respect to the achieved learning outcome. Also, the present study analyzes the effectiveness of an enhanced audiovisual knowledge encoding and the provision of a debriefing system. The results validate the effectiveness of the knowledge encoding in GMs to achieve knowledge learning. The study also indicates that VR is beneficial for the overall learning quality and that an enhanced audiovisual encoding has only a limited effect on the learning outcome.}, language = {en} } @article{KempfKrugPuppe2023, author = {Kempf, Sebastian and Krug, Markus and Puppe, Frank}, title = {KIETA: Key-insight extraction from scientific tables}, series = {Applied Intelligence}, volume = {53}, journal = {Applied Intelligence}, number = {8}, issn = {0924-669X}, doi = {10.1007/s10489-022-03957-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324180}, pages = {9513-9530}, year = {2023}, abstract = {An important but very time-consuming part of the research process is literature review. An already large and nevertheless growing ground set of publications as well as a steadily increasing publication rate continue to worsen the situation. Consequently, automating this task as far as possible is desirable. Experimental results of systems are key-insights of high importance during literature review and usually represented in form of tables. Our pipeline KIETA exploits these tables to contribute to the endeavor of automation by extracting them and their contained knowledge from scientific publications. The pipeline is split into multiple steps to guarantee modularity as well as analyzability, and agnosticism regarding the specific scientific domain up until the knowledge extraction step, which is based upon an ontology. Additionally, a dataset of corresponding articles has been manually annotated with information regarding table and knowledge extraction. Experiments show promising results that signal the possibility of an automated system, while also indicating limits of extracting knowledge from tables without any context.}, language = {en} } @article{SteinhaeusserOberdoerfervonMammenetal.2022, author = {Steinhaeusser, Sophia C. 
and Oberd{\"o}rfer, Sebastian and von Mammen, Sebastian and Latoschik, Marc Erich and Lugrin, Birgit}, title = {Joyful adventures and frightening places - designing emotion-inducing virtual environments}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.919163}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284831}, year = {2022}, abstract = {Virtual environments (VEs) can evoke and support emotions, as experienced when playing emotionally arousing games. We theoretically approach the design of fear and joy evoking VEs based on a literature review of empirical studies on virtual and real environments as well as video games' reviews and content analyses. We define the design space and identify central design elements that evoke specific positive and negative emotions. Based on that, we derive and present guidelines for emotion-inducing VE design with respect to design themes, colors and textures, and lighting configurations. To validate our guidelines in two user studies, we 1) expose participants to 360° videos of VEs designed following the individual guidelines and 2) immerse them in a neutral, positive and negative emotion-inducing VEs combining all respective guidelines in Virtual Reality. The results support our theoretically derived guidelines by revealing significant differences in terms of fear and joy induction.}, language = {en} } @article{LandeckAlvarezIgarzabalUnruhetal.2022, author = {Landeck, Maximilian and Alvarez Igarz{\´a}bal, Federico and Unruh, Fabian and Habenicht, Hannah and Khoshnoud, Shiva and Wittmann, Marc and Lugrin, Jean-Luc and Latoschik, Marc Erich}, title = {Journey through a virtual tunnel: Simulated motion and its effects on the experience of time}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.1059971}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301519}, year = {2022}, abstract = {This paper examines the relationship between time and motion perception in virtual environments. Previous work has shown that the perception of motion can affect the perception of time. We developed a virtual environment that simulates motion in a tunnel and measured its effects on the estimation of the duration of time, the speed at which perceived time passes, and the illusion of self-motion, also known as vection. When large areas of the visual field move in the same direction, vection can occur; observers often perceive this as self-motion rather than motion of the environment. To generate different levels of vection and investigate its effects on time perception, we developed an abstract procedural tunnel generator. The generator can simulate different speeds and densities of tunnel sections (visibly distinguishable sections that form the virtual tunnel), as well as the degree of embodiment of the user avatar (with or without virtual hands). We exposed participants to various tunnel simulations with different durations, speeds, and densities in a remote desktop and a virtual reality (VR) laboratory study. Time passed subjectively faster under high-speed and high-density conditions in both studies. The experience of self-motion was also stronger under high-speed and high-density conditions. Both studies revealed a significant correlation between the perceived passage of time and perceived self-motion. 
Subjects in the virtual reality study reported a stronger self-motion experience, a faster perceived passage of time, and shorter time estimates than subjects in the desktop study. Our results suggest that a virtual tunnel simulation can manipulate time perception in virtual reality. We will explore these results for the development of virtual reality applications for therapeutic approaches in our future work. This could be particularly useful in treating disorders like depression, autism, and schizophrenia, which are known to be associated with distortions in time perception. For example, the tunnel could be therapeutically applied by resetting patients' time perception through exposure to the tunnel under different conditions, such as increasing or decreasing perceived time.}, language = {en} } @article{KrupitzerEberhardingerGerostathopoulosetal.2020, author = {Krupitzer, Christian and Eberhardinger, Benedikt and Gerostathopoulos, Ilias and Raibulet, Claudia}, title = {Introduction to the special issue "Applications in Self-Aware Computing Systems and their Evaluation"}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010022}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-203439}, year = {2020}, abstract = {The joint 1st Workshop on Evaluations and Measurements in Self-Aware Computing Systems (EMSAC 2019) and Workshop on Self-Aware Computing (SeAC) was held as part of the FAS* conference alliance in conjunction with the 16th IEEE International Conference on Autonomic Computing (ICAC) and the 13th IEEE International Conference on Self-Adaptive and Self-Organizing Systems (SASO) in Ume{\aa}, Sweden on 20 June 2019. The goal of this one-day workshop was to bring together researchers and practitioners from academic environments and from the industry to share their solutions, ideas, visions, and doubts in self-aware computing systems in general and in the evaluation and measurements of such systems in particular. The workshop aimed to enable discussions, partnerships, and collaborations among the participants. This special issue follows the theme of the workshop. It contains extended versions of workshop presentations as well as additional contributions.}, language = {en} } @article{HeinLatoschikWienrich2022, author = {Hein, Rebecca M. and Latoschik, Marc Erich and Wienrich, Carolin}, title = {Inter- and transcultural learning in social virtual reality: a proposal for an inter- and transcultural virtual object database to be used in the implementation, reflection, and evaluation of virtual encounters}, series = {Multimodal Technologies and Interaction}, volume = {6}, journal = {Multimodal Technologies and Interaction}, number = {7}, issn = {2414-4088}, doi = {10.3390/mti6070050}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-278974}, year = {2022}, abstract = {Visual stimuli are frequently used to improve memory, language learning or perception, and understanding of metacognitive processes. However, in virtual reality (VR), there are few systematically and empirically derived databases. This paper proposes the first collection of virtual objects based on empirical evaluation for inter- and transcultural encounters between English- and German-speaking learners. We used explicit and implicit measurement methods to identify cultural associations and the degree of stereotypical perception for each virtual stimulus (n = 293) through two online studies, including native German and English-speaking participants.
The analysis resulted in a final well-describable database of 128 objects (called InteractionSuitcase). In future applications, the objects can be used as a great interaction or conversation asset and behavioral measurement tool in social VR applications, especially in the field of foreign language education. For example, encounters can use the objects to describe their culture, or teachers can intuitively assess stereotyped attitudes of the encounters.}, language = {en} } @article{RiedmannSchaperLugrin2022, author = {Riedmann, Anna and Schaper, Philipp and Lugrin, Birgit}, title = {Integration of a social robot and gamification in adult learning and effects on motivation, engagement and performance}, series = {AI \& Society}, journal = {AI \& Society}, issn = {0951-5666}, doi = {10.1007/s00146-022-01514-y}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324208}, year = {2022}, abstract = {Learning is a central component of human life and essential for personal development. Therefore, utilizing new technologies in the learning context and exploring their combined potential are considered essential to support self-directed learning in a digital age. A learning environment can be expanded by various technical and content-related aspects. Gamification in the form of elements from video games offers a potential concept to support the learning process. This can be supplemented by technology-supported learning. While the use of tablets is already widespread in the learning context, the integration of a social robot can provide new perspectives on the learning process. However, simply adding new technologies such as social robots or gamification to existing systems may not automatically result in a better learning environment. In the present study, game elements as well as a social robot were integrated separately and conjointly into a learning environment for basic Spanish skills, with a follow-up on retained knowledge. This allowed us to investigate the respective and combined effects of both expansions on motivation, engagement and learning effect. This approach should provide insights into the integration of both additions in an adult learning context. We found that the additions of game elements and the robot did not significantly improve learning, engagement or motivation. Based on these results and a literature review, we outline relevant factors for meaningful integration of gamification and social robots in learning environments in adult learning.}, language = {en} } @article{LiGuanGaoetal.2020, author = {Li, Ningbo and Guan, Lianwu and Gao, Yanbin and Du, Shitong and Wu, Menghao and Guang, Xingxing and Cong, Xiaodan}, title = {Indoor and outdoor low-cost seamless integrated navigation system based on the integration of INS/GNSS/LIDAR system}, series = {Remote Sensing}, volume = {12}, journal = {Remote Sensing}, number = {19}, issn = {2072-4292}, doi = {10.3390/rs12193271}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-216229}, year = {2020}, abstract = {Global Navigation Satellite System (GNSS) provides accurate positioning data for vehicular navigation in open outdoor environment. In an indoor environment, Light Detection and Ranging (LIDAR) Simultaneous Localization and Mapping (SLAM) establishes a two-dimensional map and provides positioning data. However, LIDAR can only provide relative positioning data and it cannot directly provide the latitude and longitude of the current position. 
As a consequence, GNSS/Inertial Navigation System (INS) integrated navigation could be employed outdoors, while INS/LIDAR integrated navigation is used indoors, and the corresponding switching between the two makes the indoor and outdoor positioning consistent. In addition, when the vehicle enters the garage, the GNSS signal will be blurred for a while and then disappear. Ambiguous GNSS satellite signals will lead to continuous distortion or overall drift of the positioning trajectory in the indoor condition. Therefore, an INS/LIDAR seamless integrated navigation algorithm and a switching algorithm based on the vehicle navigation system are designed. According to the experimental data, the positioning accuracy of the INS/LIDAR navigation algorithm in the simulated environmental experiment is 50\% higher than that of the Dead Reckoning (DR) algorithm. Moreover, the switching algorithm developed based on the INS/LIDAR integrated navigation algorithm can achieve an 80\% success rate in navigation mode switching.}, language = {en} } @article{SchloerRingHotho2020, author = {Schl{\"o}r, Daniel and Ring, Markus and Hotho, Andreas}, title = {iNALU: Improved Neural Arithmetic Logic Unit}, series = {Frontiers in Artificial Intelligence}, volume = {3}, journal = {Frontiers in Artificial Intelligence}, issn = {2624-8212}, doi = {10.3389/frai.2020.00071}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-212301}, year = {2020}, abstract = {Neural networks have to capture mathematical relationships in order to learn various tasks. They approximate these relations implicitly and therefore often do not generalize well. The recently proposed Neural Arithmetic Logic Unit (NALU) is a novel neural architecture which is able to explicitly represent the mathematical relationships by the units of the network to learn operations such as summation, subtraction or multiplication. Although NALUs have been shown to perform well on various downstream tasks, an in-depth analysis reveals practical shortcomings by design, such as the inability to multiply or divide negative input values or training stability issues for deeper networks. We address these issues and propose an improved model architecture. We evaluate our model empirically in various settings from learning basic arithmetic operations to more complex functions. Our experiments indicate that our model solves stability issues and outperforms the original NALU model in terms of arithmetic precision and convergence.}, language = {en} } @article{LopezArreguinMontenegro2019, author = {Lopez-Arreguin, A. J. R. and Montenegro, S.}, title = {Improving engineering models of terramechanics for planetary exploration}, series = {Results in Engineering}, volume = {3}, journal = {Results in Engineering}, doi = {10.1016/j.rineng.2019.100027}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-202490}, pages = {100027}, year = {2019}, abstract = {This short letter proposes more consolidated explicit solutions for the forces and torques acting on typical rover wheels, which can be used as a method to determine their average mobility characteristics in planetary soils. The closed-loop solutions are based on one of the verified methods, but in contrast to the previous one, the observables are decoupled, requiring a smaller number of physical parameters to be measured. As a result, we show that, with knowledge of terrain properties, wheel driving performance relies on a single observable only.
Because of their generality, the equations formulated here can have further implications for the autonomy and control of rovers or for planetary soil characterization.}, language = {en} } @article{MaiwaldBruschkeSchneideretal.2023, author = {Maiwald, Ferdinand and Bruschke, Jonas and Schneider, Danilo and Wacker, Markus and Niebling, Florian}, title = {Giving historical photographs a new perspective: introducing camera orientation parameters as new metadata in a large-scale 4D application}, series = {Remote Sensing}, volume = {15}, journal = {Remote Sensing}, number = {7}, issn = {2072-4292}, doi = {10.3390/rs15071879}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-311103}, year = {2023}, abstract = {The ongoing digitization of historical photographs in archives allows investigating the quality, quantity, and distribution of these images. However, the exact interior and exterior camera orientations of these photographs are usually lost during the digitization process. The proposed method uses content-based image retrieval (CBIR) to filter exterior images of single buildings in combination with metadata information. The retrieved photographs are automatically processed in an adapted structure-from-motion (SfM) pipeline to determine the camera parameters. In an interactive georeferencing process, the calculated camera positions are transferred into a global coordinate system. As all image and camera data are efficiently stored in the proposed 4D database, they can be conveniently accessed afterward to georeference newly digitized images by using photogrammetric triangulation and spatial resection. The results show that the CBIR and the subsequent SfM are robust methods for various kinds of buildings and different quantities of data. The absolute accuracy of the camera positions after georeferencing lies in the range of a few meters, likely introduced by the inaccurate LOD2 models used for transformation. The proposed photogrammetric method, the database structure, and the 4D visualization interface enable adding historical urban photographs and 3D models from other locations.}, language = {en} } @article{Puppe2022, author = {Puppe, Frank}, title = {Gesellschaftliche Perspektiven einer fachspezifischen KI f{\"u}r automatisierte Entscheidungen}, series = {Informatik Spektrum}, volume = {45}, journal = {Informatik Spektrum}, number = {2}, issn = {0170-6012}, doi = {10.1007/s00287-022-01443-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324197}, pages = {88-95}, year = {2022}, abstract = {Die k{\"u}nstliche Intelligenz (KI) entwickelt sich rasant und hat bereits eindrucksvolle Erfolge zu verzeichnen, darunter {\"u}bermenschliche Kompetenz in den meisten Spielen und vielen Quizshows, intelligente Suchmaschinen, individualisierte Werbung, Spracherkennung, -ausgabe und -{\"u}bersetzung auf sehr hohem Niveau und hervorragende Leistungen bei der Bildverarbeitung, u. a. in der Medizin, der optischen Zeichenerkennung, beim autonomen Fahren, aber auch beim Erkennen von Menschen auf Bildern und Videos oder bei Deep Fakes f{\"u}r Fotos und Videos. Es ist zu erwarten, dass die KI auch in der Entscheidungsfindung Menschen {\"u}bertreffen wird; ein alter Traum der Expertensysteme, der durch Lernverfahren, Big Data und Zugang zu dem gesammelten Wissen im Web in greifbare N{\"a}he r{\"u}ckt.
Gegenstand dieses Beitrags sind aber weniger die technischen Entwicklungen, sondern m{\"o}gliche gesellschaftliche Auswirkungen einer spezialisierten, kompetenten KI f{\"u}r verschiedene Bereiche der autonomen, d. h. nicht nur unterst{\"u}tzenden Entscheidungsfindung: als Fußballschiedsrichter, in der Medizin, f{\"u}r richterliche Entscheidungen und sehr spekulativ auch im politischen Bereich. Dabei werden Vor- und Nachteile dieser Szenarien aus gesellschaftlicher Sicht diskutiert.}, subject = {K{\"u}nstliche Intelligenz}, language = {de} } @article{ToepferCorovicFetteetal.2015, author = {Toepfer, Martin and Corovic, Hamo and Fette, Georg and Kl{\"u}gl, Peter and St{\"o}rk, Stefan and Puppe, Frank}, title = {Fine-grained information extraction from German transthoracic echocardiography reports}, series = {BMC Medical Informatics and Decision Making}, volume = {15}, journal = {BMC Medical Informatics and Decision Making}, number = {91}, doi = {10.1186/s12911-015-0215-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-125509}, year = {2015}, abstract = {Background: Information extraction techniques that get structured representations out of unstructured data make a large amount of clinically relevant information about patients accessible for semantic applications. These methods typically rely on standardized terminologies that guide this process. Many languages and clinical domains, however, lack appropriate resources and tools, as well as evaluations of their applications, especially if detailed conceptualizations of the domain are required. For instance, German transthoracic echocardiography reports have not been targeted sufficiently before, despite their importance for clinical trials. This work therefore aimed at the development and evaluation of an information extraction component with a fine-grained terminology that enables the recognition of almost all relevant information stated in German transthoracic echocardiography reports at the University Hospital of W{\"u}rzburg. Methods: A domain expert validated and iteratively refined an automatically inferred base terminology. The terminology was used by an ontology-driven information extraction system that outputs attribute-value pairs. The final component has been mapped to the central elements of a standardized terminology, and it has been evaluated according to documents with different layouts. Results: The final system achieved state-of-the-art precision (micro average .996) and recall (micro average .961) on 100 test documents that represent more than 90\% of all reports. In particular, principal aspects as defined in a standardized external terminology were recognized with f\(_{1}\)=.989 (micro average) and f\(_{1}\)=.963 (macro average). As a result of keyword matching and restraint concept extraction, the system obtained high precision also on unstructured or exceptionally short documents, and documents with uncommon layout. Conclusions: The developed terminology and the proposed information extraction system allow the extraction of fine-grained information from German semi-structured transthoracic echocardiography reports with very high precision and high recall on the majority of documents at the University Hospital of W{\"u}rzburg. Extracted results populate a clinical data warehouse which supports clinical research.}, language = {en} } @article{KrenzerMakowskiHekaloetal.2022, author = {Krenzer, Adrian and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Zoller, Wolfram G.
and Hann, Alexander and Puppe, Frank}, title = {Fast machine learning annotation in the medical domain: a semi-automated video annotation tool for gastroenterologists}, series = {BioMedical Engineering OnLine}, volume = {21}, journal = {BioMedical Engineering OnLine}, number = {1}, doi = {10.1186/s12938-022-01001-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300231}, year = {2022}, abstract = {Background Machine learning, especially deep learning, is becoming more and more relevant in research and development in the medical domain. For all the supervised deep learning applications, data is the most critical factor in securing successful implementation and sustaining the progress of the machine learning model. Especially gastroenterological data, which often involves endoscopic videos, are cumbersome to annotate. Domain experts are needed to interpret and annotate the videos. To support those domain experts, we generated a framework. With this framework, instead of annotating every frame in the video sequence, experts are just performing key annotations at the beginning and the end of sequences with pathologies, e.g., visible polyps. Subsequently, non-expert annotators supported by machine learning add the missing annotations for the frames in-between. Methods In our framework, an expert reviews the video and annotates a few video frames to verify the object's annotations for the non-expert. In a second step, a non-expert has visual confirmation of the given object and can annotate all following and preceding frames with AI assistance. After the expert has finished, relevant frames will be selected and passed on to an AI model. This information allows the AI model to detect and mark the desired object on all following and preceding frames with an annotation. Therefore, the non-expert can adjust and modify the AI predictions and export the results, which can then be used to train the AI model. Results Using this framework, we were able to reduce workload of domain experts on average by a factor of 20 on our data. This is primarily due to the structure of the framework, which is designed to minimize the workload of the domain expert. Pairing this framework with a state-of-the-art semi-automated AI model enhances the annotation speed further. Through a prospective study with 10 participants, we show that semi-automated annotation using our tool doubles the annotation speed of non-expert annotators compared to a well-known state-of-the-art annotation tool. Conclusion In summary, we introduce a framework for fast expert annotation for gastroenterologists, which reduces the workload of the domain expert considerably while maintaining a very high annotation quality. The framework incorporates a semi-automated annotation system utilizing trained object detection models. The software and framework are open-source.}, language = {en} } @article{WienrichLatoschik2021, author = {Wienrich, Carolin and Latoschik, Marc Erich}, title = {eXtended Artificial Intelligence: New Prospects of Human-AI Interaction Research}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.686783}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260296}, year = {2021}, abstract = {Artificial Intelligence (AI) covers a broad spectrum of computational problems and use cases. Many of those implicate profound and sometimes intricate questions of how humans interact or should interact with AIs. 
Moreover, many users or future users do have abstract ideas of what AI is, significantly depending on the specific embodiment of AI applications. Human-centered-design approaches would suggest evaluating the impact of different embodiments on human perception of and interaction with AI. Such an evaluation is difficult to realize due to the sheer complexity of application fields and embodiments in reality. However, here XR opens new possibilities to research human-AI interactions. The article's contribution is twofold: First, it provides a theoretical treatment and model of human-AI interaction based on an XR-AI continuum as a framework for and a perspective on different approaches to XR-AI combinations. It motivates XR-AI combinations as a method to learn about the effects of prospective human-AI interfaces and shows why the combination of XR and AI fruitfully contributes to a valid and systematic investigation of human-AI interactions and interfaces. Second, the article provides two exemplary experiments investigating the aforementioned approach for two distinct AI systems. The first experiment reveals an interesting gender effect in human-robot interaction, while the second experiment reveals an Eliza effect of a recommender system. Here the article introduces two paradigmatic implementations of the proposed XR testbed for human-AI interactions and interfaces and shows how a valid and systematic investigation can be conducted. In sum, the article opens new perspectives on how XR benefits human-centered AI design and development.}, language = {en} } @article{LodaKrebsDanhofetal.2019, author = {Loda, Sophia and Krebs, Jonathan and Danhof, Sophia and Schreder, Martin and Solimando, Antonio G. and Strifler, Susanne and Rasche, Leo and Kort{\"u}m, Martin and Kerscher, Alexander and Knop, Stefan and Puppe, Frank and Einsele, Hermann and Bittrich, Max}, title = {Exploration of artificial intelligence use with ARIES in multiple myeloma research}, series = {Journal of Clinical Medicine}, volume = {8}, journal = {Journal of Clinical Medicine}, number = {7}, issn = {2077-0383}, doi = {10.3390/jcm8070999}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197231}, pages = {999}, year = {2019}, abstract = {Background: Natural language processing (NLP) is a powerful tool supporting the generation of Real-World Evidence (RWE). There is no NLP system that enables the extensive querying of parameters specific to multiple myeloma (MM) out of unstructured medical reports. We therefore created an MM-specific ontology to accelerate the information extraction (IE) out of unstructured text. Methods: Our MM ontology consists of extensive MM-specific and hierarchically structured attributes and values. We implemented "A Rule-based Information Extraction System" (ARIES) that uses this ontology. We evaluated ARIES on 200 randomly selected medical reports of patients diagnosed with MM. Results: Our system achieved a high F1-Score of 0.92 on the evaluation dataset with a precision of 0.87 and recall of 0.98. Conclusions: Our rule-based IE system enables the comprehensive querying of medical reports. The IE accelerates the extraction of data and enables clinicians to generate RWE on hematological issues faster. RWE helps clinicians to make decisions in an evidence-based manner.
Our tool easily accelerates the integration of research evidence into everyday clinical practice.}, language = {en} } @article{AliMontenegro2016, author = {Ali, Qasim and Montenegro, Sergio}, title = {Explicit Model Following Distributed Control Scheme for Formation Flying of Mini UAVs}, series = {IEEE Access}, volume = {4}, journal = {IEEE Access}, number = {397-406}, doi = {10.1109/ACCESS.2016.2517203}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146061}, year = {2016}, abstract = {A centralized heterogeneous formation flight position control scheme has been formulated using an explicit model following design, based on a Linear Quadratic Regulator Proportional Integral (LQR PI) controller. The leader quadcopter is a stable reference model with desired dynamics whose output is perfectly tracked by the two wingmen quadcopters. The leader itself is controlled through the pole placement control method with desired stability characteristics, while the two followers are controlled through a robust and adaptive LQR PI control method. Selected 3-D formation geometry and static stability are maintained under a number of possible perturbations. With this control scheme, formation geometry may also be switched to any arbitrary shape during flight, provided a suitable collision avoidance mechanism is incorporated. In case of communication loss between the leader and any of the followers, the other follower provides the data, received from the leader, to the affected follower. The stability of the closed-loop system has been analyzed using singular values. The proposed approach for the tightly coupled formation flight of mini unmanned aerial vehicles has been validated with the help of extensive simulations using MATLAB/Simulink, which provided promising results.}, language = {en} } @article{GehrkeBalbachRauchetal.2019, author = {Gehrke, Alexander and Balbach, Nico and Rauch, Yong-Mi and Degkwitz, Andreas and Puppe, Frank}, title = {Erkennung von handschriftlichen Unterstreichungen in Alten Drucken}, series = {Bibliothek Forschung und Praxis}, volume = {43}, journal = {Bibliothek Forschung und Praxis}, number = {3}, issn = {1865-7648}, doi = {10.1515/bfp-2019-2083}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193377}, pages = {447 -- 452}, year = {2019}, abstract = {Die Erkennung handschriftlicher Artefakte wie Unterstreichungen in Buchdrucken erm{\"o}glicht R{\"u}ckschl{\"u}sse auf das Rezeptionsverhalten und die Provenienzgeschichte und wird auch f{\"u}r eine OCR ben{\"o}tigt. Dabei soll zwischen handschriftlichen Unterstreichungen und waagerechten Linien im Druck (z. B. Trennlinien usw.) unterschieden werden, da letztere nicht ausgezeichnet werden sollen. Im Beitrag wird ein Ansatz basierend auf einem auf Unterstreichungen trainierten Neuronalen Netz gem{\"a}ß der U-Net Architektur vorgestellt, dessen Ergebnisse in einem zweiten Schritt mit heuristischen Regeln nachbearbeitet werden. Die Evaluationen zeigen, dass Unterstreichungen sehr gut erkannt werden, wenn bei der Binarisierung der Scans nicht zu viele Pixel der Unterstreichung wegen geringem Kontrast verloren gehen. Zuk{\"u}nftig sollen die Worte oberhalb der Unterstreichung mit OCR transkribiert werden und auch andere Artefakte wie handschriftliche Notizen in alten Drucken erkannt werden.}, language = {de} } @article{OberdoerferHeidrichBirnstieletal.2021, author = {Oberd{\"o}rfer, Sebastian and Heidrich, David and Birnstiel, Sandra and Latoschik, Marc Erich}, title = {Enchanted by Your Surrounding? 
Measuring the Effects of Immersion and Design of Virtual Environments on Decision-Making}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.679277}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260101}, pages = {679277}, year = {2021}, abstract = {Impaired decision-making leads to the inability to distinguish between advantageous and disadvantageous choices. The impairment of a person's decision-making is a common goal of gambling games. Given the recent trend of gambling using immersive Virtual Reality it is crucial to investigate the effects of both immersion and the virtual environment (VE) on decision-making. In a novel user study, we measured decision-making using three virtual versions of the Iowa Gambling Task (IGT). The versions differed with regard to the degree of immersion and design of the virtual environment. While emotions affect decision-making, we further measured the positive and negative affect of participants. A higher visual angle on a stimulus leads to an increased emotional response. Thus, we kept the visual angle on the Iowa Gambling Task the same between our conditions. Our results revealed no significant impact of immersion or the VE on the IGT. We further found no significant difference between the conditions with regard to positive and negative affect. This suggests that neither the medium used nor the design of the VE causes an impairment of decision-making. However, in combination with a recent study, we provide first evidence that a higher visual angle on the IGT leads to an effect of impairment.}, language = {en} } @article{MadeiraGromerLatoschiketal.2021, author = {Madeira, Octavia and Gromer, Daniel and Latoschik, Marc Erich and Pauli, Paul}, title = {Effects of Acrophobic Fear and Trait Anxiety on Human Behavior in a Virtual Elevated Plus-Maze}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.635048}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258709}, year = {2021}, abstract = {The Elevated Plus-Maze (EPM) is a well-established apparatus to measure anxiety in rodents, i.e., animals exhibiting an increased relative time spent in the closed vs. the open arms are considered anxious. To examine whether such anxiety-modulated behaviors are conserved in humans, we re-translated this paradigm to a human setting using virtual reality in a Cave Automatic Virtual Environment (CAVE) system. In two studies, we examined whether the EPM exploration behavior of humans is modulated by their trait anxiety and also assessed the individuals' levels of acrophobia (fear of height), claustrophobia (fear of confined spaces), sensation seeking, and the reported anxiety when on the maze. First, we constructed an exact virtual copy of the animal EPM adjusted to human proportions. In analogy to animal EPM studies, participants (N = 30) freely explored the EPM for 5 min. In the second study (N = 61), we redesigned the EPM to make it more human-adapted and to differentiate influences of trait anxiety and acrophobia by introducing various floor textures and lower walls of closed arms to the height of standard handrails. In the first experiment, hierarchical regression analyses of exploration behavior revealed the expected association between open arm avoidance and Trait Anxiety, an even stronger association with acrophobic fear. 
In the second study, results revealed that acrophobia was associated with avoidance of open arms with mesh-floor texture, whereas for trait anxiety, claustrophobia, and sensation seeking, no effect was detected. Also, subjects' fear rating was moderated by all psychometrics but trait anxiety. In sum, both studies consistently indicate that humans show no general open arm avoidance analogous to rodents and that human EPM behavior is modulated most strongly by acrophobic fear, whereas trait anxiety plays a subordinate role. Thus, we conclude that the criteria for cross-species validity are met insufficiently in this case. Despite the exploratory nature, our studies provide in-depth insights into human exploration behavior on the virtual EPM.}, language = {en} } @article{DumicBjeloperaNuechter2021, author = {Dumic, Emil and Bjelopera, Anamaria and N{\"u}chter, Andreas}, title = {Dynamic point cloud compression based on projections, surface reconstruction and video compression}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {1}, issn = {1424-8220}, doi = {10.3390/s22010197}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-252231}, year = {2021}, abstract = {In this paper we present a new dynamic point cloud compression based on different projection types and bit depths, combined with a surface reconstruction algorithm and video compression for the obtained geometry and texture maps. Texture maps have been compressed after creating Voronoi diagrams. The video compression used is specific to geometry (FFV1) and texture (H.265/HEVC). Decompressed point clouds are reconstructed using a Poisson surface reconstruction algorithm. Comparison with the original point clouds was performed using point-to-point and point-to-plane measures. Comprehensive experiments show better performance for some projection maps: cylindrical, Miller and Mercator projections.}, language = {en} } @article{BuchinBuchinByrkaetal.2012, author = {Buchin, Kevin and Buchin, Maike and Byrka, Jaroslaw and N{\"o}llenburg, Martin and Okamoto, Yoshio and Silveira, Rodrigo I. and Wolff, Alexander}, title = {Drawing (Complete) Binary Tanglegrams}, series = {Algorithmica}, volume = {62}, journal = {Algorithmica}, doi = {10.1007/s00453-010-9456-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-124622}, pages = {309-332}, year = {2012}, abstract = {A binary tanglegram is a drawing of a pair of rooted binary trees whose leaf sets are in one-to-one correspondence; matching leaves are connected by inter-tree edges. For applications, for example, in phylogenetics, it is essential that both trees are drawn without edge crossings and that the inter-tree edges have as few crossings as possible. It is known that finding a tanglegram with the minimum number of crossings is NP-hard and that the problem is fixed-parameter tractable with respect to that number. We prove that under the Unique Games Conjecture there is no constant-factor approximation for binary trees. We show that the problem is NP-hard even if both trees are complete binary trees. For this case we give an O(n\(^{3}\))-time 2-approximation and a new, simple fixed-parameter algorithm.
We show that the maximization version of the dual problem for binary trees can be reduced to a version of MaxCut for which the algorithm of Goemans and Williamson yields a 0.878-approximation.}, language = {en} } @article{SteiningerKobsDavidsonetal.2021, author = {Steininger, Michael and Kobs, Konstantin and Davidson, Padraig and Krause, Anna and Hotho, Andreas}, title = {Density-based weighting for imbalanced regression}, series = {Machine Learning}, volume = {110}, journal = {Machine Learning}, number = {8}, issn = {1573-0565}, doi = {10.1007/s10994-021-06023-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269177}, pages = {2187-2211}, year = {2021}, abstract = {In many real world settings, imbalanced data impedes model performance of learning algorithms, like neural networks, mostly for rare cases. This is especially problematic for tasks focusing on these rare occurrences. For example, when estimating precipitation, extreme rainfall events are scarce but important considering their potential consequences. While there are numerous well studied solutions for classification settings, most of them cannot be applied to regression easily. Of the few solutions for regression tasks, barely any have explored cost-sensitive learning which is known to have advantages compared to sampling-based methods in classification tasks. In this work, we propose a sample weighting approach for imbalanced regression datasets called DenseWeight and a cost-sensitive learning approach for neural network regression with imbalanced data called DenseLoss based on our weighting scheme. DenseWeight weights data points according to their target value rarities through kernel density estimation (KDE). DenseLoss adjusts each data point's influence on the loss according to DenseWeight, giving rare data points more influence on model training compared to common data points. We show on multiple differently distributed datasets that DenseLoss significantly improves model performance for rare data points through its density-based weighting scheme. Additionally, we compare DenseLoss to the state-of-the-art method SMOGN, finding that our method mostly yields better performance. Our approach provides more control over model training as it enables us to actively decide on the trade-off between focusing on common or rare cases through a single hyperparameter, allowing the training of better models for rare data points.}, language = {en} } @article{SeufertSchroederSeufert2021, author = {Seufert, Anika and Schr{\"o}der, Svenja and Seufert, Michael}, title = {Delivering User Experience over Networks: Towards a Quality of Experience Centered Design Cycle for Improved Design of Networked Applications}, series = {SN Computer Science}, volume = {2}, journal = {SN Computer Science}, number = {6}, issn = {2661-8907}, doi = {10.1007/s42979-021-00851-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-271762}, year = {2021}, abstract = {To deliver the best user experience (UX), the human-centered design cycle (HCDC) serves as a well-established guideline to application developers. However, it does not yet cover network-specific requirements, which become increasingly crucial, as most applications deliver experience over the Internet. The missing network-centric view is provided by Quality of Experience (QoE), which could team up with UX towards an improved overall experience. By considering QoE aspects during the development process, it can be achieved that applications become network-aware by design. 
In this paper, the Quality of Experience Centered Design Cycle (QoE-CDC) is proposed, which provides guidelines on how to design applications with respect to network-specific requirements and QoE. Its practical value is showcased for popular application types and validated by outlining the design of a new smartphone application. We show that combining HCDC and QoE-CDC will result in an application design, which reaches a high UX and avoids QoE degradation.}, language = {en} } @article{MuellerLeppichGeissetal.2023, author = {M{\"u}ller, Konstantin and Leppich, Robert and Geiß, Christian and Borst, Vanessa and Pelizari, Patrick Aravena and Kounev, Samuel and Taubenb{\"o}ck, Hannes}, title = {Deep neural network regression for normalized digital surface model generation with Sentinel-2 imagery}, series = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, volume = {16}, journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, issn = {1939-1404}, doi = {10.1109/JSTARS.2023.3297710}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349424}, pages = {8508-8519}, year = {2023}, abstract = {In recent history, normalized digital surface models (nDSMs) have been constantly gaining importance as a means to solve large-scale geographic problems. High-resolution surface models are precious, as they can provide detailed information for a specific area. However, measurements with a high resolution are time consuming and costly. Only a few approaches exist to create high-resolution nDSMs for extensive areas. This article explores approaches to extract high-resolution nDSMs from low-resolution Sentinel-2 data, allowing us to derive large-scale models. We thereby utilize the advantages of Sentinel 2 being open access, having global coverage, and providing steady updates through a high repetition rate. Several deep learning models are trained to overcome the gap in producing high-resolution surface maps from low-resolution input data. With U-Net as a base architecture, we extend the capabilities of our model by integrating tailored multiscale encoders with differently sized kernels in the convolution as well as conformed self-attention inside the skip connection gates. Using pixelwise regression, our U-Net base models can achieve a mean height error of approximately 2 m. Moreover, through our enhancements to the model architecture, we reduce the model error by more than 7\%.}, language = {en} } @article{AliMontenegro2016, author = {Ali, Qasim and Montenegro, Sergio}, title = {Decentralized control for scalable quadcopter formations}, series = {International Journal of Aerospace Engineering}, volume = {2016}, journal = {International Journal of Aerospace Engineering}, doi = {10.1155/2016/9108983}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146704}, pages = {9108983}, year = {2016}, abstract = {An innovative framework has been developed for teamwork of two quadcopter formations, each having its specified formation geometry, assigned task, and matching control scheme. Position control for quadcopters in one of the formations has been implemented through a Linear Quadratic Regulator Proportional Integral (LQR PI) control scheme based on explicit model following scheme. Quadcopters in the other formation are controlled through LQR PI servomechanism control scheme. These two control schemes are compared in terms of their performance and control effort. 
Both formations are commanded by their respective ground stations through virtual leaders. Quadcopters in the formations are able to track desired trajectories as well as hover at desired points for a selected time duration. In case of communication loss between the ground station and any of the quadcopters, the neighboring quadcopter provides the command data, received from the ground station, to the affected unit. The proposed control schemes have been validated through extensive simulations using MATLAB®/Simulink® that provided favorable results.}, language = {en} } @article{DuLauterbachLietal.2020, author = {Du, Shitong and Lauterbach, Helge A. and Li, Xuyou and Demisse, Girum G. and Borrmann, Dorit and N{\"u}chter, Andreas}, title = {Curvefusion — A Method for Combining Estimated Trajectories with Applications to SLAM and Time-Calibration}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {23}, issn = {1424-8220}, doi = {10.3390/s20236918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219988}, year = {2020}, abstract = {Mapping and localization of mobile robots in an unknown environment are essential for most high-level operations like autonomous navigation or exploration. This paper presents a novel approach for combining estimated trajectories, namely curvefusion. The robot used in the experiments is equipped with a horizontally mounted 2D profiler, a constantly spinning 3D laser scanner and a GPS module. The proposed algorithm first combines trajectories from different sensors to optimize poses of the planar three degrees of freedom (DoF) trajectory, which is then fed into continuous-time simultaneous localization and mapping (SLAM) to further improve the trajectory. While state-of-the-art multi-sensor fusion methods mainly focus on probabilistic methods, our approach instead adopts a deformation-based method to optimize poses. To this end, a similarity metric for curved shapes is introduced into the robotics community to fuse the estimated trajectories. Additionally, a shape-based point correspondence estimation method is applied to the multi-sensor time calibration. Experiments show that the proposed fusion method can achieve relatively better accuracy, even if the error of the trajectory before fusion is large, which demonstrates that our method can still maintain a certain degree of accuracy in an environment where typical pose estimation methods have poor performance. In addition, the proposed time-calibration method also achieves high accuracy in estimating point correspondences.}, language = {en} } @article{AtienzadeCastroCortesetal.2012, author = {Atienza, Nieves and de Castro, Natalia and Cort{\´e}s, Carmen and Garrido, M. {\´A}ngeles and Grima, Clara I. and Hern{\´a}ndez, Gregorio and M{\´a}rquez, Alberto and Moreno-Gonz{\´a}lez, Auxiliadora and N{\"o}llenburg, Martin and Portillo, Jos{\´e} Ram{\´o}n and Reyes, Pedro and Valenzuela, Jes{\´u}s and Trinidad Villar, Maria and Wolff, Alexander}, title = {Cover contact graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78845}, year = {2012}, abstract = {We study problems that arise in the context of covering certain geometric objects called seeds (e.g., points or disks) by a set of other geometric objects called cover (e.g., a set of disks or homothetic triangles). We insist that the interiors of the seeds and the cover elements are pairwise disjoint, respectively, but they can touch. We call the contact graph of a cover a cover contact graph (CCG).
We are interested in three types of tasks, both in the general case and in the special case of seeds on a line: (a) deciding whether a given seed set has a connected CCG, (b) deciding whether a given graph has a realization as a CCG on a given seed set, and (c) bounding the sizes of certain classes of CCG's. Concerning (a) we give efficient algorithms for the case that seeds are points and show that the problem becomes hard if seeds and covers are disks. Concerning (b) we show that this problem is hard even for point seeds and disk covers (given a fixed correspondence between graph vertices and seeds). Concerning (c) we obtain upper and lower bounds on the number of CCG's for point seeds.}, subject = {Informatik}, language = {de} } @article{SteiningerAbelZiegleretal.2023, author = {Steininger, Michael and Abel, Daniel and Ziegler, Katrin and Krause, Anna and Paeth, Heiko and Hotho, Andreas}, title = {ConvMOS: climate model output statistics with deep learning}, series = {Data Mining and Knowledge Discovery}, volume = {37}, journal = {Data Mining and Knowledge Discovery}, number = {1}, issn = {1384-5810}, doi = {10.1007/s10618-022-00877-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324213}, pages = {136-166}, year = {2023}, abstract = {Climate models are the tool of choice for scientists researching climate change. Like all models they suffer from errors, particularly systematic and location-specific representation errors. One way to reduce these errors is model output statistics (MOS) where the model output is fitted to observational data with machine learning. In this work, we assess the use of convolutional Deep Learning climate MOS approaches and present the ConvMOS architecture which is specifically designed based on the observation that there are systematic and location-specific errors in the precipitation estimates of climate models. We apply ConvMOS models to the simulated precipitation of the regional climate model REMO, showing that a combination of per-location model parameters for reducing location-specific errors and global model parameters for reducing systematic errors is indeed beneficial for MOS performance. We find that ConvMOS models can reduce errors considerably and perform significantly better than three commonly used MOS approaches and plain ResNet and U-Net models in most cases. Our results show that non-linear MOS models underestimate the number of extreme precipitation events, which we alleviate by training models specialized towards extreme precipitation events with the imbalanced regression method DenseLoss. 
While we consider climate MOS, we argue that aspects of ConvMOS may also be beneficial in other domains with geospatial data, such as air pollution modeling or weather forecasts.}, subject = {Klima}, language = {en} } @article{GlemarecLugrinBosseretal.2022, author = {Gl{\´e}marec, Yann and Lugrin, Jean-Luc and Bosser, Anne-Gwenn and Buche, C{\´e}dric and Latoschik, Marc Erich}, title = {Controlling the stage: a high-level control system for virtual audiences in Virtual Reality}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.876433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284601}, year = {2022}, abstract = {This article presents a novel method for controlling a virtual audience system (VAS) in a Virtual Reality (VR) application, called STAGE, which was originally designed for supervised public speaking training in university seminars dedicated to the preparation and delivery of scientific talks. We are interested in creating pedagogical narratives: narratives encompass affective phenomena, and rather than organizing events that change the course of a training scenario, pedagogical plans using our system focus on organizing the affects it arouses in the trainees. Efficiently controlling a virtual audience towards a specific training objective while evaluating the speaker's performance presents a challenge for a seminar instructor: controlling the virtual audience, evaluating the speaker's performance, and adjusting the audience so that it quickly reacts to the user's behaviors and interactions all at once imposes a high level of cognitive and physical demands. It is indeed a critical limitation of a number of existing systems that they rely on a Wizard of Oz approach, where the tutor drives the audience in reaction to the user's performance. We address this problem by integrating a high-level control component for tutors with a VAS, which allows using predefined audience behavior rules, defining custom ones, as well as intervening at run-time for finer control of the unfolding of the pedagogical plan. At its core, this component offers a tool to program, select, modify and monitor interactive training narratives using a high-level representation. STAGE offers the following features: i) a high-level API to program pedagogical narratives focusing on a specific public speaking situation and training objectives, ii) an interactive visualization interface, iii) computation and visualization of user metrics, iv) a semi-autonomous virtual audience composed of virtual spectators with automatic reactions to the speaker and surrounding spectators while following the pedagogical plan, and v) the possibility for the instructor to embody a virtual spectator to ask questions or guide the speaker from within the Virtual Environment.
We present here the design and implementation of the tutoring system and its integration in STAGE, and discuss its reception by end-users.}, language = {en} } @article{LatoschikWienrich2022, author = {Latoschik, Marc Erich and Wienrich, Carolin}, title = {Congruence and plausibility, not presence: pivotal conditions for XR experiences and effects, a novel approach}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.694433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284787}, year = {2022}, abstract = {Presence is often considered the most important quale describing the subjective feeling of being in a computer-generated and/or computer-mediated virtual environment. The identification and separation of orthogonal presence components, i.e., the place illusion and the plausibility illusion, has been an accepted theoretical model describing Virtual Reality (VR) experiences for some time. This perspective article challenges this presence-oriented VR theory. First, we argue that a place illusion cannot be the major construct to describe the much wider scope of virtual, augmented, and mixed reality (VR, AR, MR; or XR for short). Second, we argue that there is no plausibility illusion but merely plausibility, and we derive the place illusion caused by the congruent and plausible generation of spatial cues and similarly for all the current model's so-defined illusions. Finally, we propose congruence and plausibility to become the central essential conditions in a novel theoretical model describing XR experiences and effects.}, language = {en} } @article{BoehlerCreignouGalotaetal.2012, author = {B{\"o}hler, Elmar and Creignou, Nadia and Galota, Matthias and Reith, Steffen and Schnoor, Henning and Vollmer, Heribert}, title = {Complexity Classifications for Different Equivalence and Audit Problems for Boolean Circuits}, series = {Logical Methods in Computer Science}, volume = {8}, journal = {Logical Methods in Computer Science}, number = {3:27}, doi = {10.2168/LMCS-8(3:27)2012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-131121}, pages = {1 -- 25}, year = {2012}, abstract = {We study Boolean circuits as a representation of Boolean functions and consider different equivalence, audit, and enumeration problems. For a number of restricted sets of gate types (bases) we obtain efficient algorithms, while for all other gate types we show these problems are at least NP-hard.}, language = {en} } @article{HossfeldHeegaardKellerer2023, author = {Hossfeld, Tobias and Heegaard, Poul E. and Kellerer, Wolfgang}, title = {Comparing the scalability of communication networks and systems}, series = {IEEE Access}, volume = {11}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2023.3314201}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349403}, pages = {101474-101497}, year = {2023}, abstract = {Scalability is often mentioned in the literature, but a stringent definition is missing. In particular, there is no general scalability assessment which clearly indicates whether a system scales or not or whether a system scales better than another. The key contribution of this article is the definition of a scalability index (SI) which quantifies whether a system scales in comparison to another system, a hypothetical system, e.g., a linear system, or the theoretically optimal system. The suggested SI generalizes different metrics from the literature, which are specialized cases of our SI.
The primary target of our scalability framework is, however, the benchmarking of two systems, which does not require any reference system. The SI is demonstrated and evaluated for different use cases: (1) the performance of an IoT load balancer depending on the system load, (2) the availability of a communication system depending on the size and structure of the network, (3) scalability comparison of different location selection mechanisms in fog computing with respect to delays and energy consumption, and (4) comparison of time-sensitive networking (TSN) mechanisms in terms of efficiency and utilization. Finally, we discuss how to use and how not to use the SI and give recommendations and guidelines in practice. To the best of our knowledge, this is the first work which provides a general SI for the comparison and benchmarking of systems, which is the primary target of our scalability analysis.}, language = {en} } @article{HentschelKobsHotho2022, author = {Hentschel, Simon and Kobs, Konstantin and Hotho, Andreas}, title = {CLIP knows image aesthetics}, series = {Frontiers in Artificial Intelligence}, volume = {5}, journal = {Frontiers in Artificial Intelligence}, issn = {2624-8212}, doi = {10.3389/frai.2022.976235}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-297150}, year = {2022}, abstract = {Most Image Aesthetic Assessment (IAA) methods use a pretrained ImageNet classification model as a base to fine-tune. We hypothesize that content classification is not an optimal pretraining task for IAA, since the task discourages the extraction of features that are useful for IAA, e.g., composition, lighting, or style. On the other hand, we argue that the Contrastive Language-Image Pretraining (CLIP) model is a better base for IAA models, since it has been trained using natural language supervision. Due to the rich nature of language, CLIP needs to learn a broad range of image features that correlate with sentences describing the image content, composition, environments, and even subjective feelings about the image. While it has been shown that CLIP extracts features useful for content classification tasks, its suitability for tasks that require the extraction of style-based features like IAA has not yet been shown. We test our hypothesis by conducting a three-step study, investigating the usefulness of features extracted by CLIP compared to features obtained from the last layer of a comparable ImageNet classification model. With each step, the computational expense increases. First, we engineer natural language prompts that let CLIP assess an image's aesthetic without adjusting any weights in the model. To overcome the challenge that CLIP's prompting is only applicable to classification tasks, we propose a simple but effective strategy to convert multiple prompts to a continuous scalar as required when predicting an image's mean aesthetic score. Second, we train a linear regression on the AVA dataset using image features obtained by CLIP's image encoder. The resulting model outperforms a linear regression trained on features from an ImageNet classification model. It also shows competitive performance with fully fine-tuned networks based on ImageNet, while only training a single layer. Finally, by fine-tuning CLIP's image encoder on the AVA dataset, we show that CLIP only needs a fraction of training epochs to converge, while also performing better than a fine-tuned ImageNet model. 
Overall, our experiments suggest that CLIP is better suited as a base model for IAA methods than ImageNet-pretrained networks.}, language = {en} } @article{DoellingerWienrichLatoschik2021, author = {D{\"o}llinger, Nina and Wienrich, Carolin and Latoschik, Marc Erich}, title = {Challenges and opportunities of immersive technologies for mindfulness meditation: a systematic review}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.644683}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259047}, pages = {644683}, year = {2021}, abstract = {Mindfulness is considered an important factor in an individual's subjective well-being. Consequently, Human-Computer Interaction (HCI) has investigated approaches that strengthen mindfulness, i.e., by inventing multimedia technologies to support mindfulness meditation. These approaches often use smartphones, tablets, or consumer-grade desktop systems to allow everyday usage in users' private lives or in the scope of organized therapies. Virtual, Augmented, and Mixed Reality (VR, AR, MR; in short: XR) significantly extend the design space for such approaches. XR covers a wide range of potential sensory stimulation, perceptive and cognitive manipulations, content presentation, interaction, and agency. These facilities are linked to typical XR-specific perceptions that are conceptually closely related to mindfulness research, such as (virtual) presence and (virtual) embodiment. However, a successful exploitation of XR that strengthens mindfulness requires a systematic analysis of the potential interrelation and influencing mechanisms between XR technology, its properties, factors, and phenomena and existing models and theories of the construct of mindfulness. This article reports such a systematic analysis of XR-related research from HCI and life sciences to determine the extent to which existing research frameworks on HCI and mindfulness can be applied to XR technologies, the potential of XR technologies to support mindfulness, and open research gaps. Fifty papers from the ACM Digital Library and the National Institutes of Health's National Library of Medicine (PubMed), with and without empirical efficacy evaluation, were included in our analysis. The results reveal that at the current time, empirical research on XR-based mindfulness support mainly focuses on therapy and therapeutic outcomes. Furthermore, most of the currently investigated XR-supported mindfulness interactions are limited to vocally guided meditations within nature-inspired virtual environments. While an analysis of empirical research on those systems did not reveal differences in mindfulness compared to non-mediated mindfulness practices, various design proposals illustrate that XR has the potential to provide interactive and body-based innovations for mindfulness practice. We propose a structured approach for future work to specify and further explore the potential of XR as a mindfulness support. 
The resulting framework provides design guidelines for XR-based mindfulness support based on the elements and psychological mechanisms of XR interactions.}, language = {en} } @article{LugrinLatoschikHabeletal.2016, author = {Lugrin, Jean-Luc and Latoschik, Marc Erich and Habel, Michael and Roth, Daniel and Seufert, Christian and Grafe, Silke}, title = {Breaking Bad Behaviors: A New Tool for Learning Classroom Management Using Virtual Reality}, series = {Frontiers in ICT}, volume = {3}, journal = {Frontiers in ICT}, number = {26}, doi = {10.3389/fict.2016.00026}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147945}, year = {2016}, abstract = {This article presents an immersive virtual reality (VR) system for training classroom management skills, with a specific focus on learning to manage disruptive student behavior in face-to-face, one-to-many teaching scenarios. The core of the system is a real-time 3D virtual simulation of a classroom populated by twenty-four semi-autonomous virtual students. The system has been designed as a companion tool for classroom management seminars in a syllabus for primary and secondary school teachers. This will allow lecturers to link theory with practice using the medium of VR. The system is therefore designed for two users: a trainee teacher and an instructor supervising the training session. The teacher is immersed in a real-time 3D simulation of a classroom by means of a head-mounted display and headphones. The instructor operates a graphical desktop console, which renders a view of the class and the teacher, whose avatar movements are captured by a markerless tracking system. This console includes a 2D graphics menu with convenient behavior and feedback control mechanisms to provide human-guided training sessions. The system is built using low-cost consumer hardware and software. Its architecture and technical design are described in detail. A first evaluation confirms its conformance to critical usability requirements (i.e., safety and comfort, believability, simplicity, acceptability, extensibility, affordability, and mobility). Our initial results are promising and constitute the necessary first step toward a possible investigation of the efficiency and effectiveness of such a system in terms of learning outcomes and experience.}, language = {en} } @article{PfitznerMayNuechter2018, author = {Pfitzner, Christian and May, Stefan and N{\"u}chter, Andreas}, title = {Body weight estimation for dose-finding and health monitoring of lying, standing and walking patients based on RGB-D data}, series = {Sensors}, volume = {18}, journal = {Sensors}, number = {5}, doi = {10.3390/s18051311}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176642}, pages = {1311}, year = {2018}, abstract = {This paper describes the estimation of the body weight of a person in front of an RGB-D camera. A survey of different methods for body weight estimation based on depth sensors is given. First, an estimation of people standing in front of a camera is presented. Second, an approach based on a stream of depth images is used to obtain the body weight of a person walking towards a sensor. The algorithm first extracts features from a point cloud and forwards them to an artificial neural network (ANN) to obtain an estimation of body weight. Besides the algorithm for the estimation, this paper further presents an open-access dataset based on measurements from a trauma room in a hospital as well as data from visitors of a public event. 
In total, the dataset contains 439 measurements. The article illustrates the efficiency of the approach with experiments on persons lying down in a hospital, standing persons, and walking persons. Applicable scenarios for the presented algorithm include body weight-related dosing of emergency patients.}, language = {en} } @article{BeckerCaminitiFiorellaetal.2013, author = {Becker, Martin and Caminiti, Saverio and Fiorella, Donato and Francis, Louise and Gravino, Pietro and Haklay, Mordechai (Muki) and Hotho, Andreas and Loreto, Vittorio and Mueller, Juergen and Ricchiuti, Ferdinando and Servedio, Vito D. P. and Sirbu, Alina and Tria, Francesca}, title = {Awareness and Learning in Participatory Noise Sensing}, series = {PLOS ONE}, volume = {8}, journal = {PLOS ONE}, number = {12}, issn = {1932-6203}, doi = {10.1371/journal.pone.0081638}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-127675}, pages = {e81638}, year = {2013}, abstract = {The development of ICT infrastructures has facilitated the emergence of new paradigms for looking at society and the environment over the last few years. Participatory environmental sensing, i.e. directly involving citizens in environmental monitoring, is one example, which is hoped to encourage learning and enhance awareness of environmental issues. In this paper, an analysis of the behaviour of individuals involved in noise sensing is presented. Citizens have been involved in noise measuring activities through the WideNoise smartphone application. This application has been designed to record both objective (noise samples) and subjective (opinions, feelings) data. The application has been open to be used freely by anyone and has been widely employed worldwide. In addition, several test cases have been organised in European countries. Based on the information submitted by users, an analysis of emerging awareness and learning is performed. The data show that changes in the way the environment is perceived after repeated usage of the application do appear. Specifically, users learn how to recognise different noise levels they are exposed to. Additionally, the subjective data collected indicate increased user involvement over time and a categorisation effect between pleasant and less pleasant environments.}, language = {en} } @article{KrenzerHeilFittingetal., author = {Krenzer, Adrian and Heil, Stefan and Fitting, Daniel and Matti, Safa and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Automated classification of polyps using deep learning architectures and few-shot learning}, series = {BMC Medical Imaging}, volume = {23}, journal = {BMC Medical Imaging}, doi = {10.1186/s12880-023-01007-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357465}, abstract = {Background Colorectal cancer (CRC) is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is a colonoscopy. However, not all colon polyps have the risk of becoming cancerous. Therefore, polyps are classified using different classification systems. After the classification, further treatment and procedures are based on the classification of the polyp. Nevertheless, classification is not easy. Therefore, we suggest two novel automated classification systems assisting gastroenterologists in classifying polyps based on the NICE and Paris classifications. Methods We build two classification systems. One classifies polyps based on their shape (Paris). The other classifies polyps based on their texture and surface patterns (NICE). 
A two-step process for the Paris classification is introduced: first, detecting and cropping the polyp in the image, and second, classifying the polyp based on the cropped area with a transformer network. For the NICE classification, we design a few-shot learning algorithm based on the Deep Metric Learning approach. The algorithm creates an embedding space for polyps, which allows classification from a few examples to account for the data scarcity of NICE-annotated images in our database. Results For the Paris classification, we achieve an accuracy of 89.35 \%, surpassing all papers in the literature and establishing a new state-of-the-art and baseline accuracy for other publications on a public data set. For the NICE classification, we achieve a competitive accuracy of 81.13 \% and thereby demonstrate the viability of the few-shot learning paradigm in polyp classification in data-scarce environments. Additionally, we show different ablations of the algorithms. Finally, we further elaborate on the explainability of the system by showing heat maps of the neural network explaining neural activations. Conclusion Overall, we introduce two polyp classification systems to assist gastroenterologists. We achieve state-of-the-art performance in the Paris classification and demonstrate the viability of the few-shot learning paradigm in the NICE classification, addressing the prevalent data scarcity issues faced in medical machine learning.}, language = {en} } @article{WolffRutter2012, author = {Wolff, Alexander and Rutter, Ignaz}, title = {Augmenting the Connectivity of Planar and Geometric Graphs}, series = {Journal of Graph Algorithms and Applications}, journal = {Journal of Graph Algorithms and Applications}, doi = {10.7155/jgaa.00275}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97587}, year = {2012}, abstract = {In this paper we study connectivity augmentation problems. Given a connected graph G with some desirable property, we want to make G 2-vertex connected (or 2-edge connected) by adding edges such that the resulting graph keeps the property. The aim is to add as few edges as possible. The property that we consider is planarity, both in an abstract graph-theoretic and in a geometric setting, where vertices correspond to points in the plane and edges to straight-line segments. We show that it is NP-hard to find a minimum-cardinality augmentation that makes a planar graph 2-edge connected. For making a planar graph 2-vertex connected this was known. We further show that both problems are hard in the geometric setting, even when restricted to trees. The problems remain hard for higher degrees of connectivity. On the other hand we give polynomial-time algorithms for the special case of convex geometric graphs. We also study the following related problem. Given a planar (plane geometric) graph G, two vertices s and t of G, and an integer c, how many edges have to be added to G such that G is still planar (plane geometric) and contains c edge- (or vertex-) disjoint s-t paths? For the planar case we give a linear-time algorithm for c = 2. 
For the plane geometric case we give optimal worst-case bounds for c = 2; for c = 3 we characterize the cases that have a solution.}, language = {en} } @article{MandelHoernleinIflandetal.2011, author = {Mandel, Alexander and H{\"o}rnlein, Alexander and Ifland, Marianus and L{\"u}neburg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Aufwandsanalyse f{\"u}r computerunterst{\"u}tzte Multiple-Choice Papierklausuren}, series = {GMS Journal for Medical Education}, volume = {28}, journal = {GMS Journal for Medical Education}, number = {4}, doi = {10.3205/zma000767}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134386}, pages = {1-15, Doc55}, year = {2011}, abstract = {Introduction: Multiple-choice examinations are still fundamental for assessment in medical degree programs. In addition to content-related research, the optimization of the technical procedure is an important question. Medical examiners face three options: paper-based examinations with or without computer support or completely electronic examinations. Critical aspects are the effort for formatting, the logistic effort during the actual examination, quality, promptness and effort of the correction, the time for making the documents available for inspection by the students, and the statistical analysis of the examination results. Methods: For three semesters, a computer program for the input and formatting of MC questions in medical and other paper-based examinations has been used and continuously improved at Wuerzburg University. In the winter semester (WS) 2009/10 eleven, in the summer semester (SS) 2010 twelve and in WS 2010/11 thirteen medical examinations were conducted with the program and automatically evaluated. For the last two semesters the remaining manual workload was recorded. Results: The effort for formatting and the subsequent analysis, including adjustments of the analysis, of an average examination with about 140 participants and about 35 questions was 5-7 hours for exams without complications in the winter semester 2009/2010, about 2 hours in SS 2010 and about 1.5 hours in the winter semester 2010/11. Including exams with complications, the average time was about 3 hours per exam in SS 2010 and 2.67 hours for WS 10/11. Discussion: For conventional multiple-choice exams, the computer-based formatting and evaluation of paper-based exams offers a significant time reduction for lecturers in comparison with the manual correction of paper-based exams. Compared to purely electronically conducted exams, it needs a much simpler technological infrastructure and fewer staff during the exam.}, language = {de} } @article{GreubelAndresHennecke2023, author = {Greubel, Andr{\´e} and Andres, Daniela and Hennecke, Martin}, title = {Analyzing reporting on ransomware incidents: a case study}, series = {Social Sciences}, volume = {12}, journal = {Social Sciences}, number = {5}, issn = {2076-0760}, doi = {10.3390/socsci12050265}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313746}, year = {2023}, abstract = {Knowledge about ransomware is important for protecting sensitive data and for participating in public debates about suitable regulation regarding its security. However, as of now, this topic has received little to no attention in most school curricula. As such, it is desirable to analyze what citizens can learn about this topic outside of formal education, e.g., from news articles. 
This analysis is relevant both for analyzing the public discourse about ransomware and for identifying what aspects of this topic should be included in the limited time available for it in formal education. Thus, this paper was motivated by both educational and media research. The central goal is to explore how the media reports on this topic and, additionally, to identify potential misconceptions that could stem from this reporting. To do so, we conducted an exploratory case study into the reporting of 109 media articles regarding a high-impact ransomware event: the shutdown of the Colonial Pipeline (located in the east of the USA). We analyzed how the articles introduced central terminology, what details were provided, what details were not, and what (mis-)conceptions readers might receive from them. Our results show that an introduction of the terminology and technical concepts of security is insufficient for a complete understanding of the incident. Most importantly, the articles may lead to four misconceptions about ransomware that are likely to lead to misleading conclusions about the responsibility for the incident and possible political and technical options to prevent such attacks in the future.}, language = {en} } @article{GageikStrohmeierMontenegro2013, author = {Gageik, Nils and Strohmeier, Michael and Montenegro, Sergio}, title = {An Autonomous UAV with an Optical Flow Sensor for Positioning and Navigation}, series = {International Journal of Advanced Robotic Systems}, journal = {International Journal of Advanced Robotic Systems}, doi = {10.5772/56813}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96368}, year = {2013}, abstract = {A procedure to control all six DOF (degrees of freedom) of a UAV (unmanned aerial vehicle) without an external reference system and to enable fully autonomous flight is presented here. For 2D positioning the principle of optical flow is used. Together with the output of height estimation, fusing ultrasonic, infrared, inertial, and pressure sensor data, the 3D position of the UAV can be computed, controlled and steered. All data processing is done on the UAV. An external computer with a pathway planning interface is for commanding purposes only. The presented system is part of the AQopterI8 project, which aims to develop an autonomous flying quadrocopter for indoor applications. The focus of this paper is 2D positioning using an optical flow sensor. As a result of the performed evaluation, it can be concluded that for position hold, the standard deviation of the position error is 10 cm and after landing the position error is about 30 cm.}, language = {en} } @article{TsouliasJoerissenNuechter2022, author = {Tsoulias, Nikos and J{\"o}rissen, Sven and N{\"u}chter, Andreas}, title = {An approach for monitoring temperature on fruit surface by means of thermal point cloud}, series = {MethodsX}, volume = {9}, journal = {MethodsX}, issn = {2215-0161}, doi = {10.1016/j.mex.2022.101712}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300270}, year = {2022}, abstract = {Heat and excessive solar radiation can produce abiotic stresses during apple maturation, affecting fruit quality. Therefore, the monitoring of the temperature on the fruit surface (FST) over the growing period can allow identifying thresholds above which several physiological disorders such as sunburn may occur in apple. The current approaches neglect the spatial variation of FST and have reduced repeatability, resulting in unreliable predictions. 
In this study, LiDAR laser scanning and thermal imaging were employed to detect the temperature on the fruit surface by means of a 3D point cloud. A process for calibrating the two sensors based on an active board target and producing a 3D thermal point cloud was suggested. After calibration, the sensor system was utilised to scan the fruit trees, while temperature values assigned in the corresponding 3D point cloud were based on the extrinsic calibration. Finally, a fruit detection algorithm was applied to segment the FST of each apple. • The approach allows the calibration of a LiDAR laser scanner with a thermal camera in order to produce a 3D thermal point cloud. • The method can be applied in apple trees for segmenting the FST in 3D. Moreover, the approach can be utilised to predict several physiological disorders, including sunburn, on the fruit surface.}, language = {en} } @article{BartlWenningerWolfetal.2021, author = {Bartl, Andrea and Wenninger, Stephan and Wolf, Erik and Botsch, Mario and Latoschik, Marc Erich}, title = {Affordable but not cheap: a case study of the effects of two 3D-reconstruction methods of virtual humans}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.694617}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260492}, year = {2021}, abstract = {Realistic and lifelike 3D-reconstruction of virtual humans has various exciting and important use cases. Our and others' appearances have notable effects on ourselves and our interaction partners in virtual environments, e.g., on acceptance, preference, trust, believability, behavior (the Proteus effect), and more. Today, multiple approaches for the 3D-reconstruction of virtual humans exist. They significantly vary in terms of the degree of achievable realism, the technical complexities, and finally, the overall reconstruction costs involved. This article compares two 3D-reconstruction approaches with very different hardware requirements. The high-cost solution uses a typical complex and elaborate camera rig consisting of 94 digital single-lens reflex (DSLR) cameras. The recently developed low-cost solution uses a smartphone camera to create videos that capture multiple views of a person. Both methods use photogrammetric reconstruction and template fitting with the same template model and differ in their adaptation to the method-specific input material. Each method generates high-quality virtual humans ready to be processed, animated, and rendered by standard XR simulation and game engines such as Unreal or Unity. We compare the results of the two 3D-reconstruction methods in an immersive virtual environment against each other in a user study. Our results indicate that the virtual humans from the low-cost approach are perceived similarly to those from the high-cost approach regarding the perceived similarity to the original, human-likeness, beauty, and uncanniness, despite significant differences in the objectively measured quality. The perceived feeling of change of one's own body was higher for the low-cost virtual humans. 
Quality differences were perceived more strongly for one's own body than for other virtual humans.}, language = {en} } @article{GrohmannHerbstChalbanietal.2020, author = {Grohmann, Johannes and Herbst, Nikolas and Chalbani, Avi and Arian, Yair and Peretz, Noam and Kounev, Samuel}, title = {A Taxonomy of Techniques for SLO Failure Prediction in Software Systems}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010010}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200594}, pages = {10}, year = {2020}, abstract = {Failure prediction is an important aspect of self-aware computing systems. Therefore, a multitude of different approaches has been proposed in the literature over the past few years. In this work, we propose a taxonomy for organizing works focusing on the prediction of Service Level Objective (SLO) failures. Our taxonomy classifies related work along the dimensions of the prediction target (e.g., anomaly detection, performance prediction, or failure prediction), the time horizon (e.g., detection or prediction, online or offline application), and the applied modeling type (e.g., time series forecasting, machine learning, or queueing theory). The classification is derived based on a systematic mapping of relevant papers in the area. Additionally, we give an overview of different techniques in each sub-group and address remaining challenges in order to guide future research.}, language = {en} } @article{HalbigLatoschik2021, author = {Halbig, Andreas and Latoschik, Marc Erich}, title = {A systematic review of physiological measurements, factors, methods, and applications in virtual reality}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.694567}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260503}, year = {2021}, abstract = {Measurements of physiological parameters provide an objective, often non-intrusive, and (at least semi-)automatic evaluation and utilization of user behavior. In addition, specific hardware devices of Virtual Reality (VR) often ship with built-in sensors, i.e. eye-tracking and movement sensors. Hence, the combination of physiological measurements and VR applications seems promising. Several approaches have investigated the applicability and benefits of this combination for various fields of application. However, the range of possible application fields, coupled with potentially useful and beneficial physiological parameters, types of sensor, target variables and factors, and analysis approaches and techniques is manifold. This article provides a systematic overview and an extensive state-of-the-art review of the usage of physiological measurements in VR. We identified 1,119 works that make use of physiological measurements in VR. Within these, we identified 32 approaches that focus on the classification of characteristics of experience, common in VR applications. The first part of this review categorizes the 1,119 works by field of application, i.e. therapy, training, entertainment, and communication and interaction, as well as by the specific target factors and variables measured by the physiological parameters. An additional category summarizes general VR approaches applicable to all specific fields of application since they target typical VR qualities. 
In the second part of this review, we analyze the target factors and variables regarding the respective methods used for an automatic analysis and, potentially, classification. For example, we highlight which measurement setups have been proven to be sensitive enough to distinguish different levels of arousal, valence, anxiety, stress, or cognitive workload in the virtual realm. This work may prove useful for all researchers who want to use physiological data in VR and want a good overview of prior approaches, their benefits, and potential drawbacks.}, language = {en} } @article{HeinWienrichLatoschik2021, author = {Hein, Rebecca M. and Wienrich, Carolin and Latoschik, Marc E.}, title = {A systematic review of foreign language learning with immersive technologies (2001-2020)}, series = {AIMS Electronics and Electrical Engineering}, volume = {5}, journal = {AIMS Electronics and Electrical Engineering}, number = {2}, doi = {10.3934/electreng.2021007}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-268811}, pages = {117-145}, year = {2021}, abstract = {This study provides a systematic literature review of research (2001-2020) in the field of teaching and learning a foreign language and intercultural learning using immersive technologies. Based on 2507 sources, 54 articles were selected according to predefined selection criteria. The review is aimed at providing information about which immersive interventions are being used for foreign language learning and teaching and where potential research gaps exist. The papers were analyzed and coded according to the following categories: (1) investigation form and education level, (2) degree of immersion and technology used, (3) predictors, and (4) criteria. The review identified key research findings relating to the use of immersive technologies for learning and teaching a foreign language and intercultural learning at cognitive, affective, and conative levels. The findings revealed research gaps in the area of teachers as a target group and virtual reality (VR) as a fully immersive intervention form. Furthermore, the studies reviewed rarely examined behavior and implicit measurements related to inter- and trans-cultural learning and teaching. Inter- and transcultural learning and teaching in particular is an underrepresented subject of investigation. Finally, concrete suggestions for future research are given. The systematic review contributes to the challenge of interdisciplinary cooperation between pedagogy, foreign language didactics, and Human-Computer Interaction to achieve innovative teaching-learning formats and a successful digital transformation.}, language = {en} } @article{PrantlZeckBaueretal.2022, author = {Prantl, Thomas and Zeck, Timo and Bauer, Andre and Ten, Peter and Prantl, Dominik and Yahya, Ala Eddine Ben and Ifflaender, Lukas and Dmitrienko, Alexandra and Krupitzer, Christian and Kounev, Samuel}, title = {A Survey on Secure Group Communication Schemes With Focus on IoT Communication}, series = {IEEE Access}, volume = {10}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2022.3206451}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300257}, pages = {99944 -- 99962}, year = {2022}, abstract = {A key feature for the Internet of Things (IoT) is to control what content is available to each user. To handle this access management, encryption schemes can be used. Due to the diverse usage of encryption schemes, there are various realizations of 1-to-1, 1-to-n, and n-to-n schemes in the literature. 
This multitude of encryption methods with a wide variety of properties presents developers with the challenge of selecting the optimal method for a particular use case, which is further complicated by the fact that there is no overview of existing encryption schemes. To fill this gap, we envision a cryptography encyclopedia providing such an overview of existing encryption schemes. In this survey paper, we take a first step towards such an encyclopedia by creating a sub-encyclopedia for secure group communication (SGC) schemes, which belong to the n-to-n category. We extensively surveyed the state of the art and classified 47 different schemes. More precisely, we provide (i) a comprehensive overview of the relevant security features, (ii) a set of relevant performance metrics, (iii) a classification for secure group communication schemes, and (iv) workflow descriptions of the 47 schemes. Moreover, we perform a detailed performance and security evaluation of the 47 secure group communication schemes. Based on this evaluation, we create a guideline for the selection of secure group communication schemes.}, language = {en} } @article{BayerPruckner2023, author = {Bayer, Daniel and Pruckner, Marco}, title = {A digital twin of a local energy system based on real smart meter data}, series = {Energy Informatics}, volume = {6}, journal = {Energy Informatics}, doi = {10.1186/s42162-023-00263-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357456}, year = {2023}, abstract = {The steadily increasing usage of smart meters generates a valuable amount of high-resolution data about the individual energy consumption and production of local energy systems. Private households install more and more photovoltaic systems, battery storage, and big consumers like heat pumps. Thus, our vision is to augment these collected smart meter time series of a complete system (e.g., a city, a town, or complex institutions like airports) with the previously named components added in simulation. We therefore propose a novel digital twin of such an energy system based solely on a complete set of smart meter data and additional building data. Based on the additional geospatial data, the twin is intended to represent the addition of the abovementioned components as realistically as possible. Outputs of the twin can be used as decision support, either for system operators on where to strengthen the system or for individual households on where and how to install photovoltaic systems and batteries. Meanwhile, the first local energy system operators have had such smart meter data for almost all residential consumers for several years. We acquire such data from an exemplary operator and discuss a case study presenting some features of our digital twin and highlighting the value of the combination of smart meter and geospatial data.}, language = {en} }