@article{LohWamserPoigneeetal.2022, author = {Loh, Frank and Wamser, Florian and Poign{\'e}e, Fabian and Geißler, Stefan and Hoßfeld, Tobias}, title = {YouTube Dataset on Mobile Streaming for Internet Traffic Modeling and Streaming Analysis}, series = {Scientific Data}, volume = {9}, journal = {Scientific Data}, number = {1}, doi = {10.1038/s41597-022-01418-y}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300240}, year = {2022}, abstract = {Around 4.9 billion Internet users worldwide watch billions of hours of online video every day. As a result, streaming is by far the predominant type of traffic in communication networks. According to Google statistics, three out of five video views come from mobile devices. Thus, in view of the continuous technological advances in end devices and increasing mobile use, datasets for mobile streaming are indispensable in research but only sparsely dealt with in the literature so far. With this public dataset, we provide 1,081 hours of time-synchronous video measurements at network, transport, and application layer with the native YouTube streaming client on mobile devices. The dataset includes 80 network scenarios with 171 different individual bandwidth settings measured in 5,181 runs with limited bandwidth, 1,939 runs with emulated 3G/4G traces, and 4,022 runs with pre-defined bandwidth changes. This corresponds to 332 GB of video payload. We present the most relevant quality indicators for scientific use, i.e., initial playback delay, streaming video quality, adaptive video quality changes, video rebuffering events, and streaming phases.}, language = {en} } @article{LohMehlingHossfeld2022, author = {Loh, Frank and Mehling, Noah and Hoßfeld, Tobias}, title = {Towards LoRaWAN without data loss: studying the performance of different channel access approaches}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {2}, issn = {1424-8220}, doi = {10.3390/s22020691}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-302418}, year = {2022}, abstract = {The Long Range Wide Area Network (LoRaWAN) is one of the fastest-growing Internet of Things (IoT) access protocols. It operates in the license-free 868 MHz band and gives everyone the possibility to create their own small sensor networks. The drawback of this technology is often unscheduled or random channel access, which leads to message collisions and potential data loss. For that reason, recent literature studies alternative approaches for LoRaWAN channel access. In this work, state-of-the-art random channel access is compared with alternative approaches from the literature by means of collision probability. Furthermore, a time-scheduled channel access methodology is presented to completely avoid collisions in LoRaWAN. For this approach, an exhaustive simulation study was conducted and the performance was evaluated with random access cross-traffic.
In a general theoretical analysis, the limits of the time-scheduled approach in complying with duty cycle regulations in LoRaWAN are discussed.}, language = {en} } @article{ObremskiFriedrichHaaketal.2022, author = {Obremski, David and Friedrich, Paula and Haak, Nora and Schaper, Philipp and Lugrin, Birgit}, title = {The impact of mixed-cultural speech on the stereotypical perception of a virtual robot}, series = {Frontiers in Robotics and AI}, volume = {9}, journal = {Frontiers in Robotics and AI}, issn = {2296-9144}, doi = {10.3389/frobt.2022.983955}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-293531}, year = {2022}, abstract = {Despite the fact that mixed-cultural backgrounds are becoming increasingly important in our daily life, the representation of multiple cultural backgrounds in one entity is still rare in socially interactive agents (SIAs). This paper's contribution is twofold. First, it provides a survey of research on mixed-cultural SIAs. Second, it presents a study investigating how mixed-cultural speech (in this case, a non-native accent) influences how a virtual robot is perceived in terms of personality, warmth, competence and credibility. Participants with English or German, respectively, as their first language watched a video of a virtual robot speaking in either standard English or German-accented English. It was expected that the German-accented speech would be rated more positively by native German participants as well as elicit the German stereotypes of credibility and conscientiousness for both German and English participants. Contrary to the expectations, German participants rated the virtual robot lower in terms of competence and credibility when it spoke with a German accent, whereas English participants perceived the virtual robot with a German accent as more credible compared to the version without an accent. Both the native English and native German listeners classified the virtual robot with a German accent as significantly more neurotic than the virtual robot speaking standard English. This work shows that by solely implementing a non-native accent in a virtual robot, stereotypes are partly transferred. It also shows that the implementation of a non-native accent leads to differences in the perception of the virtual robot.}, language = {en} } @article{LeschKoenigKounevetal.2022, author = {Lesch, Veronika and K{\"o}nig, Maximilian and Kounev, Samuel and Stein, Anthony and Krupitzer, Christian}, title = {Tackling the rich vehicle routing problem with nature-inspired algorithms}, series = {Applied Intelligence}, volume = {52}, journal = {Applied Intelligence}, issn = {1573-7497}, doi = {10.1007/s10489-021-03035-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-268942}, pages = {9476-9500}, year = {2022}, abstract = {In the last decades, the classical Vehicle Routing Problem (VRP), i.e., assigning a set of orders to vehicles and planning their routes, has been intensively researched. As the assignment of orders to vehicles and the planning of their routes is already an NP-complete problem, algorithms applied in practice often fail to take into account the constraints and restrictions of real-world applications, the so-called rich VRP (rVRP), and are limited to single aspects. In this work, we incorporate the main relevant real-world constraints and requirements.
We propose a two-stage strategy and a Timeline algorithm for time windows and pause times, and apply a Genetic Algorithm (GA) and Ant Colony Optimization (ACO) individually to the problem to find optimal solutions. Our evaluation of eight different problem instances against four state-of-the-art algorithms shows that our approach handles all given constraints in a reasonable time.}, language = {en} } @article{SeufertPoigneeHossfeldetal.2022, author = {Seufert, Anika and Poign{\'e}e, Fabian and Hoßfeld, Tobias and Seufert, Michael}, title = {Pandemic in the digital age: analyzing WhatsApp communication behavior before, during, and after the COVID-19 lockdown}, series = {Humanities and Social Sciences Communications}, volume = {9}, journal = {Humanities and Social Sciences Communications}, doi = {10.1057/s41599-022-01161-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300261}, year = {2022}, abstract = {The strict restrictions introduced by the COVID-19 lockdowns, which started from March 2020, changed people's daily lives and habits on many different levels. In this work, we investigate the impact of the lockdown on the communication behavior in the mobile instant messaging application WhatsApp. Our evaluations are based on a large dataset of 2577 private chat histories with 25,378,093 messages from 51,973 users. The analysis of the one-to-one and group conversations confirms that the lockdown severely altered the communication in WhatsApp chats compared to pre-pandemic time ranges. In particular, we observe short-term effects, which caused an increased message frequency in the first lockdown months and a shifted communication activity during the day in March and April 2020. Moreover, we also see long-term effects of the ongoing pandemic situation until February 2021, which indicate a change of communication behavior towards more regular messaging, as well as a persisting change in activity during the day. The results of our work show that even anonymized chat histories can tell us a lot about people's behavior and especially behavioral changes during the COVID-19 pandemic and thus are of great relevance for behavioral researchers. Furthermore, looking at the pandemic from an Internet provider perspective, these insights can be used during the next pandemic, or if the current COVID-19 situation worsens, to adapt communication networks to the changed usage behavior early on and thus avoid network congestion.}, language = {en} } @article{BencurovaShityakovSchaacketal.2022, author = {Bencurova, Elena and Shityakov, Sergey and Schaack, Dominik and Kaltdorf, Martin and Sarukhanyan, Edita and Hilgarth, Alexander and Rath, Christin and Montenegro, Sergio and Roth, G{\"u}nter and Lopez, Daniel and Dandekar, Thomas}, title = {Nanocellulose composites as smart devices with chassis, light-directed DNA Storage, engineered electronic properties, and chip integration}, series = {Frontiers in Bioengineering and Biotechnology}, volume = {10}, journal = {Frontiers in Bioengineering and Biotechnology}, issn = {2296-4185}, doi = {10.3389/fbioe.2022.869111}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-283033}, year = {2022}, abstract = {The rapid development of green and sustainable materials opens up new possibilities in the field of applied research. Such materials include nanocellulose composites that can integrate many components into composites and provide a good chassis for smart devices.
In our study, we evaluate four approaches for turning a nanocellulose composite into an information storage or processing device: 1) nanocellulose can be a suitable carrier material and protect information stored in DNA. 2) Nucleotide-processing enzymes (polymerase and exonuclease) can be controlled by light after fusing them with light-gating domains; nucleotide substrate specificity can be changed by mutation or pH change (read-in and read-out of the information). 3) Semiconductors and electronic capabilities can be achieved: we show that nanocellulose is rendered electronic by iodine treatment replacing silicon including microstructures. Nanocellulose semiconductor properties are measured, and the resulting potential including single-electron transistors (SET) and their properties are modeled. Electric current can also be transported by DNA through G-quadruplex DNA molecules; these as well as classical silicon semiconductors can easily be integrated into the nanocellulose composite. 4) To elaborate upon miniaturization and integration for a smart nanocellulose chip device, we demonstrate pH-sensitive dyes in nanocellulose, nanopore creation, and kinase micropatterning on bacterial membranes as well as digital PCR micro-wells. Future application potential includes nano-3D printing and fast molecular processors (e.g., SETs) integrated with DNA storage and conventional electronics. This would also lead to environment-friendly nanocellulose chips for information processing as well as smart nanocellulose composites for biomedical applications and nano-factories.}, language = {en} } @article{KlemzRote2022, author = {Klemz, Boris and Rote, G{\"u}nter}, title = {Linear-Time Algorithms for Maximum-Weight Induced Matchings and Minimum Chain Covers in Convex Bipartite Graphs}, series = {Algorithmica}, volume = {84}, journal = {Algorithmica}, number = {4}, issn = {1432-0541}, doi = {10.1007/s00453-021-00904-w}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-267876}, pages = {1064-1080}, year = {2022}, abstract = {A bipartite graph G=(U,V,E) is convex if the vertices in V can be linearly ordered such that for each vertex u∈U, the neighbors of u are consecutive in the ordering of V. An induced matching H of G is a matching for which no edge of E connects endpoints of two different edges of H. We show that in a convex bipartite graph with n vertices and m weighted edges, an induced matching of maximum total weight can be computed in O(n+m) time. An unweighted convex bipartite graph has a representation of size O(n) that records for each vertex u∈U the first and last neighbor in the ordering of V. Given such a compact representation, we compute an induced matching of maximum cardinality in O(n) time. In convex bipartite graphs, maximum-cardinality induced matchings are dual to minimum chain covers. A chain cover is a covering of the edge set by chain subgraphs, that is, subgraphs that do not contain induced matchings of more than one edge. Given a compact representation, we compute a representation of a minimum chain cover in O(n) time. If no compact representation is given, the cover can be computed in O(n+m) time. All of our algorithms achieve optimal linear running time for the respective problem and model, and they improve and generalize the previous results in several ways: The best algorithms for the unweighted problem versions had a running time of O(n\(^{2}\)) (Brandst{\"a}dt et al. in Theor. Comput. Sci. 381(1-3):260-265, 2007. https://doi.org/10.1016/j.tcs.2007.04.006). 
The weighted case has not been considered before.}, language = {en} } @article{SteinhaeusserOberdoerfervonMammenetal.2022, author = {Steinhaeusser, Sophia C. and Oberd{\"o}rfer, Sebastian and von Mammen, Sebastian and Latoschik, Marc Erich and Lugrin, Birgit}, title = {Joyful adventures and frightening places - designing emotion-inducing virtual environments}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.919163}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284831}, year = {2022}, abstract = {Virtual environments (VEs) can evoke and support emotions, as experienced when playing emotionally arousing games. We theoretically approach the design of fear- and joy-evoking VEs based on a literature review of empirical studies on virtual and real environments as well as video games' reviews and content analyses. We define the design space and identify central design elements that evoke specific positive and negative emotions. Based on that, we derive and present guidelines for emotion-inducing VE design with respect to design themes, colors and textures, and lighting configurations. To validate our guidelines in two user studies, we 1) expose participants to 360° videos of VEs designed following the individual guidelines and 2) immerse them in neutral, positive, and negative emotion-inducing VEs combining all respective guidelines in Virtual Reality. The results support our theoretically derived guidelines by revealing significant differences in terms of fear and joy induction.}, language = {en} } @article{LandeckAlvarezIgarzabalUnruhetal.2022, author = {Landeck, Maximilian and Alvarez Igarz{\'a}bal, Federico and Unruh, Fabian and Habenicht, Hannah and Khoshnoud, Shiva and Wittmann, Marc and Lugrin, Jean-Luc and Latoschik, Marc Erich}, title = {Journey through a virtual tunnel: Simulated motion and its effects on the experience of time}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.1059971}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301519}, year = {2022}, abstract = {This paper examines the relationship between time and motion perception in virtual environments. Previous work has shown that the perception of motion can affect the perception of time. We developed a virtual environment that simulates motion in a tunnel and measured its effects on the estimation of the duration of time, the speed at which perceived time passes, and the illusion of self-motion, also known as vection. When large areas of the visual field move in the same direction, vection can occur; observers often perceive this as self-motion rather than motion of the environment. To generate different levels of vection and investigate its effects on time perception, we developed an abstract procedural tunnel generator. The generator can simulate different speeds and densities of tunnel sections (visibly distinguishable sections that form the virtual tunnel), as well as the degree of embodiment of the user avatar (with or without virtual hands). We exposed participants to various tunnel simulations with different durations, speeds, and densities in a remote desktop and a virtual reality (VR) laboratory study. Time passed subjectively faster under high-speed and high-density conditions in both studies. The experience of self-motion was also stronger under high-speed and high-density conditions.
Both studies revealed a significant correlation between the perceived passage of time and perceived self-motion. Subjects in the virtual reality study reported a stronger self-motion experience, a faster perceived passage of time, and shorter time estimates than subjects in the desktop study. Our results suggest that a virtual tunnel simulation can manipulate time perception in virtual reality. We will explore these results for the development of virtual reality applications for therapeutic approaches in our future work. This could be particularly useful in treating disorders like depression, autism, and schizophrenia, which are known to be associated with distortions in time perception. For example, the tunnel could be therapeutically applied by resetting patients' time perceptions by exposing them to the tunnel under different conditions, such as increasing or decreasing perceived time.}, language = {en} } @article{HeinLatoschikWienrich2022, author = {Hein, Rebecca M. and Latoschik, Marc Erich and Wienrich, Carolin}, title = {Inter- and transcultural learning in social virtual reality: a proposal for an inter- and transcultural virtual object database to be used in the implementation, reflection, and evaluation of virtual encounters}, series = {Multimodal Technologies and Interaction}, volume = {6}, journal = {Multimodal Technologies and Interaction}, number = {7}, issn = {2414-4088}, doi = {10.3390/mti6070050}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-278974}, year = {2022}, abstract = {Visual stimuli are frequently used to improve memory, language learning or perception, and understanding of metacognitive processes. However, in virtual reality (VR), there are few systematically and empirically derived databases. This paper proposes the first collection of virtual objects based on empirical evaluation for inter- and transcultural encounters between English- and German-speaking learners. We used explicit and implicit measurement methods to identify cultural associations and the degree of stereotypical perception for each virtual stimulus (n = 293) through two online studies, including native German and English-speaking participants. The analysis resulted in a final well-describable database of 128 objects (called InteractionSuitcase). In future applications, the objects can be used as a great interaction or conversation asset and behavioral measurement tool in social VR applications, especially in the field of foreign language education. For example, encounters can use the objects to describe their culture, or teachers can intuitively assess stereotyped attitudes of the encounters.}, language = {en} } @article{RiedmannSchaperLugrin2022, author = {Riedmann, Anna and Schaper, Philipp and Lugrin, Birgit}, title = {Integration of a social robot and gamification in adult learning and effects on motivation, engagement and performance}, series = {AI \& Society}, journal = {AI \& Society}, issn = {0951-5666}, doi = {10.1007/s00146-022-01514-y}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324208}, year = {2022}, abstract = {Learning is a central component of human life and essential for personal development. Therefore, utilizing new technologies in the learning context and exploring their combined potential are considered essential to support self-directed learning in a digital age. A learning environment can be expanded by various technical and content-related aspects.
Gamification in the form of elements from video games offers a potential concept to support the learning process. This can be supplemented by technology-supported learning. While the use of tablets is already widespread in the learning context, the integration of a social robot can provide new perspectives on the learning process. However, simply adding new technologies such as social robots or gamification to existing systems may not automatically result in a better learning environment. In the present study, game elements as well as a social robot were integrated separately and conjointly into a learning environment for basic Spanish skills, with a follow-up on retained knowledge. This allowed us to investigate the respective and combined effects of both expansions on motivation, engagement and learning effect. This approach should provide insights into the integration of both additions in an adult learning context. We found that the additions of game elements and the robot did not significantly improve learning, engagement or motivation. Based on these results and a literature review, we outline relevant factors for meaningful integration of gamification and social robots in learning environments in adult learning.}, language = {en} } @article{Puppe2022, author = {Puppe, Frank}, title = {Gesellschaftliche Perspektiven einer fachspezifischen KI f{\"u}r automatisierte Entscheidungen}, series = {Informatik Spektrum}, volume = {45}, journal = {Informatik Spektrum}, number = {2}, issn = {0170-6012}, doi = {10.1007/s00287-022-01443-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324197}, pages = {88-95}, year = {2022}, abstract = {Die k{\"u}nstliche Intelligenz (KI) entwickelt sich rasant und hat bereits eindrucksvolle Erfolge zu verzeichnen, darunter {\"u}bermenschliche Kompetenz in den meisten Spielen und vielen Quizshows, intelligente Suchmaschinen, individualisierte Werbung, Spracherkennung, -ausgabe und -{\"u}bersetzung auf sehr hohem Niveau und hervorragende Leistungen bei der Bildverarbeitung, u. a. in der Medizin, der optischen Zeichenerkennung, beim autonomen Fahren, aber auch beim Erkennen von Menschen auf Bildern und Videos oder bei Deep Fakes f{\"u}r Fotos und Videos. Es ist zu erwarten, dass die KI auch in der Entscheidungsfindung Menschen {\"u}bertreffen wird; ein alter Traum der Expertensysteme, der durch Lernverfahren, Big Data und Zugang zu dem gesammelten Wissen im Web in greifbare N{\"a}he r{\"u}ckt. Gegenstand dieses Beitrags sind aber weniger die technischen Entwicklungen, sondern m{\"o}gliche gesellschaftliche Auswirkungen einer spezialisierten, kompetenten KI f{\"u}r verschiedene Bereiche der autonomen, d. h. nicht nur unterst{\"u}tzenden Entscheidungsfindung: als Fußballschiedsrichter, in der Medizin, f{\"u}r richterliche Entscheidungen und sehr spekulativ auch im politischen Bereich. Dabei werden Vor- und Nachteile dieser Szenarien aus gesellschaftlicher Sicht diskutiert.}, subject = {K{\"u}nstliche Intelligenz}, language = {de} } @article{KrenzerMakowskiHekaloetal.2022, author = {Krenzer, Adrian and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Zoller, Wolfram G. 
and Hann, Alexander and Puppe, Frank}, title = {Fast machine learning annotation in the medical domain: a semi-automated video annotation tool for gastroenterologists}, series = {BioMedical Engineering OnLine}, volume = {21}, journal = {BioMedical Engineering OnLine}, number = {1}, doi = {10.1186/s12938-022-01001-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300231}, year = {2022}, abstract = {Background Machine learning, especially deep learning, is becoming more and more relevant in research and development in the medical domain. For all the supervised deep learning applications, data is the most critical factor in securing successful implementation and sustaining the progress of the machine learning model. Especially gastroenterological data, which often involves endoscopic videos, are cumbersome to annotate. Domain experts are needed to interpret and annotate the videos. To support those domain experts, we generated a framework. With this framework, instead of annotating every frame in the video sequence, experts are just performing key annotations at the beginning and the end of sequences with pathologies, e.g., visible polyps. Subsequently, non-expert annotators supported by machine learning add the missing annotations for the frames in-between. Methods In our framework, an expert reviews the video and annotates a few video frames to verify the object's annotations for the non-expert. In a second step, a non-expert has visual confirmation of the given object and can annotate all following and preceding frames with AI assistance. After the expert has finished, relevant frames will be selected and passed on to an AI model. This information allows the AI model to detect and mark the desired object on all following and preceding frames with an annotation. Therefore, the non-expert can adjust and modify the AI predictions and export the results, which can then be used to train the AI model. Results Using this framework, we were able to reduce workload of domain experts on average by a factor of 20 on our data. This is primarily due to the structure of the framework, which is designed to minimize the workload of the domain expert. Pairing this framework with a state-of-the-art semi-automated AI model enhances the annotation speed further. Through a prospective study with 10 participants, we show that semi-automated annotation using our tool doubles the annotation speed of non-expert annotators compared to a well-known state-of-the-art annotation tool. Conclusion In summary, we introduce a framework for fast expert annotation for gastroenterologists, which reduces the workload of the domain expert considerably while maintaining a very high annotation quality. The framework incorporates a semi-automated annotation system utilizing trained object detection models. 
The software and framework are open-source.}, language = {en} } @article{GlemarecLugrinBosseretal.2022, author = {Gl{\'e}marec, Yann and Lugrin, Jean-Luc and Bosser, Anne-Gwenn and Buche, C{\'e}dric and Latoschik, Marc Erich}, title = {Controlling the stage: a high-level control system for virtual audiences in Virtual Reality}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.876433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284601}, year = {2022}, abstract = {This article presents a novel method for controlling a virtual audience system (VAS) in a Virtual Reality (VR) application, called STAGE, which was originally designed for supervised public speaking training in university seminars dedicated to the preparation and delivery of scientific talks. We are interested in creating pedagogical narratives: narratives encompass affective phenomena, and rather than organizing events changing the course of a training scenario, pedagogical plans using our system focus on organizing the affects it arouses for the trainees. Efficiently controlling a virtual audience towards a specific training objective while evaluating the speaker's performance presents a challenge for a seminar instructor: controlling the virtual audience, whilst evaluating the speaker's performance, adjusting it and allowing it to quickly react to the user's behaviors and interactions, places high cognitive and physical demands on the instructor. It is indeed a critical limitation of a number of existing systems that they rely on a Wizard of Oz approach, where the tutor drives the audience in reaction to the user's performance. We address this problem by integrating with a VAS a high-level control component for tutors, which allows using predefined audience behavior rules, defining custom ones, as well as intervening during run-time for finer control of the unfolding of the pedagogical plan. At its core, this component offers a tool to program, select, modify and monitor interactive training narratives using a high-level representation. STAGE offers the following features: i) a high-level API to program pedagogical narratives focusing on a specific public speaking situation and training objectives, ii) an interactive visualization interface, iii) computation and visualization of user metrics, iv) a semi-autonomous virtual audience composed of virtual spectators with automatic reactions to the speaker and surrounding spectators while following the pedagogical plan, and v) the possibility for the instructor to embody a virtual spectator to ask questions or guide the speaker from within the Virtual Environment. We present here the design and implementation of the tutoring system and its integration in STAGE, and discuss its reception by end-users.}, language = {en} } @article{LatoschikWienrich2022, author = {Latoschik, Marc Erich and Wienrich, Carolin}, title = {Congruence and plausibility, not presence: pivotal conditions for XR experiences and effects, a novel approach}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.694433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284787}, year = {2022}, abstract = {Presence is often considered the most important quale describing the subjective feeling of being in a computer-generated and/or computer-mediated virtual environment.
The identification and separation of orthogonal presence components, i.e., the place illusion and the plausibility illusion, has been an accepted theoretical model describing Virtual Reality (VR) experiences for some time. This perspective article challenges this presence-oriented VR theory. First, we argue that a place illusion cannot be the major construct to describe the much wider scope of virtual, augmented, and mixed reality (VR, AR, MR: or XR for short). Second, we argue that there is no plausibility illusion but merely plausibility, and we derive the place illusion caused by the congruent and plausible generation of spatial cues and similarly for all the current model's so-defined illusions. Finally, we propose congruence and plausibility to become the central essential conditions in a novel theoretical model describing XR experiences and effects.}, language = {en} } @article{HentschelKobsHotho2022, author = {Hentschel, Simon and Kobs, Konstantin and Hotho, Andreas}, title = {CLIP knows image aesthetics}, series = {Frontiers in Artificial Intelligence}, volume = {5}, journal = {Frontiers in Artificial Intelligence}, issn = {2624-8212}, doi = {10.3389/frai.2022.976235}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-297150}, year = {2022}, abstract = {Most Image Aesthetic Assessment (IAA) methods use a pretrained ImageNet classification model as a base to fine-tune. We hypothesize that content classification is not an optimal pretraining task for IAA, since the task discourages the extraction of features that are useful for IAA, e.g., composition, lighting, or style. On the other hand, we argue that the Contrastive Language-Image Pretraining (CLIP) model is a better base for IAA models, since it has been trained using natural language supervision. Due to the rich nature of language, CLIP needs to learn a broad range of image features that correlate with sentences describing the image content, composition, environments, and even subjective feelings about the image. While it has been shown that CLIP extracts features useful for content classification tasks, its suitability for tasks that require the extraction of style-based features like IAA has not yet been shown. We test our hypothesis by conducting a three-step study, investigating the usefulness of features extracted by CLIP compared to features obtained from the last layer of a comparable ImageNet classification model. Each step is more computationally expensive than the previous one. First, we engineer natural language prompts that let CLIP assess an image's aesthetic without adjusting any weights in the model. To overcome the challenge that CLIP's prompting is only applicable to classification tasks, we propose a simple but effective strategy to convert multiple prompts to a continuous scalar as required when predicting an image's mean aesthetic score. Second, we train a linear regression on the AVA dataset using image features obtained by CLIP's image encoder. The resulting model outperforms a linear regression trained on features from an ImageNet classification model. It also shows competitive performance with fully fine-tuned networks based on ImageNet, while only training a single layer. Finally, by fine-tuning CLIP's image encoder on the AVA dataset, we show that CLIP only needs a fraction of training epochs to converge, while also performing better than a fine-tuned ImageNet model.
Overall, our experiments suggest that CLIP is better suited as a base model for IAA methods than ImageNet-pretrained networks.}, language = {en} } @article{TsouliasJoerissenNuechter2022, author = {Tsoulias, Nikos and J{\"o}rissen, Sven and N{\"u}chter, Andreas}, title = {An approach for monitoring temperature on fruit surface by means of thermal point cloud}, series = {MethodsX}, volume = {9}, journal = {MethodsX}, issn = {2215-0161}, doi = {10.1016/j.mex.2022.101712}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300270}, year = {2022}, abstract = {Heat and excessive solar radiation can produce abiotic stresses during apple maturation, affecting fruit quality. Therefore, the monitoring of temperature on the fruit surface (FST) over the growing period can allow the identification of thresholds above which several physiological disorders such as sunburn may occur in apple. The current approaches neglect spatial variation of FST and have reduced repeatability, resulting in unreliable predictions. In this study, LiDAR laser scanning and thermal imaging were employed to detect the temperature on the fruit surface by means of a 3D point cloud. A process for calibrating the two sensors based on an active board target and producing a 3D thermal point cloud was suggested. After calibration, the sensor system was utilised to scan the fruit trees, while temperature values assigned in the corresponding 3D point cloud were based on the extrinsic calibration. A fruit detection algorithm was then performed to segment the FST of each apple. • The approach allows the calibration of a LiDAR laser scanner with a thermal camera in order to produce a 3D thermal point cloud. • The method can be applied in apple trees for segmenting FST in 3D. The approach can also be utilised to predict several physiological disorders, including sunburn, on the fruit surface.}, language = {en} } @article{PrantlZeckBaueretal.2022, author = {Prantl, Thomas and Zeck, Timo and Bauer, Andre and Ten, Peter and Prantl, Dominik and Yahya, Ala Eddine Ben and Ifflaender, Lukas and Dmitrienko, Alexandra and Krupitzer, Christian and Kounev, Samuel}, title = {A Survey on Secure Group Communication Schemes With Focus on IoT Communication}, series = {IEEE Access}, volume = {10}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2022.3206451}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300257}, pages = {99944 -- 99962}, year = {2022}, abstract = {A key feature for the Internet of Things (IoT) is to control what content is available to each user. To handle this access management, encryption schemes can be used. Due to the diverse usage of encryption schemes, there are various realizations of 1-to-1, 1-to-n, and n-to-n schemes in the literature. This multitude of encryption methods with a wide variety of properties presents developers with the challenge of selecting the optimal method for a particular use case, which is further complicated by the fact that there is no overview of existing encryption schemes. To fill this gap, we envision a cryptography encyclopedia providing such an overview of existing encryption schemes. In this survey paper, we take a first step towards such an encyclopedia by creating a sub-encyclopedia for secure group communication (SGC) schemes, which belong to the n-to-n category. We extensively surveyed the state of the art and classified 47 different schemes.
More precisely, we provide (i) a comprehensive overview of the relevant security features, (ii) a set of relevant performance metrics, (iii) a classification for secure group communication schemes, and (iv) workflow descriptions of the 47 schemes. Moreover, we perform a detailed performance and security evaluation of the 47 secure group communication schemes. Based on this evaluation, we create a guideline for the selection of secure group communication schemes.}, language = {en} }