@article{SeufertSchroederSeufert2021, author = {Seufert, Anika and Schr{\"o}der, Svenja and Seufert, Michael}, title = {Delivering User Experience over Networks: Towards a Quality of Experience Centered Design Cycle for Improved Design of Networked Applications}, series = {SN Computer Science}, volume = {2}, journal = {SN Computer Science}, number = {6}, issn = {2661-8907}, doi = {10.1007/s42979-021-00851-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-271762}, year = {2021}, abstract = {To deliver the best user experience (UX), the human-centered design cycle (HCDC) serves as a well-established guideline to application developers. However, it does not yet cover network-specific requirements, which become increasingly crucial, as most applications deliver experience over the Internet. The missing network-centric view is provided by Quality of Experience (QoE), which could team up with UX towards an improved overall experience. By considering QoE aspects during the development process, it can be achieved that applications become network-aware by design. In this paper, the Quality of Experience Centered Design Cycle (QoE-CDC) is proposed, which provides guidelines on how to design applications with respect to network-specific requirements and QoE. Its practical value is showcased for popular application types and validated by outlining the design of a new smartphone application. We show that combining HCDC and QoE-CDC will result in an application design, which reaches a high UX and avoids QoE degradation.}, language = {en} } @article{KrupitzerEberhardingerGerostathopoulosetal.2020, author = {Krupitzer, Christian and Eberhardinger, Benedikt and Gerostathopoulos, Ilias and Raibulet, Claudia}, title = {Introduction to the special issue "Applications in Self-Aware Computing Systems and their Evaluation"}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010022}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-203439}, year = {2020}, abstract = {The joint 1st Workshop on Evaluations and Measurements in Self-Aware Computing Systems (EMSAC 2019) and Workshop on Self-Aware Computing (SeAC) was held as part of the FAS* conference alliance in conjunction with the 16th IEEE International Conference on Autonomic Computing (ICAC) and the 13th IEEE International Conference on Self-Adaptive and Self-Organizing Systems (SASO) in Ume{\aa}, Sweden on 20 June 2019. The goal of this one-day workshop was to bring together researchers and practitioners from academic environments and from the industry to share their solutions, ideas, visions, and doubts in self-aware computing systems in general and in the evaluation and measurements of such systems in particular. The workshop aimed to enable discussions, partnerships, and collaborations among the participants. This special issue follows the theme of the workshop. It contains extended versions of workshop presentations as well as additional contributions.}, language = {en} } @phdthesis{Peng2019, author = {Peng, Dongliang}, title = {An Optimization-Based Approach for Continuous Map Generalization}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-104-4}, doi = {10.25972/WUP-978-3-95826-105-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174427}, school = {W{\"u}rzburg University Press}, pages = {xv, 132}, year = {2019}, abstract = {Maps are the main tool to represent geographical information. 
Geographical information is usually scale-dependent, so users need to have access to maps at different scales. In our digital age, the access is realized by zooming. As discrete changes during the zooming tend to distract users, smooth changes are preferred. This is why some digital maps are trying to make the zooming as continuous as they can. The process of producing maps at different scales with smooth changes is called continuous map generalization. In order to produce maps of high quality, cartographers often take into account additional requirements. These requirements are transferred to models in map generalization. Optimization for map generalization is important not only because it finds optimal solutions in the sense of the models, but also because it helps us to evaluate the quality of the models. Optimization, however, becomes more delicate when we deal with continuous map generalization. In this area, there are requirements not only for a specific map but also for relations between maps at different scales. This thesis is about continuous map generalization based on optimization. First, we show the background of our research topics. Second, we find optimal sequences for aggregating land-cover areas. We compare the A\$^{\!\star}\$ algorithm and integer linear programming in completing this task. Third, we continuously generalize county boundaries to provincial boundaries based on compatible triangulations. We morph between the two sets of boundaries, using dynamic programming to compute the correspondence. Fourth, we continuously generalize buildings to built-up areas by aggregating and growing. In this work, we group buildings with the help of a minimum spanning tree. Fifth, we define vertex trajectories that allow us to morph between polylines. We require that both the angles and the edge lengths change linearly over time. As it is impossible to fulfill all of these requirements simultaneously, we mediate between them using least-squares adjustment. Sixth, we discuss the performance of some commonly used data structures for a specific spatial problem. Seventh, we conclude this thesis and present open problems.}, subject = {Generalisierung }, language = {en} } @article{PfitznerMayNuechter2018, author = {Pfitzner, Christian and May, Stefan and N{\"u}chter, Andreas}, title = {Body weight estimation for dose-finding and health monitoring of lying, standing and walking patients based on RGB-D data}, series = {Sensors}, volume = {18}, journal = {Sensors}, number = {5}, doi = {10.3390/s18051311}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176642}, pages = {1311}, year = {2018}, abstract = {This paper describes the estimation of the body weight of a person in front of an RGB-D camera. A survey of different methods for body weight estimation based on depth sensors is given. First, an estimation of people standing in front of a camera is presented. Second, an approach based on a stream of depth images is used to obtain the body weight of a person walking towards a sensor. The algorithm first extracts features from a point cloud and forwards them to an artificial neural network (ANN) to obtain an estimation of body weight. Besides the algorithm for the estimation, this paper further presents an open-access dataset based on measurements from a trauma room in a hospital as well as data from visitors of a public event. In total, the dataset contains 439 measurements.
The article illustrates the efficiency of the approach with experiments on persons lying down in a hospital, standing persons, and walking persons. Applicable scenarios for the presented algorithm include body weight-related dosing of emergency patients.}, language = {en} } @article{SirbuBeckerCaminitietal.2015, author = {S{\^i}rbu, Alina and Becker, Martin and Caminiti, Saverio and De Baets, Bernard and Elen, Bart and Francis, Louise and Gravino, Pietro and Hotho, Andreas and Ingarra, Stefano and Loreto, Vittorio and Molino, Andrea and Mueller, Juergen and Peters, Jan and Ricchiuti, Ferdinando and Saracino, Fabio and Servedio, Vito D.P. and Stumme, Gerd and Theunis, Jan and Tria, Francesca and Van den Bossche, Joris}, title = {Participatory Patterns in an International Air Quality Monitoring Initiative}, series = {PLoS ONE}, volume = {10}, journal = {PLoS ONE}, number = {8}, doi = {10.1371/journal.pone.0136763}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-151379}, pages = {e0136763}, year = {2015}, abstract = {The issue of sustainability is at the top of the political and societal agenda, being considered of extreme importance and urgency. Human individual action impacts the environment both locally (e.g., local air/water quality, noise disturbance) and globally (e.g., climate change, resource use). Urban environments represent a crucial example, with an increasing realization that the most effective way of producing a change is involving the citizens themselves in monitoring campaigns (a citizen science bottom-up approach). This is possible by developing novel technologies and IT infrastructures enabling large citizen participation. Here, in the wider framework of one of the first such projects, we show results from an international competition where citizens were involved in mobile air pollution monitoring using low cost sensing devices, combined with a web-based game to monitor perceived levels of pollution. Measures of shift in perceptions over the course of the campaign are provided, together with insights into participatory patterns emerging from this study. Interesting effects related to inertia and to direct involvement in measurement activities rather than indirect information exposure are also highlighted, indicating that direct involvement can enhance learning and environmental awareness. In the future, this could result in better adoption of policies towards decreasing pollution.}, language = {en} } @phdthesis{Fleszar2018, author = {Fleszar, Krzysztof}, title = {Network-Design Problems in Graphs and on the Plane}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-076-4 (Print)}, doi = {10.25972/WUP-978-3-95826-077-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-154904}, school = {W{\"u}rzburg University Press}, pages = {xi, 204}, year = {2018}, abstract = {A network design problem defines an infinite set whose elements, called instances, describe relationships and network constraints. It asks for an algorithm that, given an instance of this set, designs a network that respects the given constraints and at the same time optimizes some given criterion. In my thesis, I develop algorithms whose solutions are optimum or close to an optimum value within some guaranteed bound. I also examine the computational complexity of these problems. Problems from two vast areas are considered: graphs and the Euclidean plane.
In the Maximum Edge Disjoint Paths problem, we are given a graph and a subset of vertex pairs that are called terminal pairs. We are asked for a set of paths where the endpoints of each path form a terminal pair. The constraint is that any two paths share at most one inner vertex. The optimization criterion is to maximize the cardinality of the set. In the hard-capacitated k-Facility Location problem, we are given an integer k and a complete graph where the distances obey a given metric and where each node has two numerical values: a capacity and an opening cost. We are asked for a subset of k nodes, called facilities, and an assignment of all the nodes, called clients, to the facilities. The constraint is that the number of clients assigned to a facility cannot exceed the facility's capacity value. The optimization criterion is to minimize the total cost which consists of the total opening cost of the facilities and the total distance between the clients and the facilities they are assigned to. In the Stabbing problem, we are given a set of axis-aligned rectangles in the plane. We are asked for a set of horizontal line segments such that, for every rectangle, there is a line segment crossing its left and right edge. The optimization criterion is to minimize the total length of the line segments. In the k-Colored Non-Crossing Euclidean Steiner Forest problem, we are given an integer k and a finite set of points in the plane where each point has one of k colors. For every color, we are asked for a drawing that connects all the points of the same color. The constraint is that drawings of different colors are not allowed to cross each other. The optimization criterion is to minimize the total length of the drawings. In the Minimum Rectilinear Polygon for Given Angle Sequence problem, we are given an angle sequence of left (+90°) turns and right (-90°) turns. We are asked for an axis-parallel simple polygon where the angles of the vertices yield the given sequence when walking around the polygon in counter-clockwise manner. The optimization criteria considered are to minimize the perimeter, the area, and the size of the axis-parallel bounding box of the polygon.}, subject = {Euklidische Ebene}, language = {en} } @phdthesis{Wojtkowiak2018, author = {Wojtkowiak, Harald}, title = {Planungssystem zur Steigerung der Autonomie von Kleinstsatelliten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-163569}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Der Betrieb von Satelliten wird sich in Zukunft gravierend {\"a}ndern. Die bisher ausge{\"u}bte konventionelle Vorgehensweise, bei der die Planung der vom Satelliten auszuf{\"u}hrenden Aktivit{\"a}ten sowie die Kontrolle hier{\"u}ber ausschließlich vom Boden aus erfolgen, st{\"o}ßt bei heutigen Anwendungen an ihre Grenzen. Im schlimmsten Fall verhindert dieser Umstand sogar die Erschließung bisher ungenutzter M{\"o}glichkeiten. Der Gewinn eines Satelliten, sei es in Form wissenschaftlicher Daten oder der Vermarktung satellitengest{\"u}tzter Dienste, wird daher nicht optimal ausgesch{\"o}pft. Die Ursache f{\"u}r dieses Problem l{\"a}sst sich im Grunde auf eine ausschlaggebende Tatsache zur{\"u}ckf{\"u}hren: Konventionelle Satelliten k{\"o}nnen ihr Verhalten, d.h. die Folge ihrer T{\"a}tigkeiten, nicht eigenst{\"a}ndig anpassen. 
Stattdessen erstellt das Bedienpersonal am Boden - vor allem die Operatoren - mit Hilfe von Planungssoftware feste Ablaufpl{\"a}ne, die dann in Form von Kommandosequenzen von den Bodenstationen aus an die jeweiligen Satelliten hochgeladen werden. Dort werden die Befehle lediglich {\"u}berpr{\"u}ft, interpretiert und strikt ausgef{\"u}hrt. Die Abarbeitung erfolgt linear. Situationsbedingte {\"A}nderungen, wie sie vergleichsweise bei der Codeausf{\"u}hrung von Softwareprogrammen durch Kontrollkonstrukte, zum Beispiel Schleifen und Verzweigungen, {\"u}blich sind, sind typischerweise nicht vorgesehen. Der Operator ist daher die einzige Instanz, die das Verhalten des Satelliten mittels Kommandierung, per Upload, beeinflussen kann, und auch nur dann, wenn ein direkter Funkkontakt zwischen Satellit und Bodenstation besteht. Die dadurch m{\"o}glichen Reaktionszeiten des Satelliten liegen bestenfalls bei einigen Sekunden, falls er sich im Wirkungsbereich der Bodenstation befindet. Außerhalb des Kontaktfensters kann sich die Zeitschranke, gegeben durch den Orbit und die aktuelle Position des Satelliten, von einigen Minuten bis hin zu einigen Stunden erstrecken. Die Signallaufzeiten der Funk{\"u}bertragung verl{\"a}ngern die Reaktionszeiten um weitere Sekunden im erdnahen Bereich. Im interplanetaren Raum erstrecken sich die Zeitspannen aufgrund der immensen Entfernungen sogar auf mehrere Minuten. Dadurch bedingt liegt die derzeit technologisch m{\"o}gliche, bodengest{\"u}tzte, Reaktionszeit von Satelliten bestenfalls im Bereich von einigen Sekunden. Diese Einschr{\"a}nkung stellt ein schweres Hindernis f{\"u}r neuartige Satellitenmissionen, bei denen insbesondere nichtdeterministische und kurzzeitige Ph{\"a}nomene (z.B. Blitze und Meteoreintritte in die Erdatmosph{\"a}re) Gegenstand der Beobachtungen sind, dar. Die langen Reaktionszeiten des konventionellen Satellitenbetriebs verhindern die Realisierung solcher Missionen, da die verz{\"o}gerte Reaktion erst erfolgt, nachdem das zu beobachtende Ereignis bereits abgeschlossen ist. Die vorliegende Dissertation zeigt eine M{\"o}glichkeit, das durch die langen Reaktionszeiten entstandene Problem zu l{\"o}sen, auf. Im Zentrum des L{\"o}sungsansatzes steht dabei die Autonomie. Im Wesentlichen geht es dabei darum, den Satelliten mit der F{\"a}higkeit auszustatten, sein Verhalten, d.h. die Folge seiner T{\"a}tigkeiten, eigenst{\"a}ndig zu bestimmen bzw. zu {\"a}ndern. Dadurch wird die direkte Abh{\"a}ngigkeit des Satelliten vom Operator bei Reaktionen aufgehoben. Im Grunde wird der Satellit in die Lage versetzt, sich selbst zu kommandieren. Die Idee der Autonomie wurde im Rahmen der zugrunde liegenden Forschungsarbeiten umgesetzt. Das Ergebnis ist ein autonomes Planungssystem. Dabei handelt es sich um ein Softwaresystem, mit dem sich autonomes Verhalten im Satelliten realisieren l{\"a}sst. Es kann an unterschiedliche Satellitenmissionen angepasst werden. Ferner deckt es verschiedene Aspekte des autonomen Satellitenbetriebs, angefangen bei der generellen Entscheidungsfindung der T{\"a}tigkeiten, {\"u}ber die zeitliche Ablaufplanung unter Einbeziehung von Randbedingungen (z.B. Ressourcen) bis hin zur eigentlichen Ausf{\"u}hrung, d.h. Kommandierung, ab. Das Planungssystem kommt als Anwendung in ASAP, einer autonomen Sensorplattform, zum Einsatz. Es ist ein optisches System und dient der Detektion von kurzzeitigen Ph{\"a}nomenen und Ereignissen in der Erdatmosph{\"a}re. 
Die Forschungsarbeiten an dem autonomen Planungssystem, an ASAP sowie an anderen zu diesen in Bezug stehenden Systemen wurden an der Professur f{\"u}r Raumfahrttechnik des Lehrstuhls Informatik VIII der Julius-Maximilians-Universit{\"a}t W{\"u}rzburg durchgef{\"u}hrt.}, subject = {Planungssystem}, language = {de} } @article{KaiserLeschRotheetal.2020, author = {Kaiser, Dennis and Lesch, Veronika and Rothe, Julian and Strohmeier, Michael and Spieß, Florian and Krupitzer, Christian and Montenegro, Sergio and Kounev, Samuel}, title = {Towards Self-Aware Multirotor Formations}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010007}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200572}, pages = {7}, year = {2020}, abstract = {In the present day, unmanned aerial vehicles become seemingly more popular every year, but, without regulation of the increasing number of these vehicles, the air space could become chaotic and uncontrollable. In this work, a framework is proposed to combine self-aware computing with multirotor formations to address this problem. The self-awareness is envisioned to improve the dynamic behavior of multirotors. The formation scheme that is implemented is called platooning, which arranges vehicles in a string behind the lead vehicle and is proposed to bring order into chaotic air space. Since multirotors define a general category of unmanned aerial vehicles, the focus of this thesis are quadcopters, platforms with four rotors. A modification for the LRA-M self-awareness loop is proposed and named Platooning Awareness. The implemented framework is able to offer two flight modes that enable waypoint following and the self-awareness module to find a path through scenarios, where obstacles are present on the way, onto a goal position. The evaluation of this work shows that the proposed framework is able to use self-awareness to learn about its environment, avoid obstacles, and can successfully move a platoon of drones through multiple scenarios.}, language = {en} } @article{GrohmannHerbstChalbanietal.2020, author = {Grohmann, Johannes and Herbst, Nikolas and Chalbani, Avi and Arian, Yair and Peretz, Noam and Kounev, Samuel}, title = {A Taxonomy of Techniques for SLO Failure Prediction in Software Systems}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010010}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200594}, pages = {10}, year = {2020}, abstract = {Failure prediction is an important aspect of self-aware computing systems. Therefore, a multitude of different approaches has been proposed in the literature over the past few years. In this work, we propose a taxonomy for organizing works focusing on the prediction of Service Level Objective (SLO) failures. Our taxonomy classifies related work along the dimensions of the prediction target (e.g., anomaly detection, performance prediction, or failure prediction), the time horizon (e.g., detection or prediction, online or offline application), and the applied modeling type (e.g., time series forecasting, machine learning, or queueing theory). The classification is derived based on a systematic mapping of relevant papers in the area. 
Additionally, we give an overview of different techniques in each sub-group and address remaining challenges in order to guide future research.}, language = {en} } @article{OberdoerferLatoschik2019, author = {Oberd{\"o}rfer, Sebastian and Latoschik, Marc Erich}, title = {Knowledge encoding in game mechanics: transfer-oriented knowledge learning in desktop-3D and VR}, series = {International Journal of Computer Games Technology}, volume = {2019}, journal = {International Journal of Computer Games Technology}, doi = {10.1155/2019/7626349}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-201159}, pages = {7626349}, year = {2019}, abstract = {Affine Transformations (ATs) are a complex and abstract learning content. Encoding the AT knowledge in Game Mechanics (GMs) achieves a repetitive knowledge application and audiovisual demonstration. Playing a serious game providing these GMs leads to motivating and effective knowledge learning. Using immersive Virtual Reality (VR) has the potential to even further increase the serious game's learning outcome and learning quality. This paper compares the effectiveness and efficiency of desktop-3D and VR in respect to the achieved learning outcome. Also, the present study analyzes the effectiveness of an enhanced audiovisual knowledge encoding and the provision of a debriefing system. The results validate the effectiveness of the knowledge encoding in GMs to achieve knowledge learning. The study also indicates that VR is beneficial for the overall learning quality and that an enhanced audiovisual encoding has only a limited effect on the learning outcome.}, language = {en} } @phdthesis{Baier2018, author = {Baier, Pablo A.}, title = {Simulator for Minimally Invasive Vascular Interventions: Hardware and Software}, isbn = {978-3-945459-22-5}, doi = {10.25972/OPUS-16119}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-161190}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {118}, year = {2018}, abstract = {A complete simulation system is proposed that can be used as an educational tool by physicians in training basic skills of Minimally Invasive Vascular Interventions. In the first part, a surface model is developed to assemble arteries having a planar segmentation. It is based on Sweep Surfaces and can be extended to T- and Y-like bifurcations. A continuous force vector field is described, representing the interaction between the catheter and the surface. The computation time of the force field is almost unaffected when the resolution of the artery is increased. The mechanical properties of arteries play an essential role in the study of the circulatory system dynamics, which has been becoming increasingly important in the treatment of cardiovascular diseases. In Virtual Reality Simulators, it is crucial to have a tissue model that responds in real time. In this work, the arteries are discretized by a two dimensional mesh and the nodes are connected by three kinds of linear springs. Three tissue layers (Intima, Media, Adventitia) are considered and, starting from the stretch-energy density, some of the elasticity tensor components are calculated. The physical model linearizes and homogenizes the material response, but it still contemplates the geometric nonlinearity. In general, if the arterial stretch varies by 1\% or less, then the agreement between the linear and nonlinear models is trustworthy. In the last part, the physical model of the wire proposed by Konings is improved. 
As a result, a simpler and more stable method is obtained to calculate the equilibrium configuration of the wire. In addition, a geometrical method is developed to perform relaxations. It is particularly useful when the wire is hindered in the physical method because of the boundary conditions. The physical and the geometrical methods are merged, resulting in efficient relaxations. Tests show that the shape of the virtual wire agrees with the experiment. The proposed algorithm allows real-time executions and the hardware to assemble the simulator has a low cost.}, subject = {Computersimulation}, language = {en} } @inproceedings{OPUS4-24577, title = {Proceedings of the 1st Games Technology Summit}, editor = {von Mammen, Sebastian and Klemke, Roland and Lorber, Martin}, isbn = {978-3-945459-36-2}, doi = {10.25972/OPUS-24577}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-245776}, pages = {vi, 46}, year = {2021}, abstract = {As part of the Clash of Realities International Conference on the Technology and Theory of Digital Games, the Game Technology Summit is a premium venue to bring together experts from academia and industry to disseminate state-of-the-art research on trending technology topics in digital games. In this first iteration of the Game Technology Summit, we specifically paid attention on how the successes in AI in Natural User Interfaces have been impacting the games industry (industry track) and which scientific, state-of-the-art ideas and approaches are currently pursued (scientific track).}, subject = {Veranstaltung}, language = {en} } @article{DuLauterbachLietal.2020, author = {Du, Shitong and Lauterbach, Helge A. and Li, Xuyou and Demisse, Girum G. and Borrmann, Dorit and N{\"u}chter, Andreas}, title = {Curvefusion — A Method for Combining Estimated Trajectories with Applications to SLAM and Time-Calibration}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {23}, issn = {1424-8220}, doi = {10.3390/s20236918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219988}, year = {2020}, abstract = {Mapping and localization of mobile robots in an unknown environment are essential for most high-level operations like autonomous navigation or exploration. This paper presents a novel approach for combining estimated trajectories, namely curvefusion. The robot used in the experiments is equipped with a horizontally mounted 2D profiler, a constantly spinning 3D laser scanner and a GPS module. The proposed algorithm first combines trajectories from different sensors to optimize poses of the planar three degrees of freedom (DoF) trajectory, which is then fed into continuous-time simultaneous localization and mapping (SLAM) to further improve the trajectory. While state-of-the-art multi-sensor fusion methods mainly focus on probabilistic methods, our approach instead adopts a deformation-based method to optimize poses. To this end, a similarity metric for curved shapes is introduced into the robotics community to fuse the estimated trajectories. Additionally, a shape-based point correspondence estimation method is applied to the multi-sensor time calibration. Experiments show that the proposed fusion method can achieve relatively better accuracy, even if the error of the trajectory before fusion is large, which demonstrates that our method can still maintain a certain degree of accuracy in an environment where typical pose estimation methods have poor performance. 
In addition, the proposed time-calibration method also achieves high accuracy in estimating point correspondences.}, language = {en} } @article{GlemarecLugrinBosseretal.2022, author = {Gl{\´e}marec, Yann and Lugrin, Jean-Luc and Bosser, Anne-Gwenn and Buche, C{\´e}dric and Latoschik, Marc Erich}, title = {Controlling the stage: a high-level control system for virtual audiences in Virtual Reality}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.876433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284601}, year = {2022}, abstract = {This article presents a novel method for controlling a virtual audience system (VAS) in Virtual Reality (VR) application, called STAGE, which has been originally designed for supervised public speaking training in university seminars dedicated to the preparation and delivery of scientific talks. We are interested in creating pedagogical narratives: narratives encompass affective phenomenon and rather than organizing events changing the course of a training scenario, pedagogical plans using our system focus on organizing the affects it arouses for the trainees. Efficiently controlling a virtual audience towards a specific training objective while evaluating the speaker's performance presents a challenge for a seminar instructor: the high level of cognitive and physical demands required to be able to control the virtual audience, whilst evaluating speaker's performance, adjusting and allowing it to quickly react to the user's behaviors and interactions. It is indeed a critical limitation of a number of existing systems that they rely on a Wizard of Oz approach, where the tutor drives the audience in reaction to the user's performance. We address this problem by integrating with a VAS a high-level control component for tutors, which allows using predefined audience behavior rules, defining custom ones, as well as intervening during run-time for finer control of the unfolding of the pedagogical plan. At its core, this component offers a tool to program, select, modify and monitor interactive training narratives using a high-level representation. The STAGE offers the following features: i) a high-level API to program pedagogical narratives focusing on a specific public speaking situation and training objectives, ii) an interactive visualization interface iii) computation and visualization of user metrics, iv) a semi-autonomous virtual audience composed of virtual spectators with automatic reactions to the speaker and surrounding spectators while following the pedagogical plan V) and the possibility for the instructor to embody a virtual spectator to ask questions or guide the speaker from within the Virtual Environment. We present here the design, and implementation of the tutoring system and its integration in STAGE, and discuss its reception by end-users.}, language = {en} } @phdthesis{Loeffler2021, author = {L{\"o}ffler, Andre}, title = {Constrained Graph Layouts: Vertices on the Outer Face and on the Integer Grid}, edition = {1. 
Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-146-4}, doi = {10.25972/WUP-978-3-95826-147-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215746}, school = {W{\"u}rzburg University Press}, pages = {viii, 161}, year = {2021}, abstract = {Constraining graph layouts - that is, restricting the placement of vertices and the routing of edges to obey certain constraints - is common practice in graph drawing. In this book, we discuss algorithmic results on two different restriction types: placing vertices on the outer face and on the integer grid. For the first type, we look into the outer k-planar and outer k-quasi-planar graphs, as well as giving a linear-time algorithm to recognize full and closed outer k-planar graphs via Monadic Second-order Logic. For the second type, we consider the problem of transferring a given planar drawing onto the integer grid while preserving the original drawing's topology; we also generalize a variant of Cauchy's rigidity theorem for orthogonal polyhedra of genus 0 to those of arbitrary genus.}, subject = {Graphenzeichnen}, language = {en} } @article{LinsenmannMaerzDufneretal.2021, author = {Linsenmann, Thomas and M{\"a}rz, Alexander and Dufner, Vera and Stetter, Christian and Weiland, Judith and Westermaier, Thomas}, title = {Optimization of radiation settings for angiography using 3D fluoroscopy for imaging of intracranial aneurysms}, series = {Computer Assisted Surgery}, volume = {26}, journal = {Computer Assisted Surgery}, number = {1}, doi = {10.1080/24699322.2021.1894240}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259251}, pages = {22-30}, year = {2021}, abstract = {Mobile 3D fluoroscopes have become increasingly available in neurosurgical operating rooms. We recently reported their use for imaging cerebral vascular malformations and aneurysms. This study was conducted to evaluate various radiation settings for the imaging of cerebral aneurysms before and after surgical occlusion. Eighteen patients with cerebral aneurysms with the indication for surgical clipping were included in this prospective analysis. Before surgery, the patients were randomized into one of three different scan protocols according to the default settings of the 3D fluoroscope: Group 1: 110 kV, 80 mA (enhanced cranial mode), group 2: 120 kV, 64 mA (lumbar spine mode), group 3: 120 kV, 25 mA (head/neck settings). Prior to surgery, a rotational fluoroscopy scan (duration 24 s) was performed without contrast agent followed by another scan with 50 ml of intravenous iodine contrast agent. The image files of both scans were transferred to an Apple PowerMac(R) workstation, subtracted and reconstructed using OsiriX(R) MD 10.0 software. The procedure was repeated after clip placement. The image quality regarding preoperative aneurysm configuration and postoperative assessment of aneurysm occlusion and vessel patency was analyzed by 2 independent reviewers using a 6-grade scale. This technique quickly supplies images of adequate quality to depict intracranial aneurysms and distal vessel patency after aneurysm clipping. Regarding these features, a further optimization of our previous protocol seems possible by lowering the voltage and increasing the tube current. For quick intraoperative assessment, image subtraction does not seem necessary. Thus, a native scan without a contrast agent is not necessary.
Further optimization may be possible using a different contrast injection protocol.}, language = {en} } @techreport{RossiMaurelliUnnithanetal.2021, author = {Rossi, Angelo Pio and Maurelli, Francesco and Unnithan, Vikram and Dreger, Hendrik and Mathewos, Kedus and Pradhan, Nayan and Corbeanu, Dan-Andrei and Pozzobon, Riccardo and Massironi, Matteo and Ferrari, Sabrina and Pernechele, Claudia and Paoletti, Lorenzo and Simioni, Emanuele and Maurizio, Pajola and Santagata, Tommaso and Borrmann, Dorit and N{\"u}chter, Andreas and Bredenbeck, Anton and Zevering, Jasper and Arzberger, Fabian and Reyes Mantilla, Camilo Andr{\´e}s}, title = {DAEDALUS - Descent And Exploration in Deep Autonomy of Lava Underground Structures}, isbn = {978-3-945459-33-1}, issn = {1868-7466}, doi = {10.25972/OPUS-22791}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-227911}, pages = {188}, year = {2021}, abstract = {The DAEDALUS mission concept aims at exploring and characterising the entrance and initial part of Lunar lava tubes within a compact, tightly integrated spherical robotic device, with a complementary payload set and autonomous capabilities. The mission concept addresses specifically the identification and characterisation of potential resources for future ESA exploration, the local environment of the subsurface and its geologic and compositional structure. A sphere is ideally suited to protect sensors and scientific equipment in rough, uneven environments. It will house laser scanners, cameras and ancillary payloads. The sphere will be lowered into the skylight and will explore the entrance shaft, associated caverns and conduits. Lidar (light detection and ranging) systems produce 3D models with high spatial accuracy independent of lighting conditions and visible features. Hence this will be the primary exploration toolset within the sphere. The additional payload that can be accommodated in the robotic sphere consists of camera systems with panoramic lenses and scanners such as multi-wavelength or single-photon scanners. A moving mass will trigger movements. The tether for lowering the sphere will be used for data communication and powering the equipment during the descending phase. Furthermore, the connector tether-sphere will host a WIFI access point, such that data of the conduit can be transferred to the surface relay station. During the exploration phase, the robot will be disconnected from the cable, and will use wireless communication. Emergency autonomy software will ensure that in case of loss of communication, the robot will continue the nominal mission.}, subject = {Mond}, language = {en} } @phdthesis{Niebler2019, author = {Niebler, Thomas}, title = {Extracting and Learning Semantics from Social Web Data}, doi = {10.25972/OPUS-17866}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178666}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Making machines understand natural language is a dream of mankind that has existed for a very long time. Early attempts at programming machines to converse with humans in a supposedly intelligent way relied on phrase lists and simple keyword matching. However, such approaches cannot provide semantically adequate answers, as they do not consider the specific meaning of the conversation. Thus, if we want to enable machines to actually understand language, we need to be able to access semantically relevant background knowledge.
For this, it is possible to query so-called ontologies, which are large networks containing knowledge about real-world entities and their semantic relations. However, creating such ontologies is a tedious task, as often extensive expert knowledge is required. Thus, we need to find ways to automatically construct and update ontologies that fit human intuition of semantics and semantic relations. More specifically, we need to determine semantic entities and find relations between them. While this is usually done on large corpora of unstructured text, previous work has shown that we can at least facilitate the first issue of extracting entities by considering special data such as tagging data or human navigational paths. Here, we do not need to detect the actual semantic entities, as they are already provided because of the way those data are collected. Thus we can mainly focus on the problem of assessing the degree of semantic relatedness between tags or web pages. However, there exist several issues which need to be overcome if we want to approximate human intuition of semantic relatedness. For this, it is necessary to represent words and concepts in a way that allows easy and highly precise semantic characterization. This also largely depends on the quality of data from which these representations are constructed. In this thesis, we extract semantic information from both tagging data created by users of social tagging systems and human navigation data in different semantic-driven social web systems. Our main goal is to construct high quality and robust vector representations of words which can then be used to measure the relatedness of semantic concepts. First, we show that navigation in the social media systems Wikipedia and BibSonomy is driven by a semantic component. After this, we discuss and extend methods to model the semantic information in tagging data as low-dimensional vectors. Furthermore, we show that tagging pragmatics influences different facets of tagging semantics. We then investigate the usefulness of human navigational paths in several different settings on Wikipedia and BibSonomy for measuring semantic relatedness. Finally, we propose a metric-learning based algorithm to adapt pre-trained word embeddings to datasets containing human judgment of semantic relatedness. This work contributes to the field of studying semantic relatedness between words by proposing methods to extract semantic relatedness from web navigation, learn high-quality and low-dimensional word representations from tagging data, and to learn semantic relatedness from any kind of vector representation by exploiting human feedback. Applications first and foremost lie in ontology learning for the Semantic Web, but also in semantic search and query expansion.}, subject = {Semantik}, language = {en} } @article{HeinLatoschikWienrich2022, author = {Hein, Rebecca M.
and Latoschik, Marc Erich and Wienrich, Carolin}, title = {Inter- and transcultural learning in social virtual reality: a proposal for an inter- and transcultural virtual object database to be used in the implementation, reflection, and evaluation of virtual encounters}, series = {Multimodal Technologies and Interaction}, volume = {6}, journal = {Multimodal Technologies and Interaction}, number = {7}, issn = {2414-4088}, doi = {10.3390/mti6070050}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-278974}, year = {2022}, abstract = {Visual stimuli are frequently used to improve memory, language learning or perception, and understanding of metacognitive processes. However, in virtual reality (VR), there are few systematically and empirically derived databases. This paper proposes the first collection of virtual objects based on empirical evaluation for inter- and transcultural encounters between English- and German-speaking learners. We used explicit and implicit measurement methods to identify cultural associations and the degree of stereotypical perception for each virtual stimulus (n = 293) through two online studies, including native German and English-speaking participants. The analysis resulted in a final well-describable database of 128 objects (called InteractionSuitcase). In future applications, the objects can be used as a great interaction or conversation asset and behavioral measurement tool in social VR applications, especially in the field of foreign language education. For example, encounters can use the objects to describe their culture, or teachers can intuitively assess stereotyped attitudes of the encounters.}, language = {en} } @article{DjebkoPuppeKayal2019, author = {Djebko, Kirill and Puppe, Frank and Kayal, Hakan}, title = {Model-based fault detection and diagnosis for spacecraft with an application for the SONATE triple cube nano-satellite}, series = {Aerospace}, volume = {6}, journal = {Aerospace}, number = {10}, issn = {2226-4310}, doi = {10.3390/aerospace6100105}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-198836}, pages = {105}, year = {2019}, abstract = {The correct behavior of spacecraft components is the foundation of unhindered mission operation. However, no technical system is free of wear and degradation. A malfunction of one single component might significantly alter the behavior of the whole spacecraft and may even lead to a complete mission failure. Therefore, abnormal component behavior must be detected early in order to be able to perform counter measures. A dedicated fault detection system can be employed, as opposed to classical health monitoring, performed by human operators, to decrease the response time to a malfunction. In this paper, we present a generic model-based diagnosis system, which detects faults by analyzing the spacecraft's housekeeping data. The observed behavior of the spacecraft components, given by the housekeeping data, is compared to their expected behavior, obtained through simulation. Each discrepancy between the observed and the expected behavior of a component generates a so-called symptom. Given the symptoms, the diagnoses are derived by computing sets of components whose malfunction might cause the observed discrepancies.
We demonstrate the applicability of the diagnosis system by using modified housekeeping data of the qualification model of an actual spacecraft and outline the advantages and drawbacks of our approach.}, language = {en} } @article{ZimmererFischbachLatoschik2018, author = {Zimmerer, Chris and Fischbach, Martin and Latoschik, Marc Erich}, title = {Semantic Fusion for Natural Multimodal Interfaces using Concurrent Augmented Transition Networks}, series = {Multimodal Technologies and Interaction}, volume = {2}, journal = {Multimodal Technologies and Interaction}, number = {4}, issn = {2414-4088}, doi = {10.3390/mti2040081}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197573}, year = {2018}, abstract = {Semantic fusion is a central requirement of many multimodal interfaces. Procedural methods like finite-state transducers and augmented transition networks have proven to be beneficial to implement semantic fusion. They are compliant with rapid development cycles that are common for the development of user interfaces, in contrast to machine-learning approaches that require time-costly training and optimization. We identify seven fundamental requirements for the implementation of semantic fusion: Action derivation, continuous feedback, context-sensitivity, temporal relation support, access to the interaction context, as well as the support of chronologically unsorted and probabilistic input. A subsequent analysis reveals, however, that there is currently no solution for fulfilling the latter two requirements. As the main contribution of this article, we thus present the Concurrent Cursor concept to compensate these shortcomings. In addition, we showcase a reference implementation, the Concurrent Augmented Transition Network (cATN), that validates the concept's feasibility in a series of proof of concept demonstrations as well as through a comparative benchmark. The cATN fulfills all identified requirements and fills the lack amongst previous solutions. It supports the rapid prototyping of multimodal interfaces by means of five concrete traits: Its declarative nature, the recursiveness of the underlying transition network, the network abstraction constructs of its description language, the utilized semantic queries, and an abstraction layer for lexical information. Our reference implementation was and is used in various student projects, theses, as well as master-level courses. It is openly available and showcases that non-experts can effectively implement multimodal interfaces, even for non-trivial applications in mixed and virtual reality.}, language = {en} } @article{LodaKrebsDanhofetal.2019, author = {Loda, Sophia and Krebs, Jonathan and Danhof, Sophia and Schreder, Martin and Solimando, Antonio G. and Strifler, Susanne and Rasche, Leo and Kort{\"u}m, Martin and Kerscher, Alexander and Knop, Stefan and Puppe, Frank and Einsele, Hermann and Bittrich, Max}, title = {Exploration of artificial intelligence use with ARIES in multiple myeloma research}, series = {Journal of Clinical Medicine}, volume = {8}, journal = {Journal of Clinical Medicine}, number = {7}, issn = {2077-0383}, doi = {10.3390/jcm8070999}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197231}, pages = {999}, year = {2019}, abstract = {Background: Natural language processing (NLP) is a powerful tool supporting the generation of Real-World Evidence (RWE). There is no NLP system that enables the extensive querying of parameters specific to multiple myeloma (MM) out of unstructured medical reports. 
We therefore created an MM-specific ontology to accelerate the information extraction (IE) out of unstructured text. Methods: Our MM ontology consists of extensive MM-specific and hierarchically structured attributes and values. We implemented "A Rule-based Information Extraction System" (ARIES) that uses this ontology. We evaluated ARIES on 200 randomly selected medical reports of patients diagnosed with MM. Results: Our system achieved a high F1-Score of 0.92 on the evaluation dataset with a precision of 0.87 and recall of 0.98. Conclusions: Our rule-based IE system enables the comprehensive querying of medical reports. The IE accelerates the extraction of data and enables clinicians to generate RWE on hematological issues faster. RWE helps clinicians to make decisions in an evidence-based manner. Our tool easily accelerates the integration of research evidence into everyday clinical practice.}, language = {en} } @article{LopezArreguinMontenegro2019, author = {Lopez-Arreguin, A. J. R. and Montenegro, S.}, title = {Improving engineering models of terramechanics for planetary exploration}, series = {Results in Engineering}, volume = {3}, journal = {Results in Engineering}, doi = {10.1016/j.rineng.2019.100027}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-202490}, pages = {100027}, year = {2019}, abstract = {This short letter proposes more consolidated explicit solutions for the forces and torques acting on typical rover wheels, which can be used as a method to determine their average mobility characteristics in planetary soils. The closed-loop solutions build on one of the verified methods but, in contrast to the previous ones, the observables are decoupled, requiring a smaller number of physical parameters to be measured. As a result, we show that, with knowledge of terrain properties, wheel driving performance relies on a single observable only. Because of their generality, the equations formulated here can have further implications in the autonomy and control of rovers and in planetary soil characterization.}, language = {en} } @phdthesis{Budig2018, author = {Budig, Benedikt}, title = {Extracting Spatial Information from Historical Maps: Algorithms and Interaction}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-092-4}, doi = {10.25972/WUP-978-3-95826-093-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-160955}, school = {W{\"u}rzburg University Press}, pages = {viii, 160}, year = {2018}, abstract = {Historical maps are fascinating documents and a valuable source of information for scientists of various disciplines. Many of these maps are available as scanned bitmap images, but in order to make them searchable in useful ways, a structured representation of the contained information is desirable. This book deals with the extraction of spatial information from historical maps. This cannot be expected to be solved fully automatically (since it involves difficult semantics), but is also too tedious to be done manually at scale. The methodology used in this book combines the strengths of both computers and humans: it describes efficient algorithms to largely automate information extraction tasks and pairs these algorithms with smart user interactions to handle what is not understood by the algorithm.
The effectiveness of this approach is shown for various kinds of spatial documents from the 16th to the early 20th century.}, subject = {Karte}, language = {en} } @article{RodriguesWeissHewigetal.2021, author = {Rodrigues, Johannes and Weiß, Martin and Hewig, Johannes and Allen, John J. B.}, title = {EPOS: EEG Processing Open-Source Scripts}, series = {Frontiers in Neuroscience}, volume = {15}, journal = {Frontiers in Neuroscience}, issn = {1662-453X}, doi = {10.3389/fnins.2021.660449}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240221}, year = {2021}, abstract = {Background: Since the replication crisis, standardization has become even more important in psychological science and neuroscience. As a result, many methods are being reconsidered, and researchers' degrees of freedom in these methods are being discussed as a potential source of inconsistencies across studies. New Method: With the aim of addressing these subjectivity issues, we have been working on a tutorial-like EEG (pre-)processing pipeline to achieve an automated method based on the semi-automated analysis proposed by Delorme and Makeig. Results: Two scripts are presented and explained step-by-step to perform basic, informed ERP and frequency-domain analyses, including data export to statistical programs and visual representations of the data. The open-source software EEGlab in MATLAB is used as the data handling platform, but scripts based on code provided by Mike Cohen (2014) are also included. Comparison with existing methods: This accompanying tutorial-like article explains and shows how the processing of our automated pipeline affects the data and addresses especially beginners in EEG analysis, as other (pre-)processing chains mostly target rather informed users in specialized areas or cover only parts of a complete procedure. In this context, we compared our pipeline with a selection of existing approaches. Conclusion: The need for standardization and replication is evident, yet it is equally important to control the plausibility of the suggested solution by data exploration. Here, we provide the community with a tool to enhance the understanding and capability of EEG analysis. We aim to contribute to comprehensive and reliable analyses for neuroscientific research.}, language = {en} } @article{HirthSeufertLangeetal.2021, author = {Hirth, Matthias and Seufert, Michael and Lange, Stanislav and Meixner, Markus and Tran-Gia, Phuoc}, title = {Performance evaluation of hybrid crowdsensing and fixed sensor systems for event detection in urban environments}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {17}, issn = {1424-8220}, doi = {10.3390/s21175880}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-245245}, year = {2021}, abstract = {Crowdsensing offers a cost-effective way to collect large amounts of environmental sensor data; however, the spatial distribution of crowdsensing sensors can hardly be influenced, as the participants carry the sensors, and, additionally, the quality of the crowdsensed data can vary significantly. Hybrid systems that use mobile users in conjunction with fixed sensors might help to overcome these limitations, as such systems allow assessing the quality of the submitted crowdsensed data and provide sensor values where no crowdsensing data are typically available.
In this work, we first used a simulation study to analyze a simple crowdsensing system concerning the detection performance of spatial events to highlight the potential and limitations of a pure crowdsourcing system. The results indicate that even if only a small share of inhabitants participate in crowdsensing, events that have locations correlated with the population density can be easily and quickly detected using such a system. On the contrary, events with uniformly randomly distributed locations are much harder to detect using a simple crowdsensing-based approach. A second evaluation shows that hybrid systems improve the detection probability and time. Finally, we illustrate how to compute the minimum number of fixed sensors for the given detection time thresholds in our exemplary scenario.}, language = {en} } @techreport{LhamoNguyenFitzek2022, type = {Working Paper}, author = {Lhamo, Osel and Nguyen, Giang T. and Fitzek, Frank H. P.}, title = {Virtual Queues for QoS Compliance of Haptic Data Streams in Teleoperation}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28076}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280762}, pages = {4}, year = {2022}, abstract = {Tactile Internet aims at allowing perceived real-time interactions between humans and machines. This requires satisfying a stringent latency requirement of haptic data streams whose data rates vary drastically as the results of perceptual codecs. This introduces a complex problem for the underlying network infrastructure to fulfill the pre-defined level of Quality of Service (QoS). However, novel networking hardware with data plane programming capability allows processing packets differently and opens up a new opportunity. For example, a dynamic and network-aware resource management strategy can help satisfy the QoS requirements of different priority flows without wasting precious bandwidth. This paper introduces virtual queues for service differentiation between different types of traffic streams, leveraging protocol independent switch architecture (PISA). We propose coordinating the management of all the queues and dynamically adapting their sizes to minimize packet loss and delay due to network congestion and ensure QoS compliance.}, subject = {Datennetz}, language = {en} } @techreport{VomhoffGeisslerHossfeld2022, type = {Working Paper}, author = {Vomhoff, Viktoria and Geißler, Stefan and Hoßfeld, Tobias}, title = {Identification of Signaling Patterns in Mobile IoT Signaling Traffic}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28081}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280819}, pages = {4}, year = {2022}, abstract = {We attempt to identify sequences of signaling dialogs, to strengthen our understanding of the signaling behavior of IoT devices by examining a dataset containing over 270.000 distinct IoT devices whose signaling traffic has been observed over a 31-day period in a 2G network [4]. 
We propose a set of rules that allows the assembly of signaling dialogs into so-called sessions in order to identify common patterns and lay the foundation for future research in the areas of traffic modeling and anomaly detection.}, subject = {Datennetz}, language = {en} } @misc{FunkenTscherner2019, author = {Funken, Matthias and Tscherner, Michael}, title = {Jahresbericht 2018 des Rechenzentrums der Universit{\"a}t W{\"u}rzburg}, edition = {1. Auflage}, organization = {Rechenzentrum (Universit{\"a}t W{\"u}rzburg)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188265}, pages = {76}, year = {2019}, abstract = {Eine {\"U}bersicht {\"u}ber die Aktivit{\"a}ten des Rechenzentrums im Jahr 2018.}, subject = {Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, language = {de} } @misc{OPUS4-15355, title = {Jahresbericht 2016 des Rechenzentrums der Universit{\"a}t W{\"u}rzburg}, edition = {1. Auflage}, organization = {Rechenzentrum (Universit{\"a}t W{\"u}rzburg)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153558}, pages = {72}, year = {2017}, abstract = {Das Dokument umfasst eine j{\"a}hrliche Zusammenfassung der Aktivit{\"a}ten des Rechenzentrums als zentraler IT-Dienstleister der Universit{\"a}t W{\"u}rzburg}, subject = {Jahresbericht}, language = {de} } @misc{FunkenTscherner2018, author = {Funken, Matthias and Tscherner, Michael}, title = {Jahresbericht 2017 des Rechenzentrums der Universit{\"a}t W{\"u}rzburg}, edition = {1. Auflage}, organization = {Rechenzentrum (Universit{\"a}t W{\"u}rzburg)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-168537}, pages = {68}, year = {2018}, abstract = {Eine {\"U}bersicht {\"u}ber die Aktivit{\"a}ten des Rechenzentrums im Jahr 2017.}, subject = {Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, language = {de} } @article{SchererFleishmanJonesetal.2021, author = {Scherer, Marc and Fleishman, Sarel J. and Jones, Patrik R. and Dandekar, Thomas and Bencurova, Elena}, title = {Computational Enzyme Engineering Pipelines for Optimized Production of Renewable Chemicals}, series = {Frontiers in Bioengineering and Biotechnology}, volume = {9}, journal = {Frontiers in Bioengineering and Biotechnology}, issn = {2296-4185}, doi = {10.3389/fbioe.2021.673005}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240598}, year = {2021}, abstract = {To enable a sustainable supply of chemicals, novel biotechnological solutions are required that replace the reliance on fossil resources. One potential solution is to utilize tailored biosynthetic modules for the metabolic conversion of CO2 or organic waste to chemicals and fuel by microorganisms. Currently, it is challenging to commercialize biotechnological processes for renewable chemical biomanufacturing because of a lack of highly active and specific biocatalysts. As experimental methods to engineer biocatalysts are time- and cost-intensive, it is important to establish efficient and reliable computational tools that can speed up the identification or optimization of selective, highly active, and stable enzyme variants for utilization in the biotechnological industry. Here, we review and suggest combinations of effective state-of-the-art software and online tools available for computational enzyme engineering pipelines to optimize metabolic pathways for the biosynthesis of renewable chemicals. 
Using examples relevant for biotechnology, we explain the underlying principles of enzyme engineering and design and illuminate future directions for automated optimization of biocatalysts for the assembly of synthetic metabolic pathways.}, language = {en} } @article{KammererGoesterReichertetal.2021, author = {Kammerer, Klaus and G{\"o}ster, Manuel and Reichert, Manfred and Pryss, R{\"u}diger}, title = {Ambalytics: a scalable and distributed system architecture concept for bibliometric network analyses}, series = {Future Internet}, volume = {13}, journal = {Future Internet}, number = {8}, issn = {1999-5903}, doi = {10.3390/fi13080203}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-244916}, year = {2021}, abstract = {A deep understanding about a field of research is valuable for academic researchers. In addition to technical knowledge, this includes knowledge about subareas, open research questions, and social communities (networks) of individuals and organizations within a given field. With bibliometric analyses, researchers can acquire quantitatively valuable knowledge about a research area by using bibliographic information on academic publications provided by bibliographic data providers. Bibliometric analyses include the calculation of bibliometric networks to describe affiliations or similarities of bibliometric entities (e.g., authors) and group them into clusters representing subareas or communities. Calculating and visualizing bibliometric networks is a nontrivial and time-consuming data science task that requires highly skilled individuals. In addition to domain knowledge, researchers must often provide statistical knowledge and programming skills or use software tools having limited functionality and usability. In this paper, we present the ambalytics bibliometric platform, which reduces the complexity of bibliometric network analysis and the visualization of results. It accompanies users through the process of bibliometric analysis and eliminates the need for individuals to have programming skills and statistical knowledge, while preserving advanced functionality, such as algorithm parameterization, for experts. As a proof-of-concept, and as an example of bibliometric analyses outcomes, the calculation of research fronts networks based on a hybrid similarity approach is shown. Being designed to scale, ambalytics makes use of distributed systems concepts and technologies. It is based on the microservice architecture concept and uses the Kubernetes framework for orchestration. This paper presents the initial building block of a comprehensive bibliometric analysis platform called ambalytics, which aims at a high usability for users as well as scalability.}, language = {en} } @article{OberdoerferBirnstielLatoschiketal.2021, author = {Oberd{\"o}rfer, Sebastian and Birnstiel, Sandra and Latoschik, Marc Erich and Grafe, Silke}, title = {Mutual Benefits: Interdisciplinary Education of Pre-Service Teachers and HCI Students in VR/AR Learning Environment Design}, series = {Frontiers in Education}, volume = {6}, journal = {Frontiers in Education}, issn = {2504-284X}, doi = {10.3389/feduc.2021.693012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241612}, year = {2021}, abstract = {The successful development and classroom integration of Virtual (VR) and Augmented Reality (AR) learning environments requires competencies and content knowledge with respect to media didactics and the respective technologies. 
The paper discusses a pedagogical concept specifically aiming at the interdisciplinary education of pre-service teachers in collaboration with human-computer interaction students. The students' overarching goal is the interdisciplinary realization and integration of VR/AR learning environments in teaching and learning concepts. To assist this approach, we developed a specific tutorial guiding the developmental process. We evaluate and validate the effectiveness of the overall pedagogical concept by analyzing the change in attitudes regarding 1) the use of VR/AR for educational purposes and in competencies and content knowledge regarding 2) media didactics and 3) technology. Our results indicate a significant improvement in the knowledge of media didactics and technology. We further report on four STEM learning environments that have been developed during the seminar.}, language = {en} } @article{NaglerNaegeleGillietal.2018, author = {Nagler, Matthias and N{\"a}gele, Thomas and Gilli, Christian and Fragner, Lena and Korte, Arthur and Platzer, Alexander and Farlow, Ashley and Nordborg, Magnus and Weckwerth, Wolfram}, title = {Eco-Metabolomics and Metabolic Modeling: Making the Leap From Model Systems in the Lab to Native Populations in the Field}, series = {Frontiers in Plant Science}, volume = {9}, journal = {Frontiers in Plant Science}, number = {1556}, issn = {1664-462X}, doi = {10.3389/fpls.2018.01556}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-189560}, year = {2018}, abstract = {Experimental high-throughput analysis of molecular networks is a central approach to characterize the adaptation of plant metabolism to the environment. However, recent studies have demonstrated that it is hardly possible to predict in situ metabolic phenotypes from experiments under controlled conditions, such as growth chambers or greenhouses. This is particularly due to the high molecular variance of in situ samples induced by environmental fluctuations. An approach of functional metabolome interpretation of field samples would be desirable in order to be able to identify and trace back the impact of environmental changes on plant metabolism. To test the applicability of metabolomics studies for a characterization of plant populations in the field, we have identified and analyzed in situ samples of nearby grown natural populations of Arabidopsis thaliana in Austria. A. thaliana is the primary molecular biological model system in plant biology with one of the best functionally annotated genomes representing a reference system for all other plant genome projects. The genomes of these novel natural populations were sequenced and phylogenetically compared to a comprehensive genome database of A. thaliana ecotypes. Experimental results on primary and secondary metabolite profiling and genotypic variation were functionally integrated by a data mining strategy, which combines statistical output of metabolomics data with genome-derived biochemical pathway reconstruction and metabolic modeling. Correlations of biochemical model predictions and population-specific genetic variation indicated varying strategies of metabolic regulation on a population level which enabled the direct comparison, differentiation, and prediction of metabolic adaptation of the same species to different habitats. 
These differences were most pronounced at organic and amino acid metabolism as well as at the interface of primary and secondary metabolism and allowed for the direct classification of population-specific metabolic phenotypes within geographically contiguous sampling sites.}, language = {en} } @article{PetschkeStaab2018, author = {Petschke, Danny and Staab, Torsten E.M.}, title = {DLTPulseGenerator: a library for the simulation of lifetime spectra based on detector-output pulses}, series = {SoftwareX}, volume = {7}, journal = {SoftwareX}, doi = {10.1016/j.softx.2018.04.002}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176883}, pages = {122-128}, year = {2018}, abstract = {The quantitative analysis of lifetime spectra relevant in both life and materials sciences presents one of the ill-posed inverse problems and, hence, leads to most stringent requirements on the hardware specifications and the analysis algorithms. Here we present DLTPulseGenerator, a library written in native C++ 11, which provides a simulation of lifetime spectra according to the measurement setup. The simulation is based on pairs of non-TTL detector output-pulses. Those pulses require the Constant Fraction Principle (CFD) for the determination of the exact timing signal and, thus, the calculation of the time difference i.e. the lifetime. To verify the functionality, simulation results were compared to experimentally obtained data using Positron Annihilation Lifetime Spectroscopy (PALS) on pure tin.}, language = {en} } @misc{Hochmuth2022, type = {Master Thesis}, author = {Hochmuth, Christian Andreas}, title = {Innovative Software in Unternehmen: Strategie und Erfolgsfaktoren f{\"u}r Einf{\"u}hrungsprojekte}, doi = {10.25972/OPUS-28841}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-288411}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Innovative Software kann die Position eines Unternehmens im Wettbewerb sichern. Die Einf{\"u}hrung innovativer Software ist aber alles andere als einfach. Denn obgleich die technischen Aspekte offensichtlicher sind, dominieren organisationale Aspekte. Zu viele Softwareprojekte schlagen fehl, da die Einf{\"u}hrung nicht gelingt, trotz Erf{\"u}llung technischer Anforderungen. Vor diesem Hintergrund ist das Forschungsziel der Masterarbeit, Risiken und Erfolgsfaktoren f{\"u}r die Einf{\"u}hrung innovativer Software in Unternehmen zu finden, eine Strategie zu formulieren und dabei die Bedeutung von Schl{\"u}sselpersonen zu bestimmen.}, subject = {Innovationsmanagement}, language = {de} } @article{OsmanogluKhaledAlSeiariAlKhoorietal.2021, author = {Osmanoglu, {\"O}zge and Khaled AlSeiari, Mariam and AlKhoori, Hasa Abduljaleel and Shams, Shabana and Bencurova, Elena and Dandekar, Thomas and Naseem, Muhammad}, title = {Topological Analysis of the Carbon-Concentrating CETCH Cycle and a Photorespiratory Bypass Reveals Boosted CO\(_2\)-Sequestration by Plants}, series = {Frontiers in Bioengineering and Biotechnology}, volume = {9}, journal = {Frontiers in Bioengineering and Biotechnology}, issn = {2296-4185}, doi = {10.3389/fbioe.2021.708417}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249260}, year = {2021}, abstract = {Synthetically designed alternative photorespiratory pathways increase the biomass of tobacco and rice plants. Likewise, some in planta-tested synthetic carbon-concentrating cycles (CCCs) hold promise to increase plant biomass while diminishing atmospheric carbon dioxide burden. 
Taking these individual contributions into account, we hypothesize that the integration of bypasses and CCCs will further increase plant productivity. To test this in silico, we reconstructed a metabolic model by integrating photorespiration and photosynthesis with the synthetically designed alternative pathway 3 (AP3) enzymes and transporters. We calculated fluxes of the native plant system and those of AP3 combined with the inhibition of the glycolate/glycerate transporter by using the YANAsquare package. The activity values corresponding to each enzyme in photosynthesis, photorespiration, and for synthetically designed alternative pathways were estimated. Next, we modeled the effect of the crotonyl-CoA/ethylmalonyl-CoA/hydroxybutyryl-CoA cycle (CETCH), which is a set of natural and synthetically designed enzymes that fix CO₂ manifold more than the native Calvin-Benson-Bassham (CBB) cycle. We compared estimated fluxes across various pathways in the native model and under an introduced CETCH cycle. Moreover, we combined CETCH and AP3-w/plgg1RNAi, and calculated the fluxes. We anticipate higher carbon dioxide-harvesting potential in plants with an AP3 bypass and CETCH-AP3 combination. We discuss the in vivo implementation of these strategies for the improvement of C3 plants and in natural high carbon harvesters.}, language = {en} } @article{vonMammenWagnerKnoteetal.2017, author = {von Mammen, Sebastian Albrecht and Wagner, Daniel and Knote, Andreas and Taskin, Umut}, title = {Interactive simulations of biohybrid systems}, series = {Frontiers in Robotics and AI}, volume = {4}, journal = {Frontiers in Robotics and AI}, issn = {2296-9144}, doi = {10.3389/frobt.2017.00050}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-195755}, year = {2017}, abstract = {In this article, we present approaches to interactive simulations of biohybrid systems. These simulations are comprised of two major computational components: (1) agent-based developmental models that retrace organismal growth and unfolding of technical scaffoldings and (2) interfaces to explore these models interactively. Simulations of biohybrid systems allow us to fast-forward and experience their evolution over time based on our design decisions involving the choice, configuration and initial states of the deployed biological and robotic actors as well as their interplay with the environment. We briefly introduce the concept of swarm grammars, an agent-based extension of L-systems for retracing growth processes and structural artifacts. Next, we review an early augmented reality prototype for designing and projecting biohybrid system simulations into real space. In addition to models that retrace plant behaviors, we specify swarm grammar agents to braid structures in a self-organizing manner. Based on this model, both robotic and plant-driven braiding processes can be experienced and explored in virtual worlds. We present a corresponding user interface for use in virtual reality. As we present interactive models concerning rather diverse description levels, we only ensured their principal capacity for interaction but did not consider efficiency analyses beyond prototypic operation.
We conclude this article with an outlook on future works on melding reality and virtuality to drive the design and deployment of biohybrid systems.}, language = {en} } @article{AppelScholzMuelleretal.2015, author = {Appel, Mirjam and Scholz, Claus-J{\"u}rgen and M{\"u}ller, Tobias and Dittrich, Marcus and K{\"o}nig, Christian and Bockstaller, Marie and Oguz, Tuba and Khalili, Afshin and Antwi-Adjei, Emmanuel and Schauer, Tamas and Margulies, Carla and Tanimoto, Hiromu and Yarali, Ayse}, title = {Genome-Wide Association Analyses Point to Candidate Genes for Electric Shock Avoidance in Drosophila melanogaster}, series = {PLoS ONE}, volume = {10}, journal = {PLoS ONE}, number = {5}, doi = {10.1371/journal.pone.0126986}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-152006}, pages = {e0126986}, year = {2015}, abstract = {Electric shock is a common stimulus for nociception-research and the most widely used reinforcement in aversive associative learning experiments. Yet, nothing is known about the mechanisms it recruits at the periphery. To help fill this gap, we undertook a genome-wide association analysis using 38 inbred Drosophila melanogaster strains, which avoided shock to varying extents. We identified 514 genes whose expression levels and/or sequences covaried with shock avoidance scores. We independently scrutinized 14 of these genes using mutants, validating the effect of 7 of them on shock avoidance. This emphasizes the value of our candidate gene list as a guide for follow-up research. In addition, by integrating our association results with external protein-protein interaction data we obtained a shock avoidance-associated network of 38 genes. Both this network and the original candidate list contained a substantial number of genes that affect mechanosensory bristles, which are hairlike organs distributed across the fly's body. These results may point to a potential role for mechanosensory bristles in shock sensation. Thus, we not only provide a first list of candidate genes for shock avoidance, but also point to an interesting new hypothesis on nociceptive mechanisms.}, language = {en} } @article{FisselerMuellerWeichert2017, author = {Fisseler, Denis and M{\"u}ller, Gerfrid G. W. and Weichert, Frank}, title = {Web-Based scientific exploration and analysis of 3D scanned cuneiform datasets for collaborative research}, series = {Informatics}, volume = {4}, journal = {Informatics}, number = {4}, issn = {2227-9709}, doi = {10.3390/informatics4040044}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197958}, pages = {44}, year = {2017}, abstract = {The three-dimensional cuneiform script is one of the oldest known writing systems and a central object of research in Ancient Near Eastern Studies and Hittitology. An important step towards the understanding of the cuneiform script is the provision of opportunities and tools for joint analysis. This paper presents an approach that contributes to this challenge: a collaborative compatible web-based scientific exploration and analysis of 3D scanned cuneiform fragments. The WebGL-based concept incorporates methods for compressed web-based content delivery of large 3D datasets and high quality visualization. To maximize accessibility and to promote acceptance of 3D techniques in the field of Hittitology, the introduced concept is integrated into the Hethitologie-Portal Mainz, an established leading online research resource in the field of Hittitology, which until now exclusively included 2D content.
The paper shows that increasing the availability of 3D scanned archaeological data through a web-based interface can provide significant scientific value while at the same time finding a trade-off between copyright-induced restrictions and scientific usability.}, language = {en} } @article{PetschkeStaab2019, author = {Petschke, Danny and Staab, Torsten E.M.}, title = {DDRS4PALS: a software for the acquisition and simulation of lifetime spectra using the DRS4 evaluation board}, series = {SoftwareX}, volume = {10}, journal = {SoftwareX}, doi = {10.1016/j.softx.2019.100261}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-202276}, pages = {100261}, year = {2019}, abstract = {Lifetime techniques are applied to diverse fields of study including materials sciences, semiconductor physics, biology, molecular biophysics and photochemistry. Here we present DDRS4PALS, a software for the acquisition and simulation of lifetime spectra using the DRS4 evaluation board (Paul Scherrer Institute, Switzerland) for time-resolved measurements and digitization of detector output pulses. Artifact-afflicted pulses can be corrected or rejected prior to the lifetime calculation to provide the generation of high-quality lifetime spectra, which are crucial for a profound analysis, i.e. the decomposition of the true information. Moreover, the pulses can be streamed on an (external) hard drive during the measurement and subsequently downloaded in the offline mode without being connected to the hardware. This allows the generation of various lifetime spectra at different configurations from one single measurement and, hence, a meaningful comparison in terms of analyzability and quality. Parallel processing and an integrated JavaScript-based language provide convenient options to accelerate and automate time-consuming processes such as lifetime spectra simulations.}, language = {en} } @inproceedings{DaviesDewellHarvey2021, author = {Davies, Richard and Dewell, Nathan and Harvey, Carlo}, title = {A framework for interactive, autonomous and semantic dialogue generation in games}, series = {Proceedings of the 1st Games Technology Summit}, booktitle = {Proceedings of the 1st Games Technology Summit}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-246023}, pages = {16-28}, year = {2021}, abstract = {Immersive virtual environments provide users with the opportunity to escape from the real world, but scripted dialogues can disrupt the presence within the world the user is trying to escape within. Both Non-Playable Character (NPC) to Player and NPC to NPC dialogue can be non-natural and the reliance on responding with pre-defined dialogue does not always meet the player's emotional expectations or provide responses appropriate to the given context or world states. This paper investigates the application of Artificial Intelligence (AI) and Natural Language Processing to generate dynamic human-like responses within a themed virtual world.
Each thematic has been analysed against human-generated responses for the same seed and demonstrates invariance of rating across a range of model sizes, but shows an effect of theme and the size of the corpus used for fine-tuning the context for the game world.}, language = {en} } @inproceedings{SanusiKlemke2021, author = {Sanusi, Khaleel Asyraaf Mat and Klemke, Roland}, title = {Immersive Multimodal Environments for Psychomotor Skills Training}, series = {Proceedings of the 1st Games Technology Summit}, booktitle = {Proceedings of the 1st Games Technology Summit}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-246016}, pages = {9-15}, year = {2021}, abstract = {Modern immersive multimodal technologies enable the learners to completely get immersed in various learning situations in a way that feels like experiencing an authentic learning environment. These environments also allow the collection of multimodal data, which can be used with artificial intelligence to further improve the immersion and learning outcomes. The use of artificial intelligence has been widely explored for the interpretation of multimodal data collected from multiple sensors, thus giving insights to support learners' performance by providing personalised feedback. In this paper, we present a conceptual approach for creating immersive learning environments, integrated with a multi-sensor setup to help learners improve their psychomotor skills in a remote setting.}, language = {en} } @phdthesis{Nogatz2023, author = {Nogatz, Falco}, title = {Defining and Implementing Domain-Specific Languages with Prolog}, doi = {10.25972/OPUS-30187}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301872}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The landscape of today's programming languages is manifold. With the diversity of applications, the difficulty of adequately addressing and specifying the used programs increases. This often leads to newly designed and implemented domain-specific languages. They enable domain experts to express knowledge in their preferred format, resulting in more readable and concise programs. Due to its flexible and declarative syntax without reserved keywords, the logic programming language Prolog is particularly suitable for defining and embedding domain-specific languages. This thesis addresses the questions and challenges that arise when integrating domain-specific languages into Prolog. We compare the two approaches to define them either externally or internally, and provide assisting tools for each. The grammar of a formal language is usually defined in the extended Backus-Naur form. In this work, we handle this formalism as a domain-specific language in Prolog, and define term expansions that allow to translate it into equivalent definite clause grammars. We present the package library(dcg4pt) for SWI-Prolog, which enriches them by an additional argument to automatically process the term's corresponding parse tree. To simplify the work with definite clause grammars, we visualise their application by a web-based tracer. The external integration of domain-specific languages requires the programmer to keep the grammar, parser, and interpreter in sync. In many cases, domain-specific languages can instead be directly embedded into Prolog by providing appropriate operator definitions. In addition, we propose syntactic extensions for Prolog to expand its expressiveness, for instance to state logic formulas with their connectives verbatim.
This allows the use of all tools that were originally written for Prolog, for instance code linters and editors with syntax highlighting. We present the package library(plammar), a standard-compliant parser for Prolog source code, written in Prolog. It is able to automatically infer from example sentences the required operator definitions with their classes and precedences as well as the required Prolog language extensions. As a result, we can automatically answer the question: Is it possible to model these example sentences as valid Prolog clauses, and how? We discuss and apply the two approaches to internal and external integrations for several domain-specific languages, namely the extended Backus-Naur form, GraphQL, XPath, and a controlled natural language to represent expert rules in if-then form. The created toolchain with library(dcg4pt) and library(plammar) yields new application opportunities for static Prolog source code analysis, which we also present.}, subject = {PROLOG }, language = {en} } @techreport{LohGeisslerHossfeld2022, type = {Working Paper}, author = {Loh, Frank and Geißler, Stefan and Hoßfeld, Tobias}, title = {LoRaWAN Network Planning in Smart Environments: Towards Reliability, Scalability, and Cost Reduction}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28082}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280829}, pages = {4}, year = {2022}, abstract = {The goal in this work is to present guidance for LoRaWAN planning to improve overall reliability for message transmissions and scalability. At the end, the cost component is discussed. Therefore, a five-step approach is presented that helps to plan a LoRaWAN deployment step by step: Based on the device locations, an initial gateway placement is suggested followed by in-depth frequency and channel access planning. After an initial planning phase, updates for channel access and the initial gateway planning are suggested that should also be done periodically during network operation. Since current gateway placement approaches are only studied with random channel access, there is a lot of potential in the cell planning phase. Furthermore, the performance of different channel access approaches is highly dependent on network load, and thus on cell size and sensor density. Last, the influence of different cell planning ideas on expected costs is discussed.}, subject = {Datennetz}, language = {en} } @article{LohMehlingHossfeld2022, author = {Loh, Frank and Mehling, Noah and Hoßfeld, Tobias}, title = {Towards LoRaWAN without data loss: studying the performance of different channel access approaches}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {2}, issn = {1424-8220}, doi = {10.3390/s22020691}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-302418}, year = {2022}, abstract = {The Long Range Wide Area Network (LoRaWAN) is one of the fastest-growing Internet of Things (IoT) access protocols. It operates in the license-free 868 MHz band and gives everyone the possibility to create their own small sensor networks. The drawback of this technology is often unscheduled or random channel access, which leads to message collisions and potential data loss. For that reason, recent literature studies alternative approaches for LoRaWAN channel access.
In this work, state-of-the-art random channel access is compared with alternative approaches from the literature by means of collision probability. Furthermore, a time-scheduled channel access methodology is presented to completely avoid collisions in LoRaWAN. For this approach, an exhaustive simulation study was conducted and the performance was evaluated with random access cross-traffic. In a general theoretical analysis, the limits of the time-scheduled approach with respect to compliance with duty cycle regulations in LoRaWAN are discussed.}, language = {en} } @article{KernKullmannGanaletal.2021, author = {Kern, Florian and Kullmann, Peter and Ganal, Elisabeth and Korwisi, Kristof and Stingl, Ren{\´e} and Niebling, Florian and Latoschik, Marc Erich}, title = {Off-The-Shelf Stylus: Using XR Devices for Handwriting and Sketching on Physically Aligned Virtual Surfaces}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.684498}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260219}, year = {2021}, abstract = {This article introduces the Off-The-Shelf Stylus (OTSS), a framework for 2D interaction (in 3D) as well as for handwriting and sketching with digital pen, ink, and paper on physically aligned virtual surfaces in Virtual, Augmented, and Mixed Reality (VR, AR, MR: XR for short). OTSS supports self-made XR styluses based on consumer-grade six-degrees-of-freedom XR controllers and commercially available styluses. The framework provides separate modules for three basic but vital features: 1) The stylus module provides stylus construction and calibration features. 2) The surface module provides surface calibration and visual feedback features for virtual-physical 2D surface alignment using our so-called 3ViSuAl procedure, and surface interaction features. 3) The evaluation suite provides a comprehensive test bed combining technical measurements for precision, accuracy, and latency with extensive usability evaluations including handwriting and sketching tasks based on established visuomotor, graphomotor, and handwriting research. The framework's development is accompanied by an extensive open-source reference implementation targeting the Unity game engine using an Oculus Rift S headset and Oculus Touch controllers. The development compares three low-cost and low-tech options to equip controllers with a tip and includes a web browser-based surface providing support for interacting, handwriting, and sketching. The evaluation of the reference implementation based on the OTSS framework identified an average stylus precision of 0.98 mm (SD = 0.54 mm) and an average surface accuracy of 0.60 mm (SD = 0.32 mm) in a seated VR environment. The time for displaying the stylus movement as digital ink on the web browser surface in VR was 79.40 ms on average (SD = 23.26 ms), including the physical controller's motion-to-photon latency visualized by its virtual representation (M = 42.57 ms, SD = 15.70 ms). The usability evaluation (N = 10) revealed a low task load, high usability, and high user experience. Participants successfully reproduced given shapes and created legible handwriting, indicating that the OTSS and its reference implementation are ready for everyday use.
We provide source code access to our implementation, including stylus and surface calibration and surface interaction features, making it easy to reuse, extend, adapt and/or replicate previous results (https://go.uniwue.de/hci-otss).}, language = {en} } @article{BartlWenningerWolfetal.2021, author = {Bartl, Andrea and Wenninger, Stephan and Wolf, Erik and Botsch, Mario and Latoschik, Marc Erich}, title = {Affordable but not cheap: a case study of the effects of two 3D-reconstruction methods of virtual humans}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.694617}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260492}, year = {2021}, abstract = {Realistic and lifelike 3D-reconstruction of virtual humans has various exciting and important use cases. Our and others' appearances have notable effects on ourselves and our interaction partners in virtual environments, e.g., on acceptance, preference, trust, believability, behavior (the Proteus effect), and more. Today, multiple approaches for the 3D-reconstruction of virtual humans exist. They significantly vary in terms of the degree of achievable realism, the technical complexities, and finally, the overall reconstruction costs involved. This article compares two 3D-reconstruction approaches with very different hardware requirements. The high-cost solution uses a typical complex and elaborated camera rig consisting of 94 digital single-lens reflex (DSLR) cameras. The recently developed low-cost solution uses a smartphone camera to create videos that capture multiple views of a person. Both methods use photogrammetric reconstruction and template fitting with the same template model and differ in their adaptation to the method-specific input material. Each method generates high-quality virtual humans ready to be processed, animated, and rendered by standard XR simulation and game engines such as Unreal or Unity. We compare the results of the two 3D-reconstruction methods in an immersive virtual environment against each other in a user study. Our results indicate that the virtual humans from the low-cost approach are perceived similarly to those from the high-cost approach regarding the perceived similarity to the original, human-likeness, beauty, and uncanniness, despite significant differences in the objectively measured quality. The perceived feeling of change of the own body was higher for the low-cost virtual humans. Quality differences were perceived more strongly for one's own body than for other virtual humans.}, language = {en} } @article{WienrichLatoschik2021, author = {Wienrich, Carolin and Latoschik, Marc Erich}, title = {eXtended Artificial Intelligence: New Prospects of Human-AI Interaction Research}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.686783}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260296}, year = {2021}, abstract = {Artificial Intelligence (AI) covers a broad spectrum of computational problems and use cases. Many of those implicate profound and sometimes intricate questions of how humans interact or should interact with AIs. Moreover, many users or future users do have abstract ideas of what AI is, significantly depending on the specific embodiment of AI applications. Human-centered-design approaches would suggest evaluating the impact of different embodiments on human perception of and interaction with AI. 
Such an approach is difficult to realize due to the sheer complexity of application fields and embodiments in reality. However, here XR opens new possibilities to research human-AI interactions. The article's contribution is twofold: First, it provides a theoretical treatment and model of human-AI interaction based on an XR-AI continuum as a framework for and a perspective of different approaches of XR-AI combinations. It motivates XR-AI combinations as a method to learn about the effects of prospective human-AI interfaces and shows why the combination of XR and AI fruitfully contributes to a valid and systematic investigation of human-AI interactions and interfaces. Second, the article provides two exemplary experiments investigating the aforementioned approach for two distinct AI-systems. The first experiment reveals an interesting gender effect in human-robot interaction, while the second experiment reveals an Eliza effect of a recommender system. Here the article introduces two paradigmatic implementations of the proposed XR testbed for human-AI interactions and interfaces and shows how a valid and systematic investigation can be conducted. In sum, the article opens new perspectives on how XR benefits human-centered AI design and development.}, language = {en} }