@phdthesis{Zuefle2022, author = {Z{\"u}fle, Marwin Otto}, title = {Proactive Critical Event Prediction based on Monitoring Data with Focus on Technical Systems}, doi = {10.25972/OPUS-25575}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-255757}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {The importance of proactive and timely prediction of critical events is steadily increasing, whether in the manufacturing industry or in private life. In the past, machines in the manufacturing industry were often maintained based on a regular schedule or threshold violations, which is no longer competitive as it causes unnecessary costs and downtime. In contrast, the predictions of critical events in everyday life are often much more concealed and hardly noticeable to the private individual, unless the critical event occurs. For instance, our electricity provider has to ensure that we, as end users, are always supplied with sufficient electricity, or our favorite streaming service has to guarantee that we can watch our favorite series without interruptions. For this purpose, they have to constantly analyze what the current situation is, how it will develop in the near future, and how they have to react in order to cope with future conditions without causing power outages or video stalling. In order to analyze the performance of a system, monitoring mechanisms are often integrated to observe characteristics that describe the workload and the state of the system and its environment. Reactive systems typically employ thresholds, utility functions, or models to determine the current state of the system. However, such reactive systems cannot estimate future events proactively; they can only determine events as they occur. In the case of critical events, reactive determination of the current system state is futile, whereas a proactive system could have predicted this event in advance and enabled timely countermeasures. To achieve proactivity, the system requires estimates of future system states. Given the gap between design time and runtime, it is typically not possible to use expert knowledge to a priori model all situations a system might encounter at runtime. Therefore, prediction methods must be integrated into the system. Depending on the available monitoring data and the complexity of the prediction task, either time series forecasting in combination with thresholding has to be applied or more sophisticated machine and deep learning models have to be trained. Although numerous forecasting methods have been proposed in the literature, these methods have their advantages and disadvantages depending on the characteristics of the time series under consideration. Therefore, expert knowledge is required to decide which forecasting method to choose. However, since the time series observed at runtime cannot be known at design time, such expert knowledge cannot be implemented in the system. In addition to selecting an appropriate forecasting method, several time series preprocessing steps are required to achieve satisfactory forecasting accuracy. In the literature, this preprocessing is often done manually, which is not practical for autonomous computing systems, such as Self-Aware Computing Systems. Several approaches have also been presented in the literature for predicting critical events based on multivariate monitoring data using machine and deep learning. However, these approaches are typically highly domain-specific, targeting, for example, financial failures, bearing failures, or product failures.
Therefore, they require in-depth expert knowledge. For this reason, these approaches cannot be fully automated and are not transferable to other use cases. Thus, the literature lacks generalizable end-to-end workflows for modeling, detecting, and predicting failures that require only little expert knowledge. To overcome these shortcomings, this thesis presents a system model for meta-self-aware prediction of critical events based on the LRA-M loop of Self-Aware Computing Systems. Building upon this system model, this thesis provides six further contributions to critical event prediction. While the first two contributions address critical event prediction based on univariate data via time series forecasting, the three subsequent contributions address critical event prediction for multivariate monitoring data using machine and deep learning algorithms. Finally, the last contribution addresses the update procedure of the system model. Specifically, the seven main contributions of this thesis can be summarized as follows: First, we present a system model for meta self-aware prediction of critical events. To handle both univariate and multivariate monitoring data, it offers univariate time series forecasting for use cases where a single observed variable is representative of the state of the system, and machine learning algorithms combined with various preprocessing techniques for use cases where a large number of variables are observed to characterize the system's state. However, the two different modeling alternatives are not disjoint, as univariate time series forecasts can also be included to estimate future monitoring data as additional input to the machine learning models. Finally, a feedback loop is incorporated to monitor the achieved prediction quality and trigger model updates. We propose a novel hybrid time series forecasting method for univariate, seasonal time series, called Telescope. To this end, Telescope automatically preprocesses the time series, performs a kind of divide-and-conquer technique to split the time series into multiple components, and derives additional categorical information. It then forecasts the components and categorical information separately using a specific state-of-the-art method for each component. Finally, Telescope recombines the individual predictions. As Telescope performs both preprocessing and forecasting automatically, it represents a complete end-to-end approach to univariate seasonal time series forecasting. Experimental results show that Telescope achieves enhanced forecast accuracy, more reliable forecasts, and a substantial speedup. Furthermore, we apply Telescope to the scenario of predicting critical events for virtual machine auto-scaling. Here, results show that Telescope considerably reduces the average response time and significantly reduces the number of service level objective violations. For the automatic selection of a suitable forecasting method, we introduce two frameworks for recommending forecasting methods. The first framework extracts various time series characteristics to learn the relationship between them and forecast accuracy. In contrast, the other framework divides the historical observations into internal training and validation parts to estimate the most appropriate forecasting method. Moreover, this framework also includes time series preprocessing steps. 
Comparisons between the proposed forecasting method recommendation frameworks and the individual state-of-the-art forecasting methods and the state-of-the-art forecasting method recommendation approach show that the proposed frameworks considerably improve the forecast accuracy. With regard to multivariate monitoring data, we first present an end-to-end workflow to detect critical events in technical systems in the form of anomalous machine states. The end-to-end design includes raw data processing, phase segmentation, data resampling, feature extraction, and machine tool anomaly detection. In addition, the workflow does not rely on profound domain knowledge or specific monitoring variables, but merely assumes standard machine monitoring data. We evaluate the end-to-end workflow using data from a real CNC machine. The results indicate that conventional frequency analysis does not detect the critical machine conditions well, while our workflow detects the critical events very well with an F1-score of almost 91\%. To predict critical events rather than merely detecting them, we compare different modeling alternatives for critical event prediction in the use case of time-to-failure prediction of hard disk drives. Given that failure records are typically significantly less frequent than instances representing the normal state, we employ different oversampling strategies. Next, we compare the prediction quality of binary class modeling with downscaled multi-class modeling. Furthermore, we integrate univariate time series forecasting into the feature generation process to estimate future monitoring data. Finally, we model the time-to-failure using not only classification models but also regression models. The results suggest that multi-class modeling provides the overall best prediction quality with respect to practical requirements. In addition, we prove that forecasting the features of the prediction model significantly improves the critical event prediction quality. We propose an end-to-end workflow for predicting critical events of industrial machines. Again, this approach does not rely on expert knowledge except for the definition of monitoring data, and therefore represents a generalizable workflow for predicting critical events of industrial machines. The workflow includes feature extraction, feature handling, target class mapping, and model learning with integrated hyperparameter tuning via a grid-search technique. Drawing on the result of the previous contribution, the workflow models the time-to-failure prediction in terms of multiple classes, where we compare different labeling strategies for multi-class classification. The evaluation using real-world production data of an industrial press demonstrates that the workflow is capable of predicting six different time-to-failure windows with a macro F1-score of 90\%. When scaling the time-to-failure classes down to a binary prediction of critical events, the F1-score increases to above 98\%. Finally, we present four update triggers to assess when critical event prediction models should be re-trained during on-line application. Such re-training is required, for instance, due to concept drift. The update triggers introduced in this thesis take into account the elapsed time since the last update, the prediction quality achieved on the current test data, and the prediction quality achieved on the preceding test data. We compare the different update strategies with each other and with the static baseline model. 
The results demonstrate the necessity of model updates during on-line application and suggest that the update triggers that consider both the prediction quality of the current and preceding test data achieve the best trade-off between prediction quality and number of updates required. We are convinced that the contributions of this thesis provide significant impetus for the academic research community as well as for practitioners. First of all, to the best of our knowledge, we are the first to propose a fully automated, end-to-end, hybrid, component-based forecasting method for seasonal time series that also includes time series preprocessing. Due to the combination of reliably high forecast accuracy and reliably low time-to-result, it offers many new opportunities in applications requiring accurate forecasts within a fixed time period in order to take timely countermeasures. In addition, the promising results of the forecasting method recommendation systems provide new opportunities to enhance forecasting performance for all types of time series, not just seasonal ones. Furthermore, we are the first to expose the deficiencies of the prior state-of-the-art forecasting method recommendation system. Concerning the contributions to critical event prediction based on multivariate monitoring data, we have already collaborated closely with industrial partners, which supports the practical relevance of the contributions of this thesis. The automated end-to-end design of the proposed workflows that do not demand profound domain or expert knowledge represents a milestone in bridging the gap between academic theory and industrial application. Finally, the workflow for predicting critical events in industrial machines is currently being operationalized in a real production system, underscoring the practical impact of this thesis.}, subject = {Prognose}, language = {en} } @phdthesis{Zinner2012, author = {Zinner, Thomas}, title = {Performance Modeling of QoE-Aware Multipath Video Transmission in the Future Internet}, doi = {10.25972/OPUS-6106}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72324}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Internet applications are becoming more and more flexible to support diverse user demands and network conditions. This is reflected by technical concepts, which provide new adaptation mechanisms to allow fine-grained adjustment of the application quality and the corresponding bandwidth requirements. For the case of video streaming, the scalable video codec H.264/SVC allows the flexible adaptation of frame rate, video resolution and image quality with respect to the available network resources. In order to guarantee a good user-perceived quality (Quality of Experience, QoE), it is necessary to adjust and optimize the video quality accurately. But not only have the applications of the current Internet changed. Within the network and transport layers, new technologies have evolved during the last years, providing a more flexible and efficient usage of data transport and network resources. One of the most promising technologies is Network Virtualization (NV), which is seen as an enabler to overcome the ossification of the Internet stack. It provides means to simultaneously operate multiple logical networks, which allow, for example, application-specific addressing, naming and routing, or their individual resource management.
New transport mechanisms like multipath transmission on the network and transport layer aim at an efficient usage of available transport resources. However, the simultaneous transmission of data via heterogeneous transport paths and communication technologies inevitably introduces packet reordering. Additional mechanisms and buffers are required to restore the correct packet order and thus to prevent a disturbance of the data transport. A proper buffer dimensioning as well as the classification of the impact of varying path characteristics like bandwidth and delay require appropriate evaluation methods. Additionally, real-time evaluation mechanisms are needed for path selection. A better application-network interaction and the corresponding exchange of information enable an efficient adaptation of the application to the network conditions and vice versa. This PhD thesis analyzes a video streaming architecture utilizing multipath transmission and scalable video coding and develops the following optimization possibilities and results: Analysis and dimensioning methods for multipath transmission, quantification of the adaptation possibilities to the current network conditions with respect to the QoE for H.264/SVC, and evaluation and optimization of a future video streaming architecture, which allows a better interaction of application and network.}, subject = {Video{\"u}bertragung}, language = {en} } @phdthesis{Zink2024, author = {Zink, Johannes}, title = {Algorithms for Drawing Graphs and Polylines with Straight-Line Segments}, doi = {10.25972/OPUS-35475}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-354756}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Graphs provide a key means to model relationships between entities. They consist of vertices representing the entities, and edges representing relationships between pairs of entities. To help people grasp the structure of a graph, it is almost inevitable to visualize the graph. We call such a visualization a graph drawing. Moreover, we have a straight-line graph drawing if each vertex is represented as a point (or a small geometric object, e.g., a rectangle) and each edge is represented as a line segment between its two vertices. A polyline is a very simple straight-line graph drawing, where the vertices form a sequence according to which the vertices are connected by edges. An example of a polyline in practice is a GPS trajectory. The underlying road network, in turn, can be modeled as a graph. This book addresses problems that arise when working with straight-line graph drawings and polylines. In particular, we study algorithms for recognizing certain graphs representable with line segments, for generating straight-line graph drawings, and for abstracting polylines. In the first part, we first examine how, and in which time, we can decide whether a given graph is a stick graph, that is, whether its vertices can be represented as vertical and horizontal line segments on a diagonal line, which intersect if and only if there is an edge between them. We then consider the visual complexity of graphs. Specifically, we investigate, for certain classes of graphs, how many line segments are necessary for any straight-line graph drawing, and whether three (or more) different slopes of the line segments are sufficient to draw all edges.
Last, we study the question of how to assign (ordered) colors to the vertices of a graph with both directed and undirected edges such that no neighboring vertices get the same color and colors are ascending along directed edges. Here, the special property of the considered graph is that the vertices can be represented as intervals that overlap if and only if there is an edge between them. The latter problem is motivated by an application in automated drawing of cable plans with vertical and horizontal line segments, which we cover in the second part. We describe an algorithm that takes the abstract description of a cable plan as input, and generates a drawing that takes into account the special properties of these cable plans, like plugs and groups of wires. We then experimentally evaluate the quality of the resulting drawings. In the third part, we study the problem of abstracting (or simplifying) a single polyline and a bundle of polylines. In this problem, the objective is to remove as many vertices as possible from the given polyline(s) while keeping each resulting polyline sufficiently similar to its original course (according to a given similarity measure).}, subject = {Graphenzeichnen}, language = {en} } @article{ZimmererFischbachLatoschik2018, author = {Zimmerer, Chris and Fischbach, Martin and Latoschik, Marc Erich}, title = {Semantic Fusion for Natural Multimodal Interfaces using Concurrent Augmented Transition Networks}, series = {Multimodal Technologies and Interaction}, volume = {2}, journal = {Multimodal Technologies and Interaction}, number = {4}, issn = {2414-4088}, doi = {10.3390/mti2040081}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197573}, year = {2018}, abstract = {Semantic fusion is a central requirement of many multimodal interfaces. Procedural methods like finite-state transducers and augmented transition networks have proven to be beneficial for implementing semantic fusion. They are compliant with rapid development cycles that are common for the development of user interfaces, in contrast to machine-learning approaches that require time-costly training and optimization. We identify seven fundamental requirements for the implementation of semantic fusion: Action derivation, continuous feedback, context-sensitivity, temporal relation support, access to the interaction context, as well as the support of chronologically unsorted and probabilistic input. A subsequent analysis reveals, however, that there is currently no solution for fulfilling the latter two requirements. As the main contribution of this article, we thus present the Concurrent Cursor concept to compensate for these shortcomings. In addition, we showcase a reference implementation, the Concurrent Augmented Transition Network (cATN), that validates the concept's feasibility in a series of proof-of-concept demonstrations as well as through a comparative benchmark. The cATN fulfills all identified requirements and fills the gap left by previous solutions. It supports the rapid prototyping of multimodal interfaces by means of five concrete traits: Its declarative nature, the recursiveness of the underlying transition network, the network abstraction constructs of its description language, the utilized semantic queries, and an abstraction layer for lexical information. Our reference implementation was and is used in various student projects, theses, as well as master-level courses.
It is openly available and showcases that non-experts can effectively implement multimodal interfaces, even for non-trivial applications in mixed and virtual reality.}, language = {en} } @phdthesis{Zhai2010, author = {Zhai, Xiaomin}, title = {Design, Development and Evaluation of a Virtual Classroom and Teaching Contents for Bernoulli Stochastics}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56106}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This thesis is devoted to Bernoulli Stochastics, which was initiated by Jakob Bernoulli more than 300 years ago by his masterpiece 'Ars conjectandi', which can be translated as 'Science of Prediction'. Thus, Jakob Bernoulli's Stochastics focuses on prediction, in contrast to the later emerging disciplines probability theory, statistics and mathematical statistics. Only recently Jakob Bernoulli's focus was taken up by von Collani, who developed a unified theory of uncertainty aiming at making reliable and accurate predictions. In this thesis, teaching material as well as a virtual classroom are developed for fostering ideas and techniques initiated by Jakob Bernoulli and elaborated by Elart von Collani. The thesis is part of a broadly conceived project called 'Stochastikon' aiming at introducing Bernoulli Stochastics as a unified science of prediction and measurement under uncertainty. This ambitious aim shall be reached by the development of an internet-based comprehensive system offering the science of Bernoulli Stochastics on any level of application. So far it is planned that the 'Stochastikon' system (http://www.stochastikon.com/) will consist of five subsystems. Two of them are developed and introduced in this thesis. The first one is the e-learning programme 'Stochastikon Magister' and the second one is 'Stochastikon Graphics', which provides the entire Stochastikon system with graphical illustrations. E-learning is the outcome of merging education and internet techniques. E-learning is characterized by the fact that teaching and learning are independent of place and time and of the availability of specially trained teachers. Knowledge offering as well as knowledge transferring are realized by using modern information technologies. Nowadays more and more e-learning environments are based on the internet as the primary tool for communication and presentation. E-learning presentation tools are, for instance, text files, pictures, graphics, audio and videos, which can be networked with each other. There is virtually no limit to the access to teaching contents. Moreover, the students can adapt the speed of learning to their individual abilities. E-learning is particularly appropriate for newly arising scientific and technical disciplines, which generally cannot be presented by traditional learning methods sufficiently well, because neither trained teachers nor textbooks are available. The first part of this dissertation introduces the state of the art of e-learning in statistics, since statistics and Bernoulli Stochastics are both based on probability theory and exhibit many similar features. Since Stochastikon Magister is the first e-learning programme for Bernoulli Stochastics, the educational statistics systems are selected for the purpose of comparison and evaluation. This makes sense as both disciplines are an attempt to handle uncertainty and use methods that often can be directly compared. The second part of this dissertation is devoted to Bernoulli Stochastics.
This part aims at outlining the content of two courses, which have been developed for the anticipated e-learning programme Stochastikon Magister in order to show the difficulties in teaching, understanding and applying Bernoulli Stochastics. The third part discusses the realization of the e-learning programme Stochastikon Magister, its design and implementation, which aims at offering a systematic learning of principles and techniques developed in Bernoulli Stochastics. The resulting e-learning programme differs from the commonly developed e-learning programmes as it is an attempt to provide a virtual classroom that simulates all the functions of real classroom teaching. This is in general not necessary, since most of the e-learning programmes aim at supporting existing classroom teaching. The fourth part presents two empirical evaluations of Stochastikon Magister. The evaluations are performed by means of comparisons between traditional classroom learning in statistics and e-learning of Bernoulli Stochastics. The aim is to assess the usability and learnability of Stochastikon Magister. Finally, the fifth part of this dissertation is added as an appendix. It refers to Stochastikon Graphics, the fifth component of the entire Stochastikon system. Stochastikon Graphics provides the other components with graphical representations of concepts, procedures and results obtained or used in the framework of Bernoulli Stochastics. The primary aim of this thesis is the development of appropriate software for the anticipated e-learning environment meant for Bernoulli Stochastics, while the preparation of the necessary teaching material constitutes only a secondary aim used for demonstrating the functionality of the e-learning platform and the scientific novelty of Bernoulli Stochastics. To this end, a first version of two teaching courses is developed, implemented and offered on-line in order to collect practical experience. The two courses, which were developed as part of this project, are submitted as a supplement to this dissertation. By now, initial experience with the e-learning programme Stochastikon Magister has been gained. Students of different faculties of the University of W{\"u}rzburg, as well as researchers and engineers involved in the Stochastikon project, have obtained access to Stochastikon Magister via the internet. They have registered for Stochastikon Magister and participated in the course programme. This thesis reports on two assessments of these first experiences, and the results will lead to further improvements with respect to content and organization of Stochastikon Magister.}, subject = {Moment }, language = {en} } @phdthesis{Zeiger2010, author = {Zeiger, Florian}, title = {Internet Protocol based networking of mobile robots}, isbn = {978-3-923959-59-4}, doi = {10.25972/OPUS-4661}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-54776}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This work is composed of three main parts: remote control of mobile systems via Internet, ad-hoc networks of mobile robots, and remote control of mobile robots via 3G telecommunication technologies. The first part gives a detailed state of the art and a discussion of the problems to be solved in order to teleoperate mobile robots via the Internet. The focus of the application to be realized is set on a distributed tele-laboratory with remote experiments on mobile robots, which can be accessed world-wide via the Internet.
Therefore, analyses of the communication link are used in order to realize a robust system. The developed and implemented architecture of this distributed tele-laboratory allows for smooth access even with variable or low link quality. The second part covers the application of ad-hoc networks for mobile robots. The networking of mobile robots via mobile ad-hoc networks is a very promising approach to realize integrated telematic systems without relying on preexisting communication infrastructure. Relevant civilian application scenarios are, for example, in the area of search and rescue operations, where first responders are supported by multi-robot systems. Here, mobile robots, humans, and also existing stationary sensors can be connected very quickly and efficiently. Therefore, this work investigates and analyses the performance of different ad-hoc routing protocols for IEEE 802.11 based wireless networks in relevant scenarios. The analysis of the different protocols allows for an optimization of the parameter settings in order to use these ad-hoc routing protocols for mobile robot teleoperation. Guidelines for the realization of such telematic systems are given as well. In addition, traffic shaping mechanisms on the application layer are presented, which allow for a more efficient use of the communication link. An additional application scenario, the integration of a small-size helicopter into an IP-based ad-hoc network, is presented. The teleoperation of mobile robots via 3G telecommunication technologies is addressed in the third part of this work. The high availability, high mobility, and the high bandwidth provide a very interesting opportunity to realize scenarios for the teleoperation of mobile robots or industrial remote maintenance. This work analyses important parameters of the UMTS communication link and also investigates the characteristics of different data streams. These analyses are used to give guidelines which are necessary for the realization of industrial remote maintenance or mobile robot teleoperation scenarios. All the results and guidelines for the design of telematic systems in this work were derived from analyses and experiments with real hardware.}, subject = {Robotik}, language = {en} } @article{YuanBorrmannHouetal.2021, author = {Yuan, Yijun and Borrmann, Dorit and Hou, Jiawei and Ma, Yuexin and N{\"u}chter, Andreas and Schwertfeger, S{\"o}ren}, title = {Self-Supervised point set local descriptors for Point Cloud Registration}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {2}, issn = {1424-8220}, doi = {10.3390/s21020486}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-223000}, year = {2021}, abstract = {Descriptors play an important role in point cloud registration. The current state-of-the-art resorts to the high regression capability of deep learning. However, recent deep learning-based descriptors require different levels of annotation and selection of patches, which make the model hard to migrate to new scenarios. In this work, we learn local registration descriptors for point clouds in a self-supervised manner. In each iteration of the training, the input of the network is merely one unlabeled point cloud. Thus, the whole training requires no manual annotation and no manual selection of patches. In addition, we propose to incorporate keypoint sampling into the pipeline, which further improves the performance of our model.
Our experiments demonstrate the capability of our self-supervised local descriptor to achieve even better performance than the supervised model, while being easier to train and requiring no data labeling.}, language = {en} } @phdthesis{Xu2014, author = {Xu, Zhihao}, title = {Cooperative Formation Controller Design for Time-Delay and Optimality Problems}, isbn = {978-3-923959-96-9}, doi = {10.25972/OPUS-10555}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-105555}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {This dissertation presents controller design methodologies for a formation of cooperative mobile robots to perform trajectory tracking and convoy protection tasks. Two major problems related to multi-agent formation control are addressed, namely the time-delay and optimality problems. For the task of trajectory tracking, a leader-follower based system structure is adopted for the controller design, where the selection criteria for controller parameters are derived through analyses of characteristic polynomials. The resulting parameters ensure the stability of the system and overcome the steady-state error as well as the oscillation behavior under the time-delay effect. In the convoy protection scenario, a decentralized coordination strategy for balanced deployment of mobile robots is first proposed. Based on this coordination scheme, optimal controller parameters are generated in both centralized and decentralized fashion to achieve dynamic convoy protection in a unified framework, where a distributed optimization technique is applied in the decentralized strategy. This unified framework takes into account the motion of the target to be protected, and the desired system performance, for instance, minimal energy to spend, equal inter-vehicle distance to keep, etc. Both trajectory tracking and convoy protection tasks are demonstrated through simulations and real-world hardware experiments based on the robotic equipment at the Department of Computer Science VII, University of W{\"u}rzburg.}, subject = {Optimalwertregelung}, language = {en} } @article{WolffRutter2012, author = {Wolff, Alexander and Rutter, Ignaz}, title = {Augmenting the Connectivity of Planar and Geometric Graphs}, series = {Journal of Graph Algorithms and Applications}, journal = {Journal of Graph Algorithms and Applications}, doi = {10.7155/jgaa.00275}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97587}, year = {2012}, abstract = {In this paper we study connectivity augmentation problems. Given a connected graph G with some desirable property, we want to make G 2-vertex connected (or 2-edge connected) by adding edges such that the resulting graph keeps the property. The aim is to add as few edges as possible. The property that we consider is planarity, both in an abstract graph-theoretic and in a geometric setting, where vertices correspond to points in the plane and edges to straight-line segments. We show that it is NP-hard to find a minimum-cardinality augmentation that makes a planar graph 2-edge connected. For making a planar graph 2-vertex connected this was known. We further show that both problems are hard in the geometric setting, even when restricted to trees. The problems remain hard for higher degrees of connectivity. On the other hand we give polynomial-time algorithms for the special case of convex geometric graphs. We also study the following related problem.
Given a planar (plane geometric) graph G, two vertices s and t of G, and an integer c, how many edges have to be added to G such that G is still planar (plane geometric) and contains c edge- (or vertex-) disjoint s-t paths? For the planar case we give a linear-time algorithm for c = 2. For the plane geometric case we give optimal worst-case bounds for c = 2; for c = 3 we characterize the cases that have a solution.}, language = {en} } @article{WolfDoellingerMaletal.2022, author = {Wolf, Erik and D{\"o}llinger, Nina and Mal, David and Wenninger, Stephan and Bartl, Andrea and Botsch, Mario and Latoschik, Marc Erich and Wienrich, Carolin}, title = {Does distance matter? Embodiment and perception of personalized avatars in relation to the self-observation distance in virtual reality}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.1031093}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-299415}, year = {2022}, abstract = {Virtual reality applications employing avatar embodiment typically use virtual mirrors to allow users to perceive their digital selves not only from a first-person but also from a holistic third-person perspective. However, due to distance-related biases such as the distance compression effect or a reduced relative rendering resolution, the self-observation distance (SOD) between the user and the virtual mirror might influence how users perceive their embodied avatar. Our article systematically investigates the effects of a short (1 m), middle (2.5 m), and far (4 m) SOD between users and mirror on the perception of their personalized and self-embodied avatars. The avatars were photorealistically reconstructed using state-of-the-art photogrammetric methods. Thirty participants repeatedly faced their real-time animated self-embodied avatars in each of the three SOD conditions, where the avatars were repeatedly altered in their body weight, and participants rated 1) the sense of embodiment, 2) body weight perception, and 3) affective appraisal towards their avatar. We found that the different SODs are unlikely to influence any of our measures except for the perceived body weight estimation difficulty. Here, the participants perceived the difficulty as significantly higher for the farthest SOD. We further found that the participants' self-esteem significantly impacted their ability to modify their avatar's body weight to their current body weight and that it positively correlated with the perceived attractiveness of the avatar. Additionally, the participants' concerns about their body shape affected how eerie they perceived their avatars to be. The participants' self-esteem and concerns about their body shape influenced the perceived body weight estimation difficulty. We conclude that the virtual mirror in embodiment scenarios can be freely placed and varied at a distance of one to four meters from the user without expecting major effects on the perception of the avatar.}, language = {en} } @phdthesis{Wolf2017, author = {Wolf, Beat}, title = {Reducing the complexity of OMICS data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153687}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {The field of genetics faces a lot of challenges and opportunities in both research and diagnostics due to the rise of next generation sequencing (NGS), a technology that allows DNA to be sequenced increasingly fast and cheaply.
NGS is not only used to analyze DNA, but also RNA, which is a very similar molecule also present in the cell, in both cases producing large amounts of data. This large amount of data raises both infrastructure and usability problems, as powerful computing infrastructures are required and there are many manual steps in the data analysis which are complicated to execute. Both of those problems limit the use of NGS in the clinic and research, by producing a bottleneck both computationally and in terms of manpower, as for many analyses geneticists lack the required computing skills. Over the course of this thesis we investigated how computer science can help to improve this situation to reduce the complexity of this type of analysis. We looked at how to make the analysis more accessible to increase the number of people that can perform OMICS data analysis (OMICS collectively refers to various genomics data sources). To approach this problem, we developed, in close collaboration with the Human Genetics Department at the University of W{\"u}rzburg, a graphical NGS data analysis pipeline aimed at a diagnostics environment while still being useful in research. The pipeline has been used in various research papers covering different subjects, including works with direct author participation in genomics, transcriptomics as well as epigenomics. To further validate the graphical pipeline, a user survey was carried out, which confirmed that it lowers the complexity of OMICS data analysis. We also studied how the data analysis can be improved in terms of computing infrastructure by improving the performance of certain analysis steps. We did this both in terms of speed improvements on a single computer (with variant calling notably being up to 18 times faster), as well as with distributed computing to better use an existing infrastructure. The improvements were integrated into the previously described graphical pipeline, which itself was also focused on low resource usage. As a major contribution and to help with future development of parallel and distributed applications, for use in genetics or otherwise, we also looked at how to make it easier to develop such applications. Based on the parallel object programming model (POP), we created a Java language extension called POP-Java, which allows for easy and transparent distribution of objects. Through this development, we brought the POP model to the cloud and to Hadoop clusters, and we present a new collaborative distributed computing model called FriendComputing. The advances made in the different domains of this thesis have been published in various works specified in this document.}, subject = {Bioinformatik}, language = {en} } @phdthesis{Witek2014, author = {Witek, Maximilian}, title = {Multiobjective Traveling Salesman Problems and Redundancy of Complete Sets}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-110740}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {The first part of this thesis deals with the approximability of the traveling salesman problem. This problem is defined on a complete graph with edge weights, and the task is to find a Hamiltonian cycle of minimum weight that visits each vertex exactly once. We study the most important multiobjective variants of this problem. In the multiobjective case, the edge weights are vectors of natural numbers with one component for each objective, and since weight vectors are typically incomparable, the optimal Hamiltonian cycle does not exist.
Instead, we consider the Pareto set, which consists of those Hamiltonian cycles that are not dominated by some other, strictly better Hamiltonian cycles. The central goal in multiobjective optimization and in the first part of this thesis in particular is the approximation of such Pareto sets. We first develop improved approximation algorithms, inspired by the single-objective Christofides' heuristic, for the two-objective metric traveling salesman problem on multigraphs and for related Hamiltonian path problems. We further give arguments indicating that our algorithms are difficult to improve. Furthermore we consider multiobjective maximization versions of the traveling salesman problem, where the task is to find Hamiltonian cycles with high weight in each objective. We generalize single-objective techniques to the multiobjective case, where we first compute a cycle cover with high weight and then remove an edge with low weight in each cycle. Since weight vectors are often incomparable, the choice of the edges of low weight is non-trivial. We develop a general lemma that solves this problem and enables us to generalize the single-objective maximization algorithms to the multiobjective case. We obtain improved, randomized approximation algorithms for the multiobjective maximization variants of the traveling salesman problem. We conclude the first part by developing deterministic algorithms for these problems. The second part of this thesis deals with redundancy properties of complete sets. We call a set autoreducible if for every input instance x we can efficiently compute some y that is different from x but that belongs to the set if and only if x does. If the set can be split into two equivalent parts, then it is called weakly mitotic, and if the splitting is obtained by an efficiently decidable separator set, then it is called mitotic. For different reducibility notions and complexity classes, we analyze how redundant their complete sets are. Previous research in this field concentrates on polynomial-time computable reducibility notions. The main contribution of this part of the thesis is a systematic study of the redundancy properties of complete sets for typical complexity classes and reducibility notions that are computable in logarithmic space. We use different techniques to show autoreducibility and mitoticity that depend on the size of the complexity class and the strength of the reducibility notion considered. For small complexity classes such as NL and P we use self-reducible, complete sets to show that all complete sets are autoreducible. For large complexity classes such as PSPACE and EXP we apply diagonalization methods to show that all complete sets are even mitotic. For intermediate complexity classes such as NP and the remaining levels of the polynomial-time hierarchy we establish autoreducibility of complete sets by locally checking computational transcripts. In many cases we can show autoreducibility of complete sets, while mitoticity is not known to hold.
We conclude the second part by showing that in some cases, autoreducibility of complete sets at least implies weak mitoticity.}, subject = {Mehrkriterielle Optimierung}, language = {en} } @phdthesis{Wirth2001, author = {Wirth, Hans-Christoph}, title = {Multicriteria Approximation of Network Design and Network Upgrade Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2845}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Network planning has gained great importance during the past decades. Today's telecommunication, traffic systems, and logistics would not have evolved to their current state without careful analysis of the underlying network problems and precise implementation of the results obtained from those examinations. Graphs with node and arc attributes are a very useful tool to model realistic applications, while on the other hand they are well understood in theory. We investigate network design problems which are motivated particularly from applications in communication networks and logistics. Those problems include the search for homogeneous subgraphs in edge-labeled graphs, where either the total number of labels or the reload cost is to be optimized. Further, we investigate some variants of the dial-a-ride problem. On the other hand, we use node and edge upgrade models to deal with the fact that in many cases one prefers to change existing networks rather than to implement a newly computed solution from scratch. We investigate the construction of bottleneck-constrained forests under a node upgrade model, as well as several flow cost problems under an edge-based upgrade model. All problems are examined within a framework of multi-criteria optimization. Many of the problems can be shown to be NP-hard, with the consequence that, under the widely accepted assumption that P is not equal to NP, there cannot exist efficient algorithms for solving the problems. This motivates the development of approximation algorithms which compute near-optimal solutions with a provable performance guarantee in polynomial time.}, subject = {Netzplantechnik}, language = {en} } @article{WinterKernGalletal.2021, author = {Winter, Carla and Kern, Florian and Gall, Dominik and Latoschik, Marc Erich and Pauli, Paul and K{\"a}thner, Ivo}, title = {Immersive virtual reality during gait rehabilitation increases walking speed and motivation: a usability evaluation with healthy participants and individuals with multiple sclerosis and stroke}, series = {Journal of Neuroengineering and Rehabilitation}, volume = {18}, journal = {Journal of Neuroengineering and Rehabilitation}, number = {1}, issn = {1743-0003}, doi = {10.1186/s12984-021-00848-w}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258698}, year = {2021}, abstract = {Background: The rehabilitation of gait disorders in patients with multiple sclerosis (MS) and stroke is often based on conventional treadmill training. Virtual reality (VR)-based treadmill training can increase motivation and improve therapy outcomes. The present study evaluated an immersive virtual reality application (using a head-mounted display, HMD) for gait rehabilitation with patients to (1) demonstrate its feasibility and acceptance and to (2) compare its short-term effects to a semi-immersive presentation (using a monitor) and a conventional treadmill training without VR to assess the usability of both systems and estimate the effects on walking speed and motivation.
Methods: In a within-subjects study design, 36 healthy participants and 14 persons with MS or stroke participated in each of the three experimental conditions (VR via HMD, VR via monitor, treadmill training without VR). Results: For both groups, the walking speed in the HMD condition was higher than in treadmill training without VR and in the monitor condition. Healthy participants reported a higher motivation after the HMD condition as compared with the other conditions. Importantly, no side effects in the sense of simulator sickness occurred and usability ratings were high. No increases in heart rate were observed following the VR conditions. Presence ratings were higher for the HMD condition compared with the monitor condition for both user groups. Most of the healthy study participants (89\%) and patients (71\%) preferred the HMD-based training among the three conditions and most patients could imagine using it more frequently. Conclusions: For the first time, the present study evaluated the usability of an immersive VR system for gait rehabilitation in a direct comparison with a semi-immersive system and a conventional training without VR with healthy participants and patients. The study demonstrated the feasibility of combining treadmill training with immersive VR. Due to its high usability and low side effects, it might be particularly suited for patients to improve training motivation and training outcome, e.g., the walking speed, compared with treadmill training using no or only semi-immersive VR. Immersive VR systems still require specific technical setup procedures. This should be taken into account for specific clinical use-cases during a cost-benefit assessment.}, language = {en} } @article{WienrichLatoschik2021, author = {Wienrich, Carolin and Latoschik, Marc Erich}, title = {eXtended Artificial Intelligence: New Prospects of Human-AI Interaction Research}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.686783}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260296}, year = {2021}, abstract = {Artificial Intelligence (AI) covers a broad spectrum of computational problems and use cases. Many of those raise profound and sometimes intricate questions of how humans interact or should interact with AIs. Moreover, many users or future users do have abstract ideas of what AI is, significantly depending on the specific embodiment of AI applications. Human-centered-design approaches would suggest evaluating the impact of different embodiments on human perception of and interaction with AI, an approach that is difficult to realize due to the sheer complexity of application fields and embodiments in reality. However, here XR opens new possibilities to research human-AI interactions. The article's contribution is twofold: First, it provides a theoretical treatment and model of human-AI interaction based on an XR-AI continuum as a framework for and a perspective of different approaches of XR-AI combinations. It motivates XR-AI combinations as a method to learn about the effects of prospective human-AI interfaces and shows why the combination of XR and AI fruitfully contributes to a valid and systematic investigation of human-AI interactions and interfaces. Second, the article provides two exemplary experiments investigating the aforementioned approach for two distinct AI-systems.
The first experiment reveals an interesting gender effect in human-robot interaction, while the second experiment reveals an Eliza effect of a recommender system. Here, the article introduces two paradigmatic implementations of the proposed XR testbed for human-AI interactions and interfaces and shows how a valid and systematic investigation can be conducted. In sum, the article opens new perspectives on how XR benefits human-centered AI design and development.}, language = {en} } @article{WienrichKommaVogtetal.2021, author = {Wienrich, Carolin and Komma, Philipp and Vogt, Stephanie and Latoschik, Marc E.}, title = {Spatial Presence in Mixed Realities - Considerations About the Concept, Measures, Design, and Experiments}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.694315}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260328}, year = {2021}, abstract = {Plenty of theories, models, measures, and investigations target the understanding of virtual presence, i.e., the sense of presence in immersive Virtual Reality (VR). Other varieties of the so-called eXtended Realities (XR), e.g., Augmented and Mixed Reality (AR and MR), incorporate immersive features to a lesser degree and continuously combine spatial cues from the real physical space and the simulated virtual space. This blurred separation questions the applicability of the accumulated knowledge about the similarities of virtual presence and presence occurring in other varieties of XR, and corresponding outcomes. The present work bridges this gap by analyzing the construct of presence in mixed realities (MR). To achieve this, this work presents (1) a short review of definitions, dimensions, and measurements of presence in VR, and (2) the state-of-the-art views on MR. Additionally, we (3) derived a working definition of MR, extending the Milgram continuum. This definition is based on entities ranging from real to virtual manifestations at one time point. Entities possess different degrees of referential power, determining the selection of the frame of reference. Furthermore, we (4) identified three research desiderata, including research questions about the frame of reference, the corresponding dimension of transportation, and the dimension of realism in MR. Mainly, the relationship between the main aspects of virtual presence in immersive VR, i.e., the place-illusion and the plausibility-illusion, and the referential power of MR entities is discussed regarding the concept, measures, and design of presence in MR. Finally, (5) we suggested an experimental setup to reveal the research heuristic behind experiments investigating presence in MR. The present work contributes to the theories and the meaning of and approaches to simulate and measure presence in MR.
We hypothesize that research about essential underlying factors determining user experience (UX) in MR simulations and experiences is still in its infancy and hope that this article provides an encouraging starting point to tackle related questions.}, language = {en} } @article{WienrichCarolusRothIsigkeitetal.2022, author = {Wienrich, Carolin and Carolus, Astrid and Roth-Isigkeit, David and Hotho, Andreas}, title = {Inhibitors and enablers to explainable AI success: a systematic examination of explanation complexity and individual characteristics}, series = {Multimodal Technologies and Interaction}, volume = {6}, journal = {Multimodal Technologies and Interaction}, number = {12}, issn = {2414-4088}, doi = {10.3390/mti6120106}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-297288}, year = {2022}, abstract = {With the increasing adaptability and complexity of advisory artificial intelligence (AI)-based agents, the topics of explainable AI and human-centered AI are moving closer together. Variations in the explanation itself have been widely studied, with some contradictory results. These could be due to users' individual differences, which have rarely been systematically studied regarding their inhibiting or enabling effect on the fulfillment of explanation objectives (such as trust, understanding, or workload). This paper aims to shed light on the significance of human dimensions (gender, age, trust disposition, need for cognition, affinity for technology, self-efficacy, attitudes, and mind attribution) as well as their interplay with different explanation modes (no, simple, or complex explanation). Participants played the game Deal or No Deal while interacting with an AI-based agent. The agent gave advice to the participants on whether they should accept or reject the deals offered to them. As expected, giving an explanation had a positive influence on the explanation objectives. However, the users' individual characteristics particularly reinforced the fulfillment of the objectives. The strongest predictor of objective fulfillment was the degree of attribution of human characteristics. The more human characteristics were attributed, the more trust was placed in the agent, advice was more likely to be accepted and understood, and important needs were satisfied during the interaction. Thus, the current work contributes to a better understanding of the design of explanations of an AI-based agent system that takes into account individual characteristics and meets the demand for both explainable and human-centered agent systems.}, language = {en} } @article{WienrichCarolusMarkusetal.2023, author = {Wienrich, Carolin and Carolus, Astrid and Markus, Andr{\´e} and Augustin, Yannik and Pfister, Jan and Hotho, Andreas}, title = {Long-term effects of perceived friendship with intelligent voice assistants on usage behavior, user experience, and social perceptions}, series = {Computers}, volume = {12}, journal = {Computers}, number = {4}, issn = {2073-431X}, doi = {10.3390/computers12040077}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313552}, year = {2023}, abstract = {Social patterns and roles can develop when users talk to intelligent voice assistants (IVAs) daily. The current study investigates whether users assign different roles to devices and how this affects their usage behavior, user experience, and social perceptions.
Since social roles take time to establish, we equipped 106 participants with Alexa or Google assistants and some smart home devices and observed their interactions for nine months. We analyzed diverse subjective (questionnaire) and objective data (interaction data). By combining social science and data science analyses, we identified two distinct clusters: users who assigned a friendship role to IVAs over time and users who did not. Interestingly, these clusters exhibited significant differences in their usage behavior, user experience, and social perceptions of the devices. For example, participants who assigned a role to IVAs attributed more friendship to them, used them more frequently, reported more enjoyment during interactions, and perceived more empathy for IVAs. In addition, these users had distinct personal requirements; for example, they reported more loneliness. This study provides valuable insights into the role-specific effects and consequences of voice assistants. Recent developments in conversational language models such as ChatGPT suggest that the findings of this study could make an important contribution to the design of dialogic human-AI interactions.}, language = {en} } @phdthesis{Wiebusch2016, author = {Wiebusch, Dennis}, title = {Reusability for Intelligent Realtime Interactive Systems}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-040-5 (print)}, doi = {10.25972/WUP-978-3-95826-041-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-121869}, school = {W{\"u}rzburg University Press}, pages = {260}, year = {2016}, abstract = {Software frameworks for Realtime Interactive Systems (RIS), e.g., in the areas of Virtual, Augmented, and Mixed Reality (VR, AR, and MR) or computer games, facilitate a multitude of functionalities by coupling diverse software modules. In this context, no uniform methodology for coupling these modules exists; instead, various purpose-built solutions have been proposed. As a consequence, important software qualities, such as maintainability, reusability, and adaptability, are impeded. Many modern systems provide additional support for the integration of Artificial Intelligence (AI) methods to create so-called intelligent virtual environments. These methods further exacerbate the above-mentioned problem of coupling software modules in the thus created Intelligent Realtime Interactive Systems (IRIS). This is due, on the one hand, to the commonly applied specialized data structures and asynchronous execution schemes, and, on the other, to the requirement for high consistency regarding content-wise coupled but functionally decoupled forms of data representation. This work proposes an approach to decoupling software modules in IRIS, which is based on the abstraction of architecture elements using a semantic Knowledge Representation Layer (KRL). 
The layer facilitates decoupling the required modules, provides a means for ensuring interface compatibility and consistency, and in the end constitutes an interface for symbolic AI methods.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @article{WickHarteltPuppe2019, author = {Wick, Christoph and Hartelt, Alexander and Puppe, Frank}, title = {Staff, symbol and melody detection of Medieval manuscripts written in square notation using deep Fully Convolutional Networks}, series = {Applied Sciences}, volume = {9}, journal = {Applied Sciences}, number = {13}, issn = {2076-3417}, doi = {10.3390/app9132646}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197248}, year = {2019}, abstract = {Even today, the automatic digitisation of scanned documents in general, but especially the automatic optical music recognition (OMR) of historical manuscripts, still remains an enormous challenge, since both handwritten musical symbols and text have to be identified. This paper focuses on the Medieval so-called square notation developed in the 11th-12th century, which is already composed of staff lines, staves, clefs, accidentals, and neumes, which are, roughly speaking, connected single notes. The aim is to develop an algorithm that captures both the neumes and, in particular, the melody, which can be used to reconstruct the original writing. Our pipeline is similar to the standard OMR approach and comprises a novel staff line and symbol detection algorithm based on deep Fully Convolutional Networks (FCN), which perform pixel-based predictions for either staff lines or symbols and their respective types. Then, the staff line detection combines the extracted lines into staves and yields an F\(_1\)-score of over 99\% for both detecting lines and complete staves. For the music symbol detection, we choose a novel approach that skips the step of identifying neumes and instead directly predicts note components (NCs) and their respective affiliation to a neume. Furthermore, the algorithm detects clefs and accidentals. Our algorithm predicts the symbol sequence of a staff with a diplomatic symbol accuracy rate (dSAR) of about 87\%, which includes symbol type and location. If only the NCs (without their respective connection to a neume), all clefs, and accidentals are of interest, the algorithm reaches a harmonic symbol accuracy rate (hSAR) of approximately 90\%. In general, the algorithm recognises a symbol in the manuscript with an F\(_1\)-score of over 96\%.}, language = {en} } @phdthesis{Wick2020, author = {Wick, Christoph}, title = {Optical Medieval Music Recognition}, doi = {10.25972/OPUS-21434}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-214348}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {In recent years, great progress has been made in the area of Artificial Intelligence (AI) due to the possibilities of Deep Learning, which has steadily yielded new state-of-the-art results, especially in many image recognition tasks. Currently, in some areas, human performance is achieved or already exceeded. This great development has already had an impact on the area of Optical Music Recognition (OMR), as several novel methods relying on Deep Learning succeeded in specific tasks. Musicologists are interested in large-scale musical analysis and in publishing digital transcriptions in a collection that enables the development of tools for searching and data retrieval. 
The application of OMR promises to simplify and thus speed up the transcription process by providing either fully automatic or semi-automatic approaches. This thesis focuses on the automatic transcription of Medieval music with a focus on square notation, which poses a challenging task due to complex layouts, highly varying handwritten notations, and degradation. However, since handwritten music notations are quite complex to read, even for an experienced musicologist, it is to be expected that, even with new OMR techniques, manual corrections are required to obtain the transcriptions. This thesis presents several new approaches and open source software solutions for layout analysis and Automatic Text Recognition (ATR) for early documents and for OMR of Medieval manuscripts providing state-of-the-art technology. Fully Convolutional Networks (FCN) are applied for the segmentation of historical manuscripts and early printed books, to detect staff lines, and to recognize neume notations. The ATR engine Calamari is presented, which allows for ATR of early prints and also the recognition of lyrics. Configurable CNN/LSTM network architectures, which are trained with the segmentation-free CTC loss, are applied to the sequential recognition of text but also monophonic music. Finally, a syllable-to-neume assignment algorithm is presented, which represents the final step to obtain a complete transcription of the music. The evaluations show that the performance of any algorithm depends highly on the material at hand and the number of training instances. The presented staff line detection correctly identifies staff lines and staves with an \$F_1\$-score of above \$99.5\\%\$. The symbol recognition yields a diplomatic Symbol Accuracy Rate (dSAR) of above \$90\\%\$ by counting the number of correct predictions in the symbol sequence normalized by its length. The ATR of lyrics achieved a Character Accuracy Rate (CAR) (equivalently the number of correct predictions normalized by the sentence length) of above \$93\\%\$ when trained on 771 lyric lines of Medieval manuscripts and of 99.89\\% when training on around 3.5 million lines of contemporary printed fonts. The assignment of syllables and their corresponding neumes reached \$F_1\$-scores of up to \$99.2\\%\$. A direct comparison to previously published performances is difficult due to different materials and metrics. However, estimations show that the reported values of this thesis exceed the state-of-the-art in the area of square notation. A further goal of this thesis is to enable musicologists without technical background to apply the developed algorithms in a complete workflow by providing a user-friendly and comfortable Graphical User Interface (GUI) encapsulating the technical details. For this purpose, this thesis presents the web application OMMR4all. Its fully-functional workflow includes the proposed state-of-the-art machine-learning algorithms and optionally allows for manual intervention at any stage to correct the output, preventing error propagation. To simplify the manual (post-) correction, OMMR4all provides an overlay editor that superimposes the annotations on a scan of the original manuscripts so that errors can easily be spotted. 
The workflow is designed to be iteratively improvable by training better models as soon as new Ground Truth (GT) is available.}, subject = {Neumenschrift}, language = {en} } @article{WernerStrohmeierRotheetal.2022, author = {Werner, Lennart and Strohmeier, Michael and Rothe, Julian and Montenegro, Sergio}, title = {Thrust vector observation for force feedback-controlled UAVs}, series = {Drones}, volume = {6}, journal = {Drones}, number = {2}, issn = {2504-446X}, doi = {10.3390/drones6020049}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-262153}, year = {2022}, abstract = {This paper presents a novel approach to Thrust Vector Control (TVC) for small Unmanned Aerial Vehicles (UAVs). The difficulties associated with conventional feed-forward TVC are outlined, and a practical solution to conquer these challenges is derived. The solution relies on observing boom deformations that are created by different thrust vector directions and high-velocity air inflow. The paper describes the required measurement electronics as well as the implementation of a dedicated testbed that allows the evaluation of mid-flight force measurements. Wind-tunnel tests show that the presented method for active thrust vector determination is able to quantify the disturbances due to the incoming air flow.}, language = {en} } @misc{Werner2024, type = {Master Thesis}, author = {Werner, Lennart}, title = {Terrain Mapping for Autonomous Navigation of Lunar Rovers}, doi = {10.25972/OPUS-35826}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-358268}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Autonomous mobile robots operating in unknown terrain have to guide their drive decisions through local perception. Local mapping and traversability analysis are essential for safe rover operation and low-level locomotion. This thesis deals with the challenge of building a local, robot-centric map from ultra-short-baseline stereo imagery for height and traversability estimation. Several grid-based, incremental mapping algorithms are compared and evaluated in a multi-size, multi-resolution framework. A new, covariance-based mapping update is introduced, which is capable of detecting sub-cell-size obstacles and abstracts the terrain of one cell as a first-order surface. The presented mapping setup is capable of producing reliable terrain and traversability estimates under the conditions expected for the Cooperative Autonomous Distributed Robotic Exploration (CADRE) mission. Algorithmic and software architecture design targets high reliability and efficiency for meeting the tight constraints implied by CADRE's small on-board embedded CPU. Extensive evaluations are conducted to find possible edge-case scenarios in the operating envelope of the map and to confirm performance parameters. 
The research in this thesis targets the CADRE mission, but is applicable to any form of mobile robotics that requires height and traversability mapping.}, subject = {Mondfahrzeug}, language = {en} } @article{WangLiuXiaoetal.2023, author = {Wang, Xiaoliang and Liu, Xuan and Xiao, Yun and Mao, Yue and Wang, Nan and Wang, Wei and Wu, Shufan and Song, Xiaoyong and Wang, Dengfeng and Zhong, Xingwang and Zhu, Zhu and Schilling, Klaus and Damaren, Christopher}, title = {On-orbit verification of RL-based APC calibrations for micrometre level microwave ranging system}, series = {Mathematics}, volume = {11}, journal = {Mathematics}, number = {4}, issn = {2227-7390}, doi = {10.3390/math11040942}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-303970}, year = {2023}, abstract = {Micrometre level ranging accuracy between satellites on-orbit relies on the high-precision calibration of the antenna phase center (APC), which is currently accomplished through properly designed calibration maneuvers and batch estimation algorithms. However, unmodeled perturbations of the space dynamics and sensor-induced uncertainty complicate the situation in reality; ranging accuracy deteriorates especially outside the antenna main lobe when maneuvers are performed. This paper proposes an on-orbit APC calibration method that uses a reinforcement learning (RL) process, aiming to provide a micrometre-level, high-accuracy ranging datum for onboard instruments. The RL process used here is an improved Temporal Difference advantage actor critic algorithm (TDAAC), which mainly focuses on two neural networks (NNs) for the critic and actor functions. The output of the TDAAC algorithm autonomously balances the APC calibration maneuver amplitude and the APC-observed sensitivity with the objective of maximal APC estimation accuracy. The RL-based APC calibration method proposed here was fully tested in software and in on-ground experiments, with an APC calibration accuracy of less than 2 mrad, and on on-orbit maneuver data from 11-12 April 2022, which achieved 1-1.5 mrad calibration accuracy after RL training. The proposed RL-based APC algorithm may be extended to proof-mass calibration scenarios with action feedback to the attitude determination and control system (ADCS), showing the flexibility of spacecraft payload applications in the future.}, language = {en} } @article{WamserSeufertHalletal.2021, author = {Wamser, Florian and Seufert, Anika and Hall, Andrew and Wunderer, Stefan and Hoßfeld, Tobias}, title = {Valid statements by the crowd: statistical measures for precision in crowdsourced mobile measurements}, series = {Network}, volume = {1}, journal = {Network}, number = {2}, issn = {2673-8732}, doi = {10.3390/network1020013}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284154}, pages = {215 -- 232}, year = {2021}, abstract = {Crowdsourced network measurements (CNMs) are becoming increasingly popular as they assess the performance of a mobile network from the end user's perspective on a large scale. Here, network measurements are performed directly on the end-users' devices, thus taking advantage of the real-world conditions end-users encounter. However, this type of uncontrolled measurement raises questions about its validity and reliability. The problem lies in the nature of this type of data collection. In CNMs, mobile network subscribers are involved to a large extent in the measurement process, and collect data themselves for the operator. 
The collection of data on user devices in arbitrary locations and at uncontrolled times requires means to ensure validity and reliability. To address this issue, our paper defines concepts and guidelines for analyzing the precision of CNMs; specifically, the number of measurements required to make valid statements. In addition to the formal definition of the aspect, we illustrate the problem and use an extensive sample data set to show possible assessment approaches. This data set consists of more than 20.4 million crowdsourced mobile measurements from across France, measured by a commercial data provider.}, language = {en} } @phdthesis{Wamser2015, author = {Wamser, Florian}, title = {Performance Assessment of Resource Management Strategies for Cellular and Wireless Mesh Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-11151}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-111517}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The rapid growth in the field of communication networks has been truly amazing in the last decades. We are currently experiencing a continuation thereof with an increase in traffic and the emergence of new fields of application. In particular, the latter is interesting since due to advances in the networks and new devices, such as smartphones, tablet PCs, and all kinds of Internet-connected devices, new additional applications arise from different areas. What applies for all these services is that they come from very different directions and belong to different user groups. This results in a very heterogeneous application mix with different requirements and needs on the access networks. The applications within these networks typically use the network technology as a matter of course, and expect that it works in all situations and for all sorts of purposes without any further intervention. Mobile TV, for example, assumes that the cellular networks support the streaming of video data. Likewise, mobile-connected electricity meters rely on the timely transmission of accounting data for electricity billing. From the perspective of the communication networks, this requires not only the technical realization for the individual case, but a broad consideration of all circumstances and all requirements of special devices and applications of the users. Such a comprehensive consideration of all eventualities can only be achieved by a dynamic, customized, and intelligent management of the transmission resources. This management requires to exploit the theoretical capacity as much as possible while also taking system and network architecture as well as user and application demands into account. Hence, for a high level of customer satisfaction, all requirements of the customers and the applications need to be considered, which requires a multi-faceted resource management. The prerequisite for supporting all devices and applications is consequently a holistic resource management at different levels. At the physical level, the technical possibilities provided by different access technologies, e.g., more transmission antennas, modulation and coding of data, possible cooperation between network elements, etc., need to be exploited on the one hand. On the other hand, interference and changing network conditions have to be counteracted at physical level. 
On the application and user level, the focus should be on the customer demands due to the currently increasing amount of different devices and diverse applications (medical, hobby, entertainment, business, civil protection, etc.). The intention of this thesis is the development, investigation, and evaluation of a holistic resource management with respect to new application use cases and requirements for the networks. Therefore, different communication layers are investigated and corresponding approaches are developed using simulative methods as well as practical emulation in testbeds. The new approaches are designed with respect to different complexity and implementation levels in order to cover the design space of resource management in a systematic way. Since the approaches cannot be evaluated generally for all types of access networks, network-specific use cases and evaluations are finally carried out in addition to the conceptual design and the modeling of the scenario. The first part is concerned with management of resources at physical layer. We study distributed resource allocation approaches under different settings. Due to the ambiguous performance objectives, a high spectrum reuse is conducted in current cellular networks. This results in possible interference between cells that transmit on the same frequencies. The focus is on the identification of approaches that are able to mitigate such interference. Due to the heterogeneity of the applications in the networks, increasingly different application-specific requirements are experienced by the networks. Consequently, the focus is shifted in the second part from optimization of network parameters to consideration and integration of the application and user needs by adjusting network parameters. Therefore, application-aware resource management is introduced to enable efficient and customized access networks. As indicated before, approaches cannot be evaluated generally for all types of access networks. Consequently, the third contribution is the definition and realization of the application-aware paradigm in different access networks. First, we address multi-hop wireless mesh networks. Finally, we focus with the fourth contribution on cellular networks. Application-aware resource management is applied here to the air interface between user device and the base station. Especially in cellular networks, the intensive cost-driven competition among the different operators facilitates the usage of such a resource management to provide cost-efficient and customized networks with respect to the running applications.}, subject = {Leistungsbewertung}, language = {en} } @article{WalterDegenPfeifferetal.2021, author = {Walter, Thomas and Degen, Jacqueline and Pfeiffer, Keram and St{\"o}ckl, Anna and Montenegro, Sergio and Degen, Tobias}, title = {A new innovative real-time tracking method for flying insects applicable under natural conditions}, series = {BMC Zoology}, volume = {6}, journal = {BMC Zoology}, doi = {10.1186/s40850-021-00097-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-265716}, year = {2021}, abstract = {Background Sixty percent of all species are insects, yet despite global efforts to monitor animal movement patterns, insects are continuously underrepresented. This striking difference between species richness and the number of species monitored is not due to a lack of interest but rather to the lack of technical solutions. 
Often the accuracy and speed of established tracking methods is not high enough to record behavior and react to it experimentally in real-time, which applies in particular to small flying animals. Results Our new method of real-time tracking relates to frequencies of solar radiation which are almost completely absorbed by traveling through the atmosphere. For tracking, photoluminescent tags with a peak emission (1400 nm), which lays in such a region of strong absorption through the atmosphere, were attached to the animals. The photoluminescent properties of passivated lead sulphide quantum dots were responsible for the emission of light by the tags and provide a superb signal-to noise ratio. We developed prototype markers with a weight of 12.5 mg and a diameter of 5 mm. Furthermore, we developed a short wave infrared detection system which can record and determine the position of an animal in a heterogeneous environment with a delay smaller than 10 ms. With this method we were able to track tagged bumblebees as well as hawk moths in a flight arena that was placed outside on a natural meadow. Conclusion Our new method eliminates the necessity of a constant or predictable environment for many experimental setups. Furthermore, we postulate that the developed matrix-detector mounted to a multicopter will enable tracking of small flying insects, over medium range distances (>1000m) in the near future because: a) the matrix-detector equipped with an 70 mm interchangeable lens weighs less than 380 g, b) it evaluates the position of an animal in real-time and c) it can directly control and communicate with electronic devices.}, language = {en} } @phdthesis{Walter2019, author = {Walter, J{\"u}rgen Christian}, title = {Automation in Software Performance Engineering Based on a Declarative Specification of Concerns}, doi = {10.25972/OPUS-18090}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-180904}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Software performance is of particular relevance to software system design, operation, and evolution because it has a significant impact on key business indicators. During the life-cycle of a software system, its implementation, configuration, and deployment are subject to multiple changes that may affect the end-to-end performance characteristics. Consequently, performance analysts continually need to provide answers to and act based on performance-relevant concerns. To ensure a desired level of performance, software performance engineering provides a plethora of methods, techniques, and tools for measuring, modeling, and evaluating performance properties of software systems. However, the answering of performance concerns is subject to a significant semantic gap between the level on which performance concerns are formulated and the technical level on which performance evaluations are actually conducted. Performance evaluation approaches come with different strengths and limitations concerning, for example, accuracy, time-to-result, or system overhead. For the involved stakeholders, it can be an elaborate process to reasonably select, parameterize and correctly apply performance evaluation approaches, and to filter and interpret the obtained results. An additional challenge is that available performance evaluation artifacts may change over time, which requires to switch between different measurement-based and model-based performance evaluation approaches during the system evolution. 
In model-based analysis, the effort involved in creating performance models can also outweigh their benefits. Overcoming these deficiencies and enabling an automatic and holistic evaluation of performance throughout the software engineering life-cycle requires an approach that: (i) integrates multiple types of performance concerns and evaluation approaches, (ii) automates performance model creation, and (iii) automatically selects an evaluation methodology tailored to a specific scenario. This thesis presents a declarative approach, called Declarative Performance Engineering (DPE), to automate performance evaluation based on a human-readable specification of performance-related concerns. To this end, we separate the definition of performance concerns from their solution. The primary scientific contributions presented in this thesis are: A declarative language to express performance-related concerns and a corresponding processing framework: We provide a language to specify performance concerns independent of a concrete performance evaluation approach. Besides the specification of functional aspects, the language optionally allows the inclusion of non-functional tradeoffs. To answer these concerns, we provide a framework architecture and a corresponding reference implementation to process performance concerns automatically. It allows the integration of arbitrary performance evaluation approaches and is accompanied by reference implementations for model-based and measurement-based performance evaluation. Automated creation of architectural performance models from execution traces: The creation of performance models can be subject to significant efforts outweighing the benefits of model-based performance evaluation. We provide a model extraction framework that creates architectural performance models based on execution traces provided by monitoring tools. The framework separates the derivation of generic information from model creation routines. To derive generic information, the framework combines state-of-the-art extraction and estimation techniques. We isolate object creation routines specified in a generic model builder interface based on concepts present in multiple performance-annotated architectural modeling formalisms. To create model extraction support for a novel performance modeling formalism, developers only need to write object creation routines when reusing the generic framework, instead of creating model extraction software from scratch. Automated and extensible decision support for performance evaluation approaches: We present a methodology and tooling for the automated selection of a performance evaluation approach tailored to the user concerns and application scenario. To this end, we propose to decouple the complexity of selecting a performance evaluation approach for a given scenario by providing solution approach capability models and a generic decision engine. The proposed capability meta-model enables the description of functional and non-functional capabilities of performance evaluation approaches and tools at different granularities. In contrast to existing tree-based decision support mechanisms, the decoupling approach makes it easy to update characteristics of solution approaches and to append new rating criteria, and thereby to stay abreast of the evolution in performance evaluation tooling and system technologies. Time-to-result estimation for model-based performance prediction: The time required to execute a model-based analysis plays an important role in different decision processes. 
For example, evaluation scenarios might require the prediction results to be available in a limited period of time such that the system can be adapted in time to ensure the desired quality of service. We propose a method to estimate the time-to-result for model-based performance prediction based on model characteristics and analysis parametrization. We learn a prediction model using performance-relevant features that we determined using statistical tests. We implement the approach and demonstrate its practicability by applying it to analyze a simulation-based multi-step performance evaluation approach for a representative architectural performance modeling formalism. We validate each of the contributions based on representative case studies. The evaluation of automatic performance model extraction for two case study systems shows that the resulting models can accurately predict the performance behavior. Prediction accuracy errors are below 3\% for resource utilization and mostly less than 20\% for service response time. The separate evaluation of the reusability shows that the presented approach lowers the implementation efforts for automated model extraction tools by up to 91\%. Based on two case studies applying measurement-based and model-based performance evaluation techniques, we demonstrate the suitability of the declarative performance engineering framework to answer multiple kinds of performance concerns customized to non-functional goals. Subsequently, we discuss reduced efforts in applying performance analyses using the integrated and automated declarative approach. Also, the evaluation of the declarative framework reviews the benefits and savings of integrating performance evaluation approaches into the declarative performance engineering framework. We demonstrate the applicability of the decision framework for performance evaluation approaches by applying it to depict existing decision trees. Then, we show how we can quickly adapt to the evolution of performance evaluation methods, which is challenging for static tree-based decision support systems. In doing so, we show how to cope with the evolution of functional and non-functional capabilities of performance evaluation software and explain how to integrate new approaches. Finally, we evaluate the accuracy of the time-to-result estimation for a set of machine-learning algorithms and different training datasets. The predictions exhibit a mean percentage error below 20\%, which can be further improved by including performance evaluations of the considered model into the training data. The presented contributions represent a significant step towards an integrated performance engineering process that combines the strengths of model-based and measurement-based performance evaluation. The proposed performance concern language in conjunction with the processing framework significantly reduces the complexity of applying performance evaluations for all stakeholders. Thereby, it enables performance awareness throughout the software engineering life-cycle. 
The proposed performance concern language removes the semantic gap between the level on which performance concerns are formulated and the technical level on which performance evaluations are actually conducted by the user.}, subject = {Software}, language = {en} } @article{WagnerWannerSchichetal.2017, author = {Wagner, Martin and Wanner, Christoph and Schich, Martin and Kotseva, Kornelia and Wood, David and Hartmann, Katrin and Fette, Georg and R{\"u}cker, Viktoria and Oezkur, Mehmet and St{\"o}rk, Stefan and Heuschmann, Peter U.}, title = {Patient's and physician's awareness of kidney disease in coronary heart disease patients - a cross-sectional analysis of the German subset of the EUROASPIRE IV survey}, series = {BMC Nephrology}, volume = {18}, journal = {BMC Nephrology}, number = {321}, doi = {10.1186/s12882-017-0730-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-158387}, year = {2017}, abstract = {Background Chronic kidney disease (CKD) is a common comorbid condition in coronary heart disease (CHD). CKD predisposes the patient to acute kidney injury (AKI) during hospitalization. Data on awareness of kidney dysfunction among CHD patients and their treating physicians are lacking. In the current cross-sectional analysis of the German EUROASPIRE IV sample we aimed to investigate the physician's awareness of kidney disease of patients hospitalized for CHD and also the patient's awareness of CKD in a study visit following hospital discharge. Methods All serum creatinine (SCr) values measured during the hospital stay were used to describe impaired kidney function (eGFR\(_{CKD-EPI}\) < 60 ml/min/1.73m2) at admission, discharge and episodes of AKI (KDIGO definition). Information extracted from hospital discharge letters and correct ICD coding for kidney disease was studied as a surrogate of physician's awareness of kidney disease. All patients were interrogated 0.5 to 3 years after hospital discharge, whether they had ever been told about kidney disease by a physician. Results Of the 536 patients, 32\% had evidence for acute or chronic kidney disease during the index hospital stay. Either condition was mentioned in the discharge letter in 22\%, and 72\% were correctly coded according to ICD-10. At the study visit in the outpatient setting 35\% had impaired kidney function. Of 158 patients with kidney disease, 54 (34\%) were aware of CKD. Determinants of patient's awareness were severity of CKD (OR\(_{eGFR}\) 0.94; 95\%CI 0.92-0.96), obesity (OR 1.97; 1.07-3.64), history of heart failure (OR 1.99; 1.00-3.97), and mentioning of kidney disease in the index event's hospital discharge letter (OR 5.51; 2.35-12.9). Conclusions Although CKD is frequent in CHD, only one third of patients is aware of this condition. Patient's awareness was associated with kidney disease being mentioned in the hospital discharge letter. Future studies should examine how raising physician's awareness for kidney dysfunction may improve patient's awareness of CKD.}, language = {en} } @phdthesis{vonKistowski2019, author = {von Kistowski, J{\´o}akim Gunnarsson}, title = {Measuring, Rating, and Predicting the Energy Efficiency of Servers}, doi = {10.25972/OPUS-17847}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178478}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Energy efficiency of computing systems has become an increasingly important issue over the last decades. 
In 2015, data centers were responsible for 2\% of the world's greenhouse gas emissions, which is roughly the same as the amount produced by air travel. In addition to these environmental concerns, power consumption of servers in data centers results in significant operating costs, which increase by at least 10\% each year. To address this challenge, the U.S. EPA and other government agencies are considering the use of novel measurement methods in order to label the energy efficiency of servers. The energy efficiency and power consumption of a server is subject to a great number of factors, including, but not limited to, hardware, software stack, workload, and load level. This huge number of influencing factors makes measuring and rating of energy efficiency challenging. It also makes it difficult to find an energy-efficient server for a specific use-case. Among others, server provisioners, operators, and regulators would profit from information on the servers in question and on the factors that affect those servers' power consumption and efficiency. However, we see a lack of measurement methods and metrics for energy efficiency of the systems under consideration. Even assuming that a measurement methodology existed, making decisions based on its results would be challenging. Power prediction methods that make use of these results would aid in decision making. They would enable potential server customers to make better purchasing decisions and help operators predict the effects of potential reconfigurations. Existing energy efficiency benchmarks cannot fully address these challenges, as they only measure single applications at limited sets of load levels. In addition, existing efficiency metrics are not helpful in this context, as they are usually a variation of the simple performance per power ratio, which is only applicable to single workloads at a single load level. Existing data center efficiency metrics, on the other hand, express the efficiency of the data center space and power infrastructure, not focusing on the efficiency of the servers themselves. Power prediction methods for not-yet-available systems that could make use of the results provided by a comprehensive power rating methodology are also lacking. Existing power prediction models for hardware designers have a very fine level of granularity and detail that would not be useful for data center operators. This thesis presents a measurement and rating methodology for energy efficiency of servers and an energy efficiency metric to be applied to the results of this methodology. We also design workloads, load intensity and distribution models, and mechanisms that can be used for energy efficiency testing. Based on this, we present power prediction mechanisms and models that utilize our measurement methodology and its results for power prediction. Specifically, the six major contributions of this thesis are: We present a measurement methodology and metrics for energy efficiency rating of servers that use multiple, specifically chosen workloads at different load levels for a full system characterization. We evaluate the methodology and metric with regard to their reproducibility, fairness, and relevance. We investigate the power and performance variations of test results and show fairness of the metric through a mathematical proof and a correlation analysis on a set of 385 servers. We evaluate the metric's relevance by showing the relationships that can be established between metric results and third-party applications. 
We create models and extraction mechanisms for load profiles that vary over time, as well as load distribution mechanisms and policies. The models are designed to be used to define arbitrary dynamic load intensity profiles that can be leveraged for benchmarking purposes. The load distribution mechanisms place workloads on computing resources in a hierarchical manner. Our load intensity models can be extracted in less than 0.2 seconds and our resulting models feature a median modeling error of 12.7\% on average. In addition, our new load distribution strategy can save up to 10.7\% of power consumption on a single server node. We introduce an approach to create small-scale workloads that emulate the power consumption-relevant behavior of large-scale workloads by approximating their CPU performance counter profile, and we introduce TeaStore, a distributed, micro-service-based reference application. TeaStore can be used to evaluate power and performance model accuracy, elasticity of cloud auto-scalers, and the effectiveness of power saving mechanisms for distributed systems. We show that we are capable of emulating the power consumption behavior of realistic workloads with a mean deviation less than 10\% and down to 0.2 watts (1\%). We demonstrate the use of TeaStore in the context of performance model extraction and cloud auto-scaling also showing that it may generate workloads with different effects on the power consumption of the system under consideration. We present a method for automated selection of interpolation strategies for performance and power characterization. We also introduce a configuration approach for polynomial interpolation functions of varying degrees that improves prediction accuracy for system power consumption for a given system utilization. We show that, in comparison to regression, our automated interpolation method selection and configuration approach improves modeling accuracy by 43.6\% if additional reference data is available and by 31.4\% if it is not. We present an approach for explicit modeling of the impact a virtualized environment has on power consumption and a method to predict the power consumption of a software application. Both methods use results produced by our measurement methodology to predict the respective power consumption for servers that are otherwise not available to the person making the prediction. Our methods are able to predict power consumption reliably for multiple hypervisor configurations and for the target application workloads. Application workload power prediction features a mean average absolute percentage error of 9.5\%. Finally, we propose an end-to-end modeling approach for predicting the power consumption of component placements at run-time. The model can also be used to predict the power consumption at load levels that have not yet been observed on the running system. We show that we can predict the power consumption of two different distributed web applications with a mean absolute percentage error of 2.2\%. In addition, we can predict the power consumption of a system at a previously unobserved load level and component distribution with an error of 1.2\%. The contributions of this thesis already show a significant impact in science and industry. The presented efficiency rating methodology, including its metric, have been adopted by the U.S. EPA in the latest version of the ENERGY STAR Computer Server program. 
They are also being considered by additional regulatory agencies, including the EU Commission and the China National Institute of Standardization. In addition, the methodology's implementation and the underlying methodology itself have already found use in several research publications. Regarding future work, we see a need for new workloads targeting specialized server hardware. At the moment, we are witnessing a shift in execution hardware to specialized machine learning chips, general purpose GPU computing, FPGAs being embedded into compute servers, etc. To ensure that our measurement methodology remains relevant, workloads covering these areas are required. Similarly, power prediction models must be extended to cover these new scenarios.}, subject = {Benchmarking}, language = {en} } @techreport{VomhoffGeisslerHossfeld2022, type = {Working Paper}, author = {Vomhoff, Viktoria and Geißler, Stefan and Hoßfeld, Tobias}, title = {Identification of Signaling Patterns in Mobile IoT Signaling Traffic}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28081}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280819}, pages = {4}, year = {2022}, abstract = {We attempt to identify sequences of signaling dialogs, to strengthen our understanding of the signaling behavior of IoT devices by examining a dataset containing over 270.000 distinct IoT devices whose signaling traffic has been observed over a 31-day period in a 2G network [4]. We propose a set of rules that allows the assembly of signaling dialogs into so-called sessions in order to identify common patterns and lay the foundation for future research in the areas of traffic modeling and anomaly detection.}, subject = {Datennetz}, language = {en} } @techreport{VomhoffGeisslerGebertetal.2023, type = {Working Paper}, author = {Vomhoff, Viktoria and Geissler, Stefan and Gebert, Steffen and Hossfeld, Tobias}, title = {Towards Understanding the Global IPX Network from an MVNO Perspective}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32212}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322121}, pages = {4}, year = {2023}, abstract = {In this paper, we work to understand the global IPX network from the perspective of an MVNO. In order to do this, we provide a brief description of the global architecture of mobile carriers. We provide initial results with respect to mapping the vast and complex interconnection network enabling global roaming from the point of view of a single MVNO. 
Finally, we provide preliminary results regarding the quality of service observed under global roaming conditions.}, language = {en} } @article{UnruhLandeckOberdoerferetal.2021, author = {Unruh, Fabian and Landeck, Maximilian and Oberd{\"o}rfer, Sebastian and Lugrin, Jean-Luc and Latoschik, Marc Erich}, title = {The Influence of Avatar Embodiment on Time Perception - Towards VR for Time-Based Therapy}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.658509}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259076}, pages = {658509}, year = {2021}, abstract = {Psycho-pathological conditions, such as depression or schizophrenia, are often accompanied by a distorted perception of time. People suffering from these conditions often report that the passage of time slows down considerably and that they are "stuck in time." Virtual Reality (VR) could potentially help to diagnose and maybe treat such mental conditions. However, the conditions in which a VR simulation could correctly diagnose a time perception deviation are still unknown. In this paper, we present an experiment investigating the difference in time experience with and without a virtual body in VR, also known as an avatar. The process of substituting a person's body with a virtual body is called avatar embodiment. Numerous studies demonstrated interesting perceptual, emotional, behavioral, and psychological effects caused by avatar embodiment. However, the relations between time perception and avatar embodiment are still unclear. Whether the presence or absence of an avatar already influences time perception is still open to question. Therefore, we used a between-subjects design with and without avatar embodiment as well as a real condition (avatar vs. no-avatar vs. real). A group of 105 healthy subjects had to wait for seven and a half minutes in a room without any distractors (e.g., no window, magazine, people, decoration) or time indicators (e.g., clocks, sunlight). The virtual environment replicates the real physical environment. Participants were unaware that they would later be asked to estimate their waiting time duration as well as to describe their experience of the passage of time. Our main finding shows that the presence of an avatar leads to a significantly faster perceived passage of time. It seems promising to integrate avatar embodiment into future VR time-based therapy applications, as they could potentially modulate a user's perception of the passage of time. We also found no significant difference in time perception between the real and the VR conditions (avatar, no-avatar), but further research is needed to better understand this outcome.}, language = {en} } @phdthesis{Tzschichholz2014, author = {Tzschichholz, Tristan}, title = {Relative pose estimation of known rigid objects using a novel approach to high-level PMD-/CCD- sensor data fusion with regard to applications in space}, isbn = {978-3-923959-95-2}, issn = {1868-7474}, doi = {10.25972/OPUS-10391}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-103918}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {In this work, a novel method for estimating the relative pose of a known object is presented, which relies on an application-specific data fusion process. A PMD-sensor in conjunction with a CCD-sensor is used to perform the pose estimation. 
Furthermore, the work provides a method for extending the measurement range of the PMD sensor along with the necessary calibration methodology. Finally, extensive measurements on a very accurate Rendezvous and Docking testbed are made to evaluate the performance, which includes a detailed discussion of lighting conditions.}, subject = {Bildverarbeitung}, language = {en} } @article{TsouliasJoerissenNuechter2022, author = {Tsoulias, Nikos and J{\"o}rissen, Sven and N{\"u}chter, Andreas}, title = {An approach for monitoring temperature on fruit surface by means of thermal point cloud}, series = {MethodsX}, volume = {9}, journal = {MethodsX}, issn = {2215-0161}, doi = {10.1016/j.mex.2022.101712}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300270}, year = {2022}, abstract = {Heat and excessive solar radiation can produce abiotic stresses during apple maturation, affecting fruit quality. Therefore, the monitoring of the temperature on the fruit surface (FST) over the growing period can allow the identification of thresholds above which several physiological disorders, such as sunburn, may occur in apple. The current approaches neglect the spatial variation of FST and have reduced repeatability, resulting in unreliable predictions. In this study, LiDAR laser scanning and thermal imaging were employed to detect the temperature on the fruit surface by means of a 3D point cloud. A process for calibrating the two sensors based on an active board target and producing a 3D thermal point cloud was suggested. After calibration, the sensor system was utilised to scan the fruit trees, while the temperature values assigned in the corresponding 3D point cloud were based on the extrinsic calibration. A fruit detection algorithm was then performed to segment the FST of each apple. • The approach allows the calibration of a LiDAR laser scanner with a thermal camera in order to produce a 3D thermal point cloud. • The method can be applied in apple trees for segmenting FST in 3D. Moreover, the approach can be utilised to predict several physiological disorders, including sunburn, on the fruit surface.}, language = {en} } @inproceedings{TrumanvonMammen2021, author = {Truman, Samuel and von Mammen, Sebastian}, title = {Interactive Self-Assembling Agent Ensembles}, series = {Proceedings of the 1st Games Technology Summit}, booktitle = {Proceedings of the 1st Games Technology Summit}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-246032}, pages = {29-36}, year = {2021}, abstract = {In this paper, we bridge the gap between procedural content generation (PCG) and user-generated content (UGC) by proposing and demonstrating an interactive agent-based model of self-assembling ensembles that can be directed through user input. We motivate these efforts by considering the opportunities technology provides to pursue game designs based on corresponding game design frameworks. We present three different use cases of the proposed model that emphasize its potential to (1) self-assemble into predefined 3D graphical assets, (2) define new structures in the context of virtual environments by self-assembling layers on the surfaces of arbitrary 3D objects, and (3) allow novel structures to self-assemble only considering the model's configuration and no external dependencies. To address the performance restrictions in computer games, we realized the prototypical model implementation by means of an efficient entity component system (ECS). 
We conclude the paper with an outlook on future steps to further explore novel interactive, dynamic PCG mechanics and to ensure their efficiency.}, language = {en} } @phdthesis{Travers2007, author = {Travers, Stephen}, title = {Structural Properties of NP-Hard Sets and Uniform Characterisations of Complexity Classes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-27124}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {This thesis is devoted to the study of computational complexity theory, a branch of theoretical computer science. Computational complexity theory investigates the inherent difficulty in designing efficient algorithms for computational problems. By doing so, it analyses the scalability of computational problems and algorithms and places practical limits on what computers can actually accomplish. Computational problems are categorised into complexity classes. Among the most important complexity classes are the class NP and the subclass of NP-complete problems, which comprises many important optimisation problems in the field of operations research. Moreover, with the P-NP-problem, the class NP represents the most important unsolved question in computer science. The first part of this thesis is devoted to the study of NP-complete-, and more generally, NP-hard problems. It aims at improving our understanding of this important complexity class by systematically studying how altering NP-hard sets affects their NP-hardness. This research is related to longstanding open questions concerning the complexity of unions of disjoint NP-complete sets, and the existence of sparse NP-hard sets. The second part of the thesis is also dedicated to complexity classes but takes a different perspective: In a sense, after investigating the interior of complexity classes in the first part, the focus shifts to the description of complexity classes and thereby to the exterior in the second part. It deals with the description of complexity classes through leaf languages, a uniform framework which allows us to characterise a great variety of important complexity classes. The known concepts are complemented by a new leaf-language model. To a certain extent, this new approach combines the advantages of the known models. The presented results give evidence that the connection between the theory of formal languages and computational complexity theory might be closer than formerly known.}, subject = {Berechnungskomplexit{\"a}t}, language = {en} } @book{TranGiaHossfeld2021, author = {Tran-Gia, Phuoc and Hoßfeld, Tobias}, title = {Performance Modeling and Analysis of Communication Networks}, edition = {1st edition}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-152-5}, doi = {10.25972/WUP-978-3-95826-153-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241920}, publisher = {W{\"u}rzburg University Press}, pages = {xiii, 353}, year = {2021}, abstract = {This textbook provides an introduction to common methods of performance modeling and analysis of communication systems. These methods form the basis of traffic engineering, teletraffic theory, and analytical system dimensioning. The fundamentals of probability theory, stochastic processes, Markov processes, and embedded Markov chains are presented. Basic queueing models are described with applications in communication networks. 
Advanced methods are presented that have been frequently used in recent practice, especially discrete-time analysis algorithms, or that go beyond classical performance measures, addressing, for example, Quality of Experience or energy efficiency. Recent examples of modern communication networks include Software Defined Networking and the Internet of Things. Throughout the book, illustrative examples are used to provide practical experience in performance modeling and analysis. Target group: The book is aimed at students and scientists in computer science and technical computer science, operations research, electrical engineering and economics.}, language = {en} } @article{ToepferCorovicFetteetal.2015, author = {Toepfer, Martin and Corovic, Hamo and Fette, Georg and Kl{\"u}gl, Peter and St{\"o}rk, Stefan and Puppe, Frank}, title = {Fine-grained information extraction from German transthoracic echocardiography reports}, series = {BMC Medical Informatics and Decision Making}, volume = {15}, journal = {BMC Medical Informatics and Decision Making}, number = {91}, doi = {10.1186/s12911-015-0215-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-125509}, year = {2015}, abstract = {Background Information extraction techniques that get structured representations out of unstructured data make a large amount of clinically relevant information about patients accessible for semantic applications. These methods typically rely on standardized terminologies that guide this process. Many languages and clinical domains, however, lack appropriate resources and tools, as well as evaluations of their applications, especially if detailed conceptualizations of the domain are required. For instance, German transthoracic echocardiography reports have not been targeted sufficiently before, despite their importance for clinical trials. This work therefore aimed at the development and evaluation of an information extraction component with a fine-grained terminology that enables the recognition of almost all relevant information stated in German transthoracic echocardiography reports at the University Hospital of W{\"u}rzburg. Methods A domain expert validated and iteratively refined an automatically inferred base terminology. The terminology was used by an ontology-driven information extraction system that outputs attribute value pairs. The final component has been mapped to the central elements of a standardized terminology, and it has been evaluated on documents with different layouts. Results The final system achieved state-of-the-art precision (micro average .996) and recall (micro average .961) on 100 test documents that represent more than 90\% of all reports. In particular, principal aspects as defined in a standardized external terminology were recognized with F\(_1\) = .989 (micro average) and F\(_1\) = .963 (macro average). As a result of keyword matching and restraint concept extraction, the system obtained high precision also on unstructured or exceptionally short documents, and documents with uncommon layout. Conclusions The developed terminology and the proposed information extraction system allow the extraction of fine-grained information from German semi-structured transthoracic echocardiography reports with very high precision and high recall on the majority of documents at the University Hospital of W{\"u}rzburg. 
Extracted results populate a clinical data warehouse, which supports clinical research.}, language = {en} } @phdthesis{Tischler2008, author = {Tischler, German}, title = {Theory and Applications of Parametric Weighted Finite Automata}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-28145}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Parametric weighted finite automata (PWFA) are a multi-dimensional generalization of weighted finite automata. The expressiveness of PWFA contains the expressiveness of weighted finite automata as well as the expressiveness of affine iterated function systems. The thesis discusses theory and applications of PWFA. The properties of PWFA-definable sets are studied, and it is shown that some fractal generator systems can be simulated using PWFA and that various real and complex functions can be represented by PWFA. Furthermore, the decoding of PWFA and the interpretation of PWFA-definable sets are discussed.}, subject = {Automat }, language = {en} } @article{SirbuBeckerCaminitietal.2015, author = {S{\^i}rbu, Alina and Becker, Martin and Caminiti, Saverio and De Baets, Bernard and Elen, Bart and Francis, Louise and Gravino, Pietro and Hotho, Andreas and Ingarra, Stefano and Loreto, Vittorio and Molino, Andrea and Mueller, Juergen and Peters, Jan and Ricchiuti, Ferdinando and Saracino, Fabio and Servedio, Vito D.P. and Stumme, Gerd and Theunis, Jan and Tria, Francesca and Van den Bossche, Joris}, title = {Participatory Patterns in an International Air Quality Monitoring Initiative}, series = {PLoS ONE}, volume = {10}, journal = {PLoS ONE}, number = {8}, doi = {10.1371/journal.pone.0136763}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-151379}, pages = {e0136763}, year = {2015}, abstract = {The issue of sustainability is at the top of the political and societal agenda, being considered of extreme importance and urgency. Human individual action impacts the environment both locally (e.g., local air/water quality, noise disturbance) and globally (e.g., climate change, resource use). Urban environments represent a crucial example, with an increasing realization that the most effective way of producing a change is involving the citizens themselves in monitoring campaigns (a citizen science bottom-up approach). This is possible by developing novel technologies and IT infrastructures enabling large-scale citizen participation. Here, in the wider framework of one of the first such projects, we show results from an international competition where citizens were involved in mobile air pollution monitoring using low-cost sensing devices, combined with a web-based game to monitor perceived levels of pollution. Measures of shift in perceptions over the course of the campaign are provided, together with insights into participatory patterns emerging from this study. Interesting effects related to inertia and to direct involvement in measurement activities rather than indirect information exposure are also highlighted, indicating that direct involvement can enhance learning and environmental awareness.
In the future, this could result in better adoption of policies towards decreasing pollution.}, language = {en} } @phdthesis{Sun2014, author = {Sun, Kaipeng}, title = {Six Degrees of Freedom Object Pose Estimation with Fusion Data from a Time-of-flight Camera and a Color Camera}, isbn = {978-3-923959-97-6}, doi = {10.25972/OPUS-10508}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-105089}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Object six Degrees of Freedom (6DOF) pose estimation is a fundamental problem in many practical robotic applications, where the target or an obstacle with a simple or complex shape can move fast in cluttered environments. In this thesis, a 6DOF pose estimation algorithm is developed based on the fused data from a time-of-flight camera and a color camera. The algorithm is divided into two stages: an annealed-particle-filter-based coarse pose estimation stage and a gradient-descent-based accurate pose optimization stage. In the first stage, each particle is evaluated with a sparse representation, so that large inter-frame motion of the target can be handled well. In the second stage, the conventional range-data-based Iterative Closest Point algorithm is extended by incorporating the target appearance information and is used to calculate the accurate pose by refining the coarse estimate from the first stage. To deal with significant illumination variations during tracking, spherical harmonic illumination modeling is investigated and integrated into both stages. The robustness and accuracy of the proposed algorithm are demonstrated through experiments on various objects in both indoor and outdoor environments. Moreover, real-time performance can be achieved with graphics processing unit acceleration.}, subject = {Mustererkennung}, language = {en} } @article{StrohmeierWalterRotheetal.2018, author = {Strohmeier, Michael and Walter, Thomas and Rothe, Julian and Montenegro, Sergio}, title = {Ultra-wideband based pose estimation for small unmanned aerial vehicles}, series = {IEEE Access}, volume = {6}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2018.2873571}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-177503}, pages = {57526-57535}, year = {2018}, abstract = {This paper proposes a 3-D local pose estimation system for a small Unmanned Aerial Vehicle (UAV) with a weight limit of 200 g and a very small footprint of 10 cm × 10 cm. The system is realized by fusing 3-D position estimations from an Ultra-Wide Band (UWB) transceiver network with Inertial Measurement Unit (IMU) sensor data and data from a barometric pressure sensor. The 3-D position from the UWB network is estimated using Multi-Dimensional Scaling (MDS) and range measurements between the transceivers. The range measurements are obtained using Double-Sided Two-Way Ranging (DS-TWR), thus eliminating the need for an additional clock synchronization mechanism. The sensor fusion is accomplished using a loosely coupled Extended Kalman Filter (EKF) architecture. Extensive evaluation of the proposed system shows that a position accuracy with a Root-Mean-Square Error (RMSE) of 0.20 cm can be obtained.
The orientation angle can be estimated with an RMSE of 1.93°.}, language = {en} } @article{StrohmeierMontenegro2017, author = {Strohmeier, Michael and Montenegro, Sergio}, title = {Coupled GPS/MEMS IMU Attitude Determination of Small UAVs with COTS}, series = {Electronics}, volume = {6}, journal = {Electronics}, number = {1}, doi = {10.3390/electronics6010015}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-171179}, pages = {15}, year = {2017}, abstract = {This paper proposes an attitude determination system for small Unmanned Aerial Vehicles (UAVs) with a weight limit of 5 kg and a small footprint of 0.5 m × 0.5 m. The system is realized by coupling single-frequency Global Positioning System (GPS) code and carrier-phase measurements with the data acquired from a Micro-Electro-Mechanical System (MEMS) Inertial Measurement Unit (IMU) using consumer-grade Components-Off-The-Shelf (COTS) only. The sensor fusion is accomplished using two Extended Kalman Filters (EKF) that are coupled by exchanging information about the currently estimated baseline. With a baseline of 48 cm, the static heading accuracy of the proposed system is comparable to that of a commercial single-frequency GPS heading system with an accuracy of approximately 0.25°/m. Flight testing shows that the proposed system is able to obtain a reliable and stable GPS heading estimation without an aiding magnetometer.}, language = {en} } @phdthesis{Strohmeier2021, author = {Strohmeier, Michael}, title = {FARN - A Novel UAV Flight Controller for Highly Accurate and Reliable Navigation}, doi = {10.25972/OPUS-22313}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-223136}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {This thesis describes the functional principle of FARN, a novel flight controller for Unmanned Aerial Vehicles (UAVs) designed for mission scenarios that require highly accurate and reliable navigation. The required precision is achieved by combining low-cost inertial sensors and Ultra-Wide Band (UWB) radio ranging with raw and carrier phase observations from the Global Navigation Satellite System (GNSS). The flight controller is developed within the scope of this work according to the mission requirements of two research projects and is successfully applied under real conditions. FARN includes a GNSS compass that allows precise heading estimation even in environments where conventional heading estimation based on a magnetic compass is not reliable. The GNSS compass combines the raw observations of two GNSS receivers with FARN's real-time capable attitude determination. Thus, the deployment of UAVs in Arctic environments within the ROBEX project is possible despite the weak horizontal component of the Earth's magnetic field. Additionally, FARN allows centimeter-accurate relative positioning of multiple UAVs in real time. This enables precise flight maneuvers within a swarm, but also the execution of cooperative tasks in which several UAVs have a common goal or are physically coupled. In conjunction with the MIDRAS project, a drone defense system was developed in which two cooperative drones act in a coordinated manner and carry a jointly suspended net to capture a potentially dangerous drone in mid-air. Within this thesis, both theoretical and practical aspects are covered regarding UAV development, with an emphasis on the fields of signal processing, guidance and control, electrical engineering, robotics, computer science, and programming of embedded systems.
Furthermore, this work aims to provide a condensed reference for further research in the field of UAVs. The work describes and models the utilized UAV platform, the propulsion system, the electronic design, and the utilized sensors. After establishing mathematical conventions for attitude representation, the actual core of the flight controller, namely the embedded ego-motion estimation and the principal control architecture, are outlined. Subsequently, based on basic GNSS navigation algorithms, advanced carrier-phase-based methods and their coupling to the ego-motion estimation framework are derived. Additionally, various implementation details and optimization steps of the system are described. The system is successfully deployed and tested within the two projects. After a critical examination and evaluation of the developed system, existing limitations and possible improvements are outlined.}, subject = {Drohne }, language = {en} } @article{SteiningerKobsDavidsonetal.2021, author = {Steininger, Michael and Kobs, Konstantin and Davidson, Padraig and Krause, Anna and Hotho, Andreas}, title = {Density-based weighting for imbalanced regression}, series = {Machine Learning}, volume = {110}, journal = {Machine Learning}, number = {8}, issn = {1573-0565}, doi = {10.1007/s10994-021-06023-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269177}, pages = {2187-2211}, year = {2021}, abstract = {In many real-world settings, imbalanced data impedes the model performance of learning algorithms, like neural networks, mostly for rare cases. This is especially problematic for tasks focusing on these rare occurrences. For example, when estimating precipitation, extreme rainfall events are scarce but important considering their potential consequences. While there are numerous well-studied solutions for classification settings, most of them cannot be applied to regression easily. Of the few solutions for regression tasks, barely any have explored cost-sensitive learning, which is known to have advantages compared to sampling-based methods in classification tasks. In this work, we propose a sample weighting approach for imbalanced regression datasets called DenseWeight and a cost-sensitive learning approach for neural network regression with imbalanced data called DenseLoss, based on our weighting scheme. DenseWeight weights data points according to their target value rarities through kernel density estimation (KDE). DenseLoss adjusts each data point's influence on the loss according to DenseWeight, giving rare data points more influence on model training compared to common data points. We show on multiple differently distributed datasets that DenseLoss significantly improves model performance for rare data points through its density-based weighting scheme. Additionally, we compare DenseLoss to the state-of-the-art method SMOGN, finding that our method mostly yields better performance.
Our approach provides more control over model training as it enables us to actively decide on the trade-off between focusing on common or rare cases through a single hyperparameter, allowing the training of better models for rare data points.}, language = {en} } @article{SteiningerAbelZiegleretal.2023, author = {Steininger, Michael and Abel, Daniel and Ziegler, Katrin and Krause, Anna and Paeth, Heiko and Hotho, Andreas}, title = {ConvMOS: climate model output statistics with deep learning}, series = {Data Mining and Knowledge Discovery}, volume = {37}, journal = {Data Mining and Knowledge Discovery}, number = {1}, issn = {1384-5810}, doi = {10.1007/s10618-022-00877-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324213}, pages = {136-166}, year = {2023}, abstract = {Climate models are the tool of choice for scientists researching climate change. Like all models, they suffer from errors, particularly systematic and location-specific representation errors. One way to reduce these errors is model output statistics (MOS), where the model output is fitted to observational data with machine learning. In this work, we assess the use of convolutional Deep Learning climate MOS approaches and present the ConvMOS architecture, which is specifically designed based on the observation that there are systematic and location-specific errors in the precipitation estimates of climate models. We apply ConvMOS models to the simulated precipitation of the regional climate model REMO, showing that a combination of per-location model parameters for reducing location-specific errors and global model parameters for reducing systematic errors is indeed beneficial for MOS performance. We find that ConvMOS models can reduce errors considerably and perform significantly better than three commonly used MOS approaches and plain ResNet and U-Net models in most cases. Our results show that non-linear MOS models underestimate the number of extreme precipitation events, which we alleviate by training models specialized towards extreme precipitation events with the imbalanced regression method DenseLoss. While we consider climate MOS, we argue that aspects of ConvMOS may also be beneficial in other domains with geospatial data, such as air pollution modeling or weather forecasts.}, subject = {Klima}, language = {en} } @phdthesis{Steininger2023, author = {Steininger, Michael}, title = {Deep Learning for Geospatial Environmental Regression}, doi = {10.25972/OPUS-31312}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313121}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Environmental issues have emerged especially since humans began burning fossil fuels, which has led to air pollution and climate change that harm the environment. These issues' substantial consequences evoked strong efforts towards assessing the state of our environment. Various environmental machine learning (ML) tasks aid these efforts. These tasks concern environmental data but are otherwise common ML tasks, i.e., datasets are split (training, validation, test), hyperparameters are optimized on validation data, and test set metrics measure a model's generalizability. This work focuses on the following environmental ML tasks: Regarding air pollution, land use regression (LUR) estimates air pollutant concentrations at locations where no measurements are available, based on measured locations and each location's land use (e.g., industry, streets). For LUR, this work uses data from London (modeled) and Zurich (measured).
Concerning climate change, a common ML task is model output statistics (MOS), where a climate model's output for a study area is altered to better fit Earth observations and provide more accurate climate data. This work uses the regional climate model (RCM) REMO and Earth observations from the E-OBS dataset for MOS. Another task regarding climate is grain size distribution interpolation, where soil properties at locations without measurements are estimated based on the few measured locations. This can provide climate models with soil information, which is important for hydrology. For this task, data from Lower Franconia is used. Such environmental ML tasks commonly have a number of properties: (i) geospatiality, i.e., their data refers to locations relative to the Earth's surface. (ii) The environmental variables to estimate or predict are usually continuous. (iii) Data can be imbalanced due to relatively rare extreme events (e.g., extreme precipitation). (iv) Multiple related potential target variables can be available per location, since measurement devices often contain different sensors. (v) Labels are often only sparsely available in space, since conducting measurements at all locations of interest is usually infeasible. These properties present challenges but also opportunities when designing ML methods for such tasks. In the past, environmental ML tasks have been tackled with conventional ML methods, such as linear regression or random forests (RFs). However, the field of ML has made tremendous leaps beyond these classic models through deep learning (DL). In DL, models use multiple layers of neurons, producing increasingly higher-level feature representations with growing layer depth. DL has made previously infeasible ML tasks feasible, significantly improved the performance for many tasks in comparison to existing ML models, and eliminated the need for manual feature engineering in some domains due to its ability to learn features from raw data. To harness these advantages for environmental domains, it is promising to develop novel DL methods for environmental ML tasks. This thesis presents methods for dealing with special challenges and exploiting opportunities inherent to environmental ML tasks in conjunction with DL. To this end, the proposed methods explore the following techniques: (i) Convolutions as in convolutional neural networks (CNNs) to exploit recurring spatial patterns in geospatial data. (ii) Posing the problems as regression tasks to estimate the continuous variables. (iii) Density-based weighting to improve estimation performance for rare and extreme events. (iv) Multi-task learning to make use of multiple related target variables. (v) Semi-supervised learning to cope with label sparsity. Using these techniques, this thesis considers four research questions: (i) Can air pollution be estimated without manual feature engineering? This is answered positively by the introduction of the CNN-based LUR model MapLUR as well as the off-the-shelf LUR solution OpenLUR. (ii) Can colocated pollution data improve spatial air pollution models? Multi-task learning for LUR is developed for this, showing potential for improvements with colocated data. (iii) Can DL models improve the quality of climate model outputs? The proposed DL climate MOS architecture ConvMOS demonstrates this. Additionally, semi-supervised training of multilayer perceptrons (MLPs) for grain size distribution interpolation is presented, which can provide improved input data.
(iv) Can DL models be taught to better estimate climate extremes? To this end, density-based weighting for imbalanced regression (DenseLoss) is proposed and applied to the DL architecture ConvMOS, improving climate extremes estimation. These methods show how DL techniques in particular can be developed for environmental ML tasks with their special characteristics in mind. This allows for better models than previously possible with conventional ML, leading to more accurate assessment and better understanding of the state of our environment.}, subject = {Deep learning}, language = {en} } @article{SteinhaeusserOberdoerfervonMammenetal.2022, author = {Steinhaeusser, Sophia C. and Oberd{\"o}rfer, Sebastian and von Mammen, Sebastian and Latoschik, Marc Erich and Lugrin, Birgit}, title = {Joyful adventures and frightening places - designing emotion-inducing virtual environments}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.919163}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284831}, year = {2022}, abstract = {Virtual environments (VEs) can evoke and support emotions, as experienced when playing emotionally arousing games. We theoretically approach the design of fear- and joy-evoking VEs based on a literature review of empirical studies on virtual and real environments as well as video game reviews and content analyses. We define the design space and identify central design elements that evoke specific positive and negative emotions. Based on that, we derive and present guidelines for emotion-inducing VE design with respect to design themes, colors and textures, and lighting configurations. To validate our guidelines in two user studies, we 1) expose participants to 360° videos of VEs designed following the individual guidelines and 2) immerse them in neutral, positive, and negative emotion-inducing VEs combining all respective guidelines in Virtual Reality. The results support our theoretically derived guidelines by revealing significant differences in terms of fear and joy induction.}, language = {en} } @article{StauffertNieblingLatoschik2020, author = {Stauffert, Jan-Philipp and Niebling, Florian and Latoschik, Marc Erich}, title = {Latency and Cybersickness: Impact, Causes, and Measures. A Review}, series = {Frontiers in Virtual Reality}, volume = {1}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2020.582204}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-236133}, year = {2020}, abstract = {Latency is a key characteristic inherent to any computer system. Motion-to-Photon (MTP) latency describes the time between the movement of a tracked object and its corresponding movement rendered and depicted by computer-generated images on a graphical output screen. High MTP latency can cause a loss of performance in interactive graphics applications and, even worse, can provoke cybersickness in Virtual Reality (VR) applications. Here, cybersickness can degrade VR experiences or may render the experiences completely unusable. It can confound research findings of an otherwise sound experiment. Latency as a contributing factor to cybersickness needs to be properly understood. Its effects need to be analyzed, its sources need to be identified, good measurement methods need to be developed, and proper countermeasures need to be devised in order to reduce potentially harmful impacts of latency on the usability and safety of VR systems.
Research shows that latency can exhibit intricate timing patterns with various spiking and periodic behavior. These timing behaviors may vary, yet most are found to provoke cybersickness. Overall, latency can differ drastically between different systems, interfering with the generalization of measurement results. This review article describes the causes and effects of latency with regard to cybersickness. We report on different existing approaches to measure and report latency. Hence, the article provides readers with the knowledge to understand and report latency for their own applications, evaluations, and experiments. It should also help to measure, identify, and finally control and counteract latency, and hence gain confidence in the soundness of empirical data collected during VR exposures. Low latency increases the usability and safety of VR systems.}, language = {en} }