@article{MontenegroDannemann2011, author = {Montenegro, Sergio and Dannemann, Frank}, title = {Experiences and Best Practice Requirements Engineering for Small Satellites}, series = {Computing Science and Technology International Journal}, volume = {1}, journal = {Computing Science and Technology International Journal}, number = {2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153307}, year = {2011}, abstract = {The design and implementation of a satellite mission are divided into several different phases. Parallel to these phases, an evolution of the requirements takes place. Because so many people in different locations and from different backgrounds have to work on different subsystems concurrently, the ideas and concepts of the different subsystems and locations will diverge. We have to bring them together again. To do this, we introduce synchronization points. We bring representatives from all subsystems and all locations together in a Concurrent Engineering Facility (CEF) room. Between CEF sessions, the different subsystems will diverge again, but each time the divergence will be smaller. Our subjective experience from test projects suggests that these CEF sessions are most effective in the first phases of the development, from requirements engineering until the first coarse design. Once the design and the concepts are fixed, the developers move on to implementation and the conceptual divergences become much smaller; therefore, the CEF sessions are no longer of much help.}, language = {en} } @phdthesis{Rehfeld2016, author = {Rehfeld, Stephan}, title = {Untersuchung der Nebenl{\"a}ufigkeit, Latenz und Konsistenz asynchroner Interaktiver Echtzeitsysteme mittels Profiling und Model Checking}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147431}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Im Rahmen dieser Arbeit werden die Nebenl{\"a}ufigkeit, Konsistenz und Latenz in asynchronen Interaktiven Echtzeitsystemen durch die Techniken des Profilings und des Model Checkings untersucht. Zu Beginn wird erl{\"a}utert, warum das asynchrone Modell das vielversprechendste f{\"u}r die Nebenl{\"a}ufigkeit in einem Interaktiven Echtzeitsystem ist. Hierzu wird ein Vergleich zu anderen Modellen gezogen. Dar{\"u}ber hinaus wird ein detaillierter Vergleich von Synchronisationstechnologien, welche die Grundlage f{\"u}r Konsistenz schaffen, durchgef{\"u}hrt. Auf der Grundlage dieser beiden Vergleiche und der Betrachtung anderer Systeme wird ein Synchronisationskonzept entwickelt. Auf dieser Basis wird die Nebenl{\"a}ufigkeit, Konsistenz und Latenz mit zwei Verfahren untersucht. Die erste Technik ist das Profiling, wobei einige neue Darstellungsformen von gemessenen Daten entwickelt werden. Diese neu entwickelten Darstellungsformen werden in der Implementierung eines Profilers verwendet. Als zweite Technik wird das Model Checking analysiert, welches bisher noch nicht im Kontext von Interaktiven Echtzeitsystemen verwendet wurde. Model Checking dient dazu, die Verhaltensweise eines Interaktiven Echtzeitsystems vorherzusagen.
Diese Vorhersagen werden mit den Messungen aus dem Profiler verglichen.}, subject = {Model Checking}, language = {de} } @phdthesis{Gebert2017, author = {Gebert, Steffen Christian}, title = {Architectures for Softwarized Networks and Their Performance Evaluation}, issn = {1432-8801}, doi = {10.25972/OPUS-15063}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150634}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {This thesis contributes to several issues in the context of SDN and NFV, with an emphasis on performance and management. The main contributions are guidelines for operators migrating to software-based networks, as well as an analytical model for packet processing in a Linux system using the Kernel NAPI.}, subject = {Telekommunikationsnetz}, language = {en} } @phdthesis{Runge2017, author = {Runge, Armin}, title = {Advances in Deflection Routing based Network on Chips}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-149700}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {The progress which has been made in semiconductor chip production in recent years enables a multitude of cores on a single die. However, due to further decreasing structure sizes, fault tolerance and energy consumption will represent key challenges. Furthermore, an efficient communication infrastructure is indispensable due to the high parallelism of these systems. The predominant communication system in such highly parallel systems is a Network on Chip (NoC). The focus of this thesis is on NoCs which are based on deflection routing. In this context, contributions are made to two domains: fault tolerance and the dimensioning of the optimal link width. Both aspects are essential for the application of reliable, energy-efficient, and deflection-routing-based NoCs. It is expected that future semiconductor systems will have to cope with high fault probabilities. The inherently high connectivity of most NoC topologies can be exploited to tolerate the breakdown of links and other components. In this thesis, a fault-tolerant router architecture has been developed, which stands out for its interconnection architecture and its method of overcoming complex fault situations. The presented simulation results show that all data packets arrive at their destination, even at high fault probabilities. In contrast to routing-table-based architectures, the hardware costs of the architecture presented herein are lower and, in particular, independent of the number of components in the network. Besides fault tolerance, hardware costs and energy efficiency are of great importance. The utilized link width has a decisive influence on these aspects. In particular, in deflection-routing-based NoCs, over- and under-sizing of the link width leads to unnecessarily high hardware costs and poor performance, respectively. In the second part of this thesis, the optimal link width of deflection-routing-based NoCs is investigated. Additionally, a method to reduce the link width is introduced.
Simulation and synthesis results show that the method presented herein allows a significant reduction of hardware costs at comparable performance.}, subject = {Network-on-Chip}, language = {en} } @phdthesis{Milenkoski2016, author = {Milenkoski, Aleksandar}, title = {Evaluation of Intrusion Detection Systems in Virtualized Environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-141846}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Virtualization allows the creation of virtual instances of physical devices, such as network and processing units. In a virtualized system, governed by a hypervisor, resources are shared among virtual machines (VMs). Virtualization has been receiving increasing interest as a way to reduce costs through server consolidation and to enhance the flexibility of physical infrastructures. Although virtualization provides many benefits, it introduces new security challenges; that is, the introduction of a hypervisor introduces threats, since hypervisors expose new attack surfaces. Intrusion detection is a common cyber security mechanism whose task is to detect malicious activities in host and/or network environments. This enables timely reaction in order to stop an on-going attack, or to mitigate the impact of a security breach. The wide adoption of virtualization has resulted in the increasingly common practice of deploying conventional intrusion detection systems (IDSs), for example, hardware IDS appliances or common software-based IDSs, in designated VMs as virtual network functions (VNFs). In addition, the research and industrial communities have developed IDSs specifically designed to operate in virtualized environments (i.e., hypervisor-based IDSs), with components both inside the hypervisor and in a designated VM. The latter are becoming increasingly common with the growing proliferation of virtualized data centers and the adoption of the cloud computing paradigm, for which virtualization is a key enabling technology. To minimize the risk of security breaches, methods and techniques for evaluating IDSs in an accurate manner are essential. For instance, one may compare different IDSs in terms of their attack detection accuracy in order to identify and deploy the IDS that operates optimally in a given environment, thereby reducing the risks of a security breach. However, methods and techniques for realistic and accurate evaluation of the attack detection accuracy of IDSs in virtualized environments (i.e., IDSs deployed as VNFs or hypervisor-based IDSs) are lacking. That is, workloads that exercise the sensors of an evaluated IDS and contain attacks targeting hypervisors are needed. Attacks targeting hypervisors are of high severity since they may result in, for example, altering the hypervisor's memory and thus enabling the execution of malicious code with hypervisor privileges. In addition, there are no metrics and measurement methodologies for accurately quantifying the attack detection accuracy of IDSs in virtualized environments with elastic resource provisioning (i.e., on-demand allocation or deallocation of virtualized hardware resources to VMs). Modern hypervisors allow for hotplugging virtual CPUs and memory on the designated VM where the intrusion detection engine of hypervisor-based IDSs, as well as of IDSs deployed as VNFs, typically operates.
Resource hotplugging may have a significant impact on the attack detection accuracy of an evaluated IDS, which is not taken into account by existing metrics for quantifying IDS attack detection accuracy. This may lead to inaccurate measurements, which, in turn, may result in the deployment of misconfigured or ill-performing IDSs, increasing the risk of security breaches. This thesis presents contributions that span the standard components of any system evaluation scenario: workloads, metrics, and measurement methodologies. The scientific contributions of this thesis are: A comprehensive systematization of the common practices and the state-of-the-art on IDS evaluation. This includes: (i) a definition of an IDS evaluation design space allowing existing practical and theoretical work to be put into a common context in a systematic manner; (ii) an overview of common practices in IDS evaluation reviewing evaluation approaches and methods related to each part of the design space; and (iii) a set of case studies demonstrating how different IDS evaluation approaches are applied in practice. Given the significant amount of existing practical and theoretical work related to IDS evaluation, the presented systematization is beneficial for improving the general understanding of the topic by providing an overview of the current state of the field. In addition, it is beneficial for identifying and contrasting advantages and disadvantages of different IDS evaluation methods and practices, while also helping to identify specific requirements and best practices for evaluating current and future IDSs. An in-depth analysis of common vulnerabilities of modern hypervisors as well as a set of attack models capturing the activities of attackers triggering these vulnerabilities. The analysis includes 35 representative vulnerabilities of hypercall handlers (i.e., hypercall vulnerabilities). Hypercalls are software traps from a kernel of a VM to the hypervisor. The hypercall interface of hypervisors, alongside device drivers and VM exit events, is one of the attack surfaces that hypervisors expose. Triggering a hypercall vulnerability may lead to a crash of the hypervisor or to altering the hypervisor's memory. We analyze the origins of the considered hypercall vulnerabilities, demonstrate and analyze possible attacks that trigger them (i.e., hypercall attacks), develop hypercall attack models (i.e., systematized activities of attackers targeting the hypercall interface), and discuss future research directions focusing on approaches for securing hypercall interfaces. A novel approach for evaluating IDSs enabling the generation of workloads that contain attacks targeting hypervisors, that is, hypercall attacks. We propose an approach for evaluating IDSs using attack injection (i.e., controlled execution of attacks during regular operation of the environment where an IDS under test is deployed). The injection of attacks is performed based on attack models that capture realistic attack scenarios. We use the hypercall attack models developed as part of this thesis for injecting hypercall attacks. A novel metric and measurement methodology for quantifying the attack detection accuracy of IDSs in virtualized environments that feature elastic resource provisioning. We demonstrate how the elasticity of resource allocations in such environments may impact the IDS attack detection accuracy and show that using existing metrics in such environments may lead to practically challenging and inaccurate measurements.
We also demonstrate the practical use of the metric we propose through a set of case studies, where we evaluate common conventional IDSs deployed as VNFs. In summary, this thesis presents the first systematization of the state-of-the-art on IDS evaluation, considering workloads, metrics, and measurement methodologies as integral parts of every IDS evaluation approach. In addition, we are the first to examine the hypercall attack surface of hypervisors in detail and to propose an approach using attack injection for evaluating IDSs in virtualized environments. Finally, this thesis presents the first metric and measurement methodology for quantifying the attack detection accuracy of IDSs in virtualized environments that feature elastic resource provisioning. From a technical perspective, as part of the proposed approach for evaluating IDSs, this thesis presents hInjector, a tool for injecting hypercall attacks. We designed hInjector to enable the rigorous, representative, and practically feasible evaluation of IDSs using attack injection. We demonstrate the application and practical usefulness of hInjector, as well as of the proposed approach, by evaluating a representative hypervisor-based IDS designed to detect hypercall attacks. While we focus on evaluating the capabilities of IDSs to detect hypercall attacks, the proposed IDS evaluation approach can be generalized and applied in a broader context. For example, it may be directly used to also evaluate security mechanisms of hypervisors, such as hypercall access control (AC) mechanisms. It may also be applied to evaluate the capabilities of IDSs to detect attacks involving operations that are functionally similar to hypercalls, for example, the input/output control (ioctl) calls that the Kernel-based Virtual Machine (KVM) hypervisor supports. For IDSs in virtualized environments featuring elastic resource provisioning, our approach for injecting hypercall attacks can be applied in combination with the attack detection accuracy metric and measurement methodology we propose. Our approach for injecting hypercall attacks, and our metric and measurement methodology, can also be applied independently beyond the scenarios considered in this thesis. The wide spectrum of security mechanisms in virtualized environments whose evaluation can directly benefit from the contributions of this thesis (e.g., hypervisor-based IDSs, IDSs deployed as VNFs, and AC mechanisms) reflects the practical implications of the thesis.}, subject = {Eindringerkennung}, language = {en} } @article{KirchnerDittrichBeckenbaueretal.2016, author = {Kirchner, Felix and Dittrich, Marco and Beckenbauer, Phillip and N{\"o}th, Maximilian}, title = {OCR bei Inkunabeln - Offizinspezifischer Ansatz der Universit{\"a}tsbibliothek W{\"u}rzburg}, series = {ABI Technik}, volume = {36}, journal = {ABI Technik}, number = {3}, issn = {2191-4664}, doi = {10.1515/abitech-2016-0036}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-194002}, pages = {178-188}, year = {2016}, abstract = {Im Rahmen des BMBF-gef{\"o}rderten Projekts KALLIMACHOS an der Universit{\"a}t W{\"u}rzburg soll unter anderem die Textgrundlage f{\"u}r digitale Editionen per OCR gewonnen werden. Das Bearbeitungskorpus besteht aus deutschen, franz{\"o}sischen und lateinischen Inkunabeln. Dieser Artikel zeigt, wie man mit bereits heute existierenden Methoden und Programmen den Problemen bei der OCR von Inkunabeln entgegentreten kann.
Hierzu wurde an der Universit{\"a}tsbibliothek W{\"u}rzburg ein Verfahren erprobt, mit dem auf ausgew{\"a}hlten Werken einer Druckerwerkstatt bereits Zeichengenauigkeiten von bis zu 95 Prozent und Wortgenauigkeiten von bis zu 73 Prozent erzielt werden.}, language = {de} } @phdthesis{Weinhard2019, author = {Weinhard, Alexander}, title = {Managing RFID Implementations - Implications for Managerial Decision Making}, doi = {10.25972/OPUS-17816}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178161}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The present dissertation investigates the management of RFID implementations in retail trade. Our work contributes to this by examining important aspects that have so far received little attention in the scientific literature. We therefore perform three studies on three important aspects of managing RFID implementations. In our first study, we evaluate customer acceptance of pervasive retail systems using privacy calculus theory. The results of our study reveal the most important aspects a retailer has to consider when implementing pervasive retail systems. In our second study, we analyze RFID-enabled robotic inventory taking with the help of a simulation model. The results show that retailers should implement robotic inventory taking if the accuracy rates of the robots are as high as the robots' manufacturers claim. In our third and last study, we evaluate the potential of RFID data for supporting managerial decision making. We propose three novel methods for extracting useful information from RFID data and introduce a generic information extraction process. Our work is geared towards practitioners who want to improve their RFID-enabled processes and towards scientists conducting RFID-based research.}, subject = {RFID}, language = {en} } @phdthesis{Hirth2016, author = {Hirth, Matthias Johannes Wilhem}, title = {Modeling Crowdsourcing Platforms - A Use-Case Driven Approach}, issn = {1432-8801}, doi = {10.25972/OPUS-14072}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-140726}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Computer systems have replaced the human workforce in many parts of everyday life, but there still exists a large number of tasks that cannot be automated yet. This also includes tasks which we consider to be rather simple, like the categorization of image content or subjective ratings. Traditionally, these tasks have been completed by designated employees or outsourced to specialized companies. However, recently the crowdsourcing paradigm has been applied more and more to complete such human-labor-intensive tasks. Crowdsourcing aims at leveraging the huge number of Internet users all around the globe, who form a potentially highly available, low-cost, and easily accessible workforce. To enable the distribution of work on a global scale, new web-based services emerged, so-called crowdsourcing platforms, that act as mediators between employers posting tasks and workers completing them. However, the crowdsourcing approach, especially the large anonymous worker crowd, results in two types of challenges. On the one hand, there are technical challenges like the dimensioning of crowdsourcing platform infrastructure or the interconnection of crowdsourcing platforms and machine clouds to build hybrid services. On the other hand, there are conceptual challenges like identifying reliable workers or migrating traditional off-line work to the crowdsourcing environment.
To tackle these challenges, this monograph analyzes and models current crowdsourcing systems to optimize crowdsourcing workflows and the underlying infrastructure. First, a categorization of crowdsourcing tasks and platforms is developed to derive generalizable properties. Based on this categorization and an exemplary analysis of a commercial crowdsourcing platform, models for different aspects of crowdsourcing platforms and crowdsourcing mechanisms are developed. A special focus is put on quality assurance mechanisms for crowdsourcing tasks, where the models are used to assess the suitability and costs of existing approaches for different types of tasks. Further, a novel quality assurance mechanism solely based on user interactions is proposed and its feasibility is shown. The findings from the analysis of existing platforms, the derived models, and the developed quality assurance mechanisms are finally used to derive best practices for two crowdsourcing use cases: crowdsourcing-based network measurements and crowdsourcing-based subjective user studies. These two exemplary use cases cover aspects typical of a large range of crowdsourcing tasks and illustrate the potential benefits, but also the resulting challenges, of using crowdsourcing. With the ongoing digitalization and globalization of the labor markets, the crowdsourcing paradigm is expected to gain even more importance in the coming years. This is already evident in the newly emerging fields of crowdsourcing, like enterprise crowdsourcing or mobile crowdsourcing. The models developed in the monograph enable platform providers to optimize their current systems and employers to optimize their workflows to increase their commercial success. Moreover, the results help to improve the general understanding of crowdsourcing systems, a key for identifying necessary adaptations and future improvements.}, subject = {Open Innovation}, language = {en} } @phdthesis{Rygielski2017, author = {Rygielski, Piotr}, title = {Flexible Modeling of Data Center Networks for Capacity Management}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146235}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Nowadays, data centers are becoming increasingly dynamic due to the common adoption of virtualization technologies. Systems can scale their capacity on demand by growing and shrinking their resources dynamically based on the current load. However, the complexity and performance of modern data centers are influenced not only by the software architecture, middleware, and computing resources, but also by network virtualization, network protocols, network services, and configuration. The field of network virtualization is not as mature as server virtualization and there are multiple competing approaches and technologies. Performance modeling and prediction techniques provide a powerful tool to analyze the performance of modern data centers. However, given the wide variety of network virtualization approaches, no common approach exists for modeling and evaluating the performance of virtualized networks. The performance community has proposed multiple formalisms and models for evaluating the performance of infrastructures based on different network virtualization technologies. The existing performance models can be divided into two main categories: coarse-grained analytical models and highly detailed simulation models.
Analytical performance models are normally defined at a high level of abstraction; they abstract away many details of the real network and therefore have limited predictive power. On the other hand, simulation models are normally focused on a selected networking technology and take into account many specific performance-influencing factors, resulting in detailed models that are tightly bound to a given technology, infrastructure setup, or protocol stack. Existing models are inflexible; that is, they provide a single solution method without providing means for the user to influence the solution accuracy and solution overhead. To allow for flexibility in the performance prediction, the user is required to build multiple different performance models, obtaining multiple performance predictions. Each performance prediction may then have a different focus, different performance metrics, prediction accuracy, and solving time. The goal of this thesis is to develop a modeling approach that does not require the user to have experience in any of the applied performance modeling formalisms. The approach offers flexibility in the modeling and analysis by balancing between: (a) the generic character and low overhead of coarse-grained analytical models, and (b) the more detailed simulation models with higher prediction accuracy. The contributions of this thesis intersect with technologies and research areas such as: software engineering, model-driven software development, domain-specific modeling, performance modeling and prediction, networking and data center networks, network virtualization, Software-Defined Networking (SDN), and Network Function Virtualization (NFV). The main contributions of this thesis constitute the Descartes Network Infrastructure (DNI) approach and include: • Novel modeling abstractions for virtualized network infrastructures. This includes two meta-models that define modeling languages for modeling data center network performance. The DNI and miniDNI meta-models provide means for representing network infrastructures at two different abstraction levels. Regardless of which variant of the DNI meta-model is used, the modeling language provides generic modeling elements allowing the majority of existing and future network technologies to be described, while at the same time abstracting factors that have low influence on the overall performance. I focus on SDN and NFV as examples of modern virtualization technologies. • A network deployment meta-model: an interface between DNI and other meta-models that allows mappings between DNI and other descriptive models to be defined. The integration with other domain-specific models allows capturing behaviors that are not reflected in the DNI model, for example, software bottlenecks, server virtualization, and middleware overheads. • Flexible model solving with model transformations. The transformations enable solving a DNI model by transforming it into a predictive model. The model transformations vary in size and complexity depending on the amount of data abstracted in the transformation process and provided to the solver. In this thesis, I contribute six transformations that transform DNI models into various predictive models based on the following modeling formalisms: (a) OMNeT++ simulation, (b) Queueing Petri Nets (QPNs), (c) Layered Queueing Networks (LQNs). For each of these formalisms, multiple predictive models are generated (e.g., models with different levels of detail): (a) two for OMNeT++, (b) two for QPNs, (c) two for LQNs.
Some predictive models can be solved using multiple alternative solvers, resulting in up to ten different automated solving methods for a single DNI model. • A model extraction method that supports the modeler in the modeling process by automatically prefilling the DNI model with the network traffic data. The contributed traffic profile abstraction and optimization method provides a trade-off by balancing between the size and the level of detail of the extracted profiles. • A method for selecting feasible solving methods for a DNI model. The method proposes a set of solvers based on a trade-off analysis characterizing each transformation with respect to various parameters such as its specific limitations, expected prediction accuracy, expected run-time, required resources in terms of CPU and memory consumption, and scalability. • An evaluation of the approach in the context of two realistic systems. I evaluate the approach with a focus on factors such as: prediction of network capacity and interface throughput, applicability, and flexibility in trading off prediction accuracy against solving time. Despite not focusing on the maximization of the prediction accuracy, I demonstrate that in the majority of cases the prediction error is low: up to 20\% for uncalibrated models and up to 10\% for calibrated models, depending on the solving technique. In summary, this thesis presents the first approach to flexible run-time performance prediction in data center networks, including networks based on SDN. It provides the ability to flexibly balance performance prediction accuracy against solving overhead. The approach provides the following key benefits: • It is possible to predict the impact of changes in the data center network on the performance. The changes include: network topology, hardware configuration, traffic load, and application deployment. • DNI can successfully model and predict the performance of multiple different network infrastructures, including proactive SDN scenarios. • The prediction process is flexible, that is, it provides a balance between the granularity of the predictive models and the solving time. Decreased prediction accuracy is usually rewarded with savings in solving time and in the resources required for solving. • The users are enabled to conduct performance analysis using multiple different prediction methods without requiring expertise and experience in each of the modeling formalisms. The components of the DNI approach can also be applied to scenarios that are not considered in this thesis. The approach is generalizable and applicable to the following examples: (a) networks outside of data centers may be analyzed with DNI as long as the background traffic profile is known; (b) uncalibrated DNI models may serve as a basis for design-time performance analysis; (c) the method for extracting and compacting traffic profiles may be used for other, non-network workloads as well.}, subject = {Modellierung}, language = {en} } @phdthesis{Ali2017, author = {Ali, Qasim}, title = {Distributed Control of Cooperating Mini UAVs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-140686}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Mini Unmanned Aerial Vehicles (MUAVs) werden immer beliebtere Forschungsplattformen. Vor allem in den letzten Jahren ziehen sie aufgrund ihrer Erschwinglichkeit und ihrer Flexibilit{\"a}t, die es erlaubt sie in fast allen Lebensbereichen einzusetzen, betr{\"a}chtliche Aufmerksamkeit auf sich.
MUAVs haben offensichtliche Vorteile gegen{\"u}ber bemannten Plattformen einschließlich ihrer viel geringeren Herstellungs- und Betriebskosten, Risikovermeidung f{\"u}r den menschlichen Piloten, der M{\"o}glichkeit sicher niedrig und langsam fliegen zu k{\"o}nnen, und Realisierung von Operationen, die {\"u}ber die inh{\"a}renten Grenzen des menschlichen K{\"o}rpers hinausgehen. Der Fortschritt in der Micro Electro-Mechanical System (MEMS) Technologie, Avionik und Miniaturisierung von Sensoren spielte auch eine bedeutende Rolle bei der Entwicklung der MUAVs. Diese Flugger{\"a}te reichen von einfachem Spielzeug aus dem Elektrofachhandel bis zu hoch entwickelten, kommerziellen Plattformen, die die Durchf{\"u}hrung neuer Aufgaben wie Offshore-Windkraftwerk Inspektionen, 3D-Modellierung von Geb{\"a}uden usw. erlauben. MUAVs sind auch umweltfreundlich, da sie weniger Luftverschmutzung und L{\"a}rm verursachen. Unbemannt ist daher un{\"u}bertroffen. Aktuelle Forschung konzentriert sich auf die M{\"o}glichkeit mehrere kosteng{\"u}nstige Flugger{\"a}te zusammen fliegen zu lassen, w{\"a}hrend die erforderliche relative r{\"a}umliche Trennungen beibehalten wird. Dies erm{\"o}glicht es effizient Aufgaben zu erf{\"u}llen im Vergleich zu einem einzigen sehr teuren Flugger{\"a}t. Durch die Redundanz entf{\"a}llt auch das Risiko des Scheiterns der Mission durch den Verlust eines einzigen Flugger{\"a}ts. Wertvolle Aufgaben, die kooperative Flugger{\"a}te ausf{\"u}hren k{\"o}nnen, sind beispielsweise gemeinsame Lasttransporte, Such- und Rettungsmissionen, mobile Kommunikationsrelais, Spr{\"u}hen von Pestiziden und Wetterbeobachtung. Obwohl die Realisierung von Fl{\"u}gen mit mehreren, gekoppelten UAVs komplex ist, rechtfertigen dennoch offensichtliche Vorteile diese m{\"u}hsame und aufw{\"a}ndige Entwicklungsarbeit. Verteilte Steuerung von kooperierenden Einheiten ist ein multidisziplin{\"a}res Thema, das es erfordert in diversifizierten Bereichen zu arbeiten. Dazu geh{\"o}ren MUAV Hardware und Software, Kommunikationstechniken f{\"u}r den notwendigen Informationsaustausch, Flugdynamik, Regelungstechnik, insbesondere f{\"u}r verteilte / kooperative Steuerungstechniken, Graphentheorie f{\"u}r Kommunikationstopologie Modellierung und Sensoren-Technologie wie Differential GPS (DGPS). F{\"u}r eine Flotte von Agenten, die in unmittelbarer N{\"a}he fliegen, ist eine genaue Positionsbestimmung zwingend n{\"o}tig um Kollisionen zu vermeiden und die Anforderungen f{\"u}r die meisten Missionen wie Georeferenzierung zu erf{\"u}llen. F{\"u}r solche Szenarien ist DGPS ein potenzieller Kandidat. Ein Teil der Forschung konzentriert sich daher auf die Entwicklung von DGPS Code. Eines der Module dieser Forschung war Hardware-Implementierung. Ein einfacher Test-Aufbau zur Realisierung von Basisfunktionalit{\"a}ten f{\"u}r Formationsflug von Quadrocoptern wurde am Lehrstuhl f{\"u}r Informationstechnik in der Luft- und Raumfahrt der Universit{\"a}t W{\"u}rzburg entwickelt. Diese Testumgebung kann nicht nur zur Pr{\"u}fung und Validierung von Algorithmen f{\"u}r Formationsflug in realer Umgebung genutzt werden, sondern dient auch zur Ausbildung von Studenten. Ein bereits vorhandener Pr{\"u}fstand f{\"u}r einzelne Quadrocopter wurde mit den notwendigen Kommunikation und verteilten Steuerung erweitert, um Algorithmen f{\"u}r Formationsfl{\"u}ge in drei Freiheitsgraden (Roll / Nick / Gier) zu testen. Diese Studie umfasst die Bereiche der Kommunikation, Steuerungstechnik und Embedded-System-Programmierung. 
Das Bluetooth-Protokoll wurde f{\"u}r die gegenseitige Kommunikation zwischen zwei Quadrocoptern verwendet. Eine einfache Technik der Proportional-Integral-Differential (PID) Steuerung in Kombination mit Kalman-Filter wurde genutzt. Die MATLAB Instrument Control Toolbox wurde f{\"u}r die Datenanzeige, die Analyse und das Plotten verwendet. Plots k{\"o}nnen in Echtzeit gezeichnet werden und empfangene Daten k{\"o}nnen auch in Form von Dateien zur sp{\"a}teren Verwendung und Analyse gespeichert werden. Das System wurde preisg{\"u}nstig, unter Ber{\"u}cksichtigung eines einfachen Aufbaus, entwickelt. Der vorgeschlagene Aufbau ist sehr flexibel und kann einfach an ver{\"a}nderte Anforderungen angepasst werden. Als verteiltes Steuerungsschema wurde ein zentralisierter, heterogener Formationsflug Positionsregler formuliert, der einen „explicit model following Linear Quadratic Regulator Proportional Integral (LQR PI)" Regler verwendet. Der Anf{\"u}hrer Quadrocopter ist ein stabiles Referenzmodell mit der gew{\"u}nschten Dynamik, deren Ausgang vollkommen von den beiden Wingmen Quadrocopter verfolgt wird. Der Anf{\"u}hrer selbst wird durch Pole Placement Steuerverfahren mit den gew{\"u}nschten Stabilit{\"a}tseigenschaften gesteuert, w{\"a}hrend die beiden Anh{\"a}nger durch robuste und adaptive LQR PI Steuerverfahren geregelt werden. F{\"u}r diese Studie wird ein Vollzustandsvektor der Quadrocopter betrachtet w{\"a}hrend nur die resultierende Leistung verfolgt wird. Die ausgew{\"a}hlte 3D Formationsgeometrie und die statische Stabilit{\"a}t bleibt unter einer Vielzahl von m{\"o}glichen St{\"o}rungen erhalten. Bei Kommunikationsverlust zwischen Anf{\"u}hrer und einem der Anh{\"a}nger, leitet der andere Anh{\"a}nger die Daten, die er vom Anf{\"u}hrer erhalten hat, an den betroffenen Anh{\"a}nger weiter. Die Stabilit{\"a}t des Regelsystems wurde unter Verwendung von Singul{\"a}rwerten analysiert. Der vorgeschlagene Ansatz f{\"u}r eng gekoppelten Formationsflug von MUAVs wurde mit Hilfe von umfangreichen Simulationen unter MATLAB® / Simulink® validiert und ergab viel versprechende Ergebnisse. Auch die Tracking-Leistung wurde f{\"u}r zeitlich ver{\"a}nderliche Befehle gezeigt. Die vorgeschlagene Architektur ist skalierbar und kann problemlos erweitert werden. Dieser Ansatz ist f{\"u}r die Szenarien geeignet, die eng gekoppelte Formationsflug ben{\"o}tigen, wie kooperatives Greifen oder gemeinsame Lasttransporte. Ein innovatives Framework f{\"u}r die Teamarbeit von zwei Quadrocopter Flotten wurde entwickelt. Als Beispielmission wurde ein Szenario gew{\"a}hlt, bei dem ein Feuer auf einer gr{\"o}ßeren Fl{\"a}che gel{\"o}scht werden muss. Jede Formation hat ihre angegebene Formationsgeometrie und eine zugewiesene Aufgabe. Die Lageregelung f{\"u}r die Quadrocopter in einer der Formationen wurde durch ein LQR PI-Regelschema, das auf „explicit model following" basiert, umgesetzt. Die Quadrocopter in anderen Formation werden durch ein LQR PI Servomechanismus Regelsystem gesteuert. Die beiden Steuersysteme werden in Bezug auf ihre Leistung und ihren Steuerungsaufwand verglichen. Beide Formationen werden durch entsprechende Bodenstationen durch virtuelle Anf{\"u}hrer kommandiert. Die Bodenstationen tauschen die befohlene H{\"o}heninformation aus, um gegenseitig eine sichere Trennung zwischen den Formationen zu gew{\"a}hrleisten. Die Quadrocopter k{\"o}nnen kommandierte Solltrajektorien folgen und {\"u}ber erw{\"u}nschten Punkten f{\"u}r eine vorgegebene Zeit schweben. 
Bei Kommunikationsverlust zwischen Bodenstation und einem der Quadcopter leitet der benachbarte Quadrocopter die Befehlsdaten, die er von der Bodenstation erhalten hat, an die betroffene Einheit weiter. Das vorgeschlagene Framework wurde durch umfangreiche Simulationen mit Hilfe von MATLAB® / Simulink® validiert und liefert sehr brauchbare Ergebnisse. Cluster-Rekonfiguration von Agenten wird in unserer Arbeit ebenfalls gezeigt. Dies erlaubt es die Formationsgeometrie w{\"a}hrend des Fluges auf eine beliebige neue Form umzuschalten. F{\"u}r die genannten Anwendungen sind Konsens Algorithmen nicht erw{\"u}nscht, da wir von den Quadrocopter Flotten fordern, dass sie dem von uns gew{\"a}hlten Weg folgen, und nicht ihren Weg selbst w{\"a}hlen. Eine Reihe der praktischen Probleme von Kommunikationsnetzen kann in geeigneter Weise durch Graphen dargestellt werden. Dies erleichtert die Problemformulierung und den Analyseprozess. Kommunikationstopologien f{\"u}r Netzwerke mit einer großen Anzahl von Einheiten, wie zum Beispiel Schw{\"a}rme von Luftfahrzeugen, k{\"o}nnen durch einen graphentheoretischen Ansatz untersucht werden. Um die Bildung solcher Probleme zu erleichtern, wird der Graph mit Hilfe der Laplace-Matrix dargestellt. Eigenwerte der Laplace-Matrix wurden in unserer Studie angemessene Ber{\"u}cksichtigung gegeben einen Einblick in die Graphen / Subgraphen Eigenschaften zu verleihen. Der gleiche wurden genutzt um die bekannte Euler Formel zu verallgemeinern und somit auf Graphen und Subgraphen anwendbar zu machen. Eine modifizierte Euler-Formel wird ebenfalls vorgestellt. Die Verwendung der Graphentheorie in verteilten / kooperativen Regelsystemen wird auch durch Simulationen gezeigt. Kooperative Kontrolschemas, die auf auf Konsens-Algorithmen beruhenden, wurden f{\"u}r die Lageregelung von Quadrocopter-Flotten, in denen kein expliziter Anf{\"u}hrer existiert, verwendet. Konsens-Algorithmen wurden in Kombination mit verschiedenen Steuersystemen verwendet, was zur Autonomie von Quadrocoptern beitr{\"a}gt. Die Steuersysteme, die f{\"u}r diesen Zweck verwendet werden, umfassen LQR PI-Regelung basierend auf „model following" und LQR PI Servo-Mechanismus. Die Regelungen wurden unter verschiedenen Kommunikationstopologien untersucht, darunter voll verbundene ungerichtete Graphen, gerichteten Graphen und Zyklus-Topologie. Der Informationsfluss unter den Agenten in einem Cluster wurde durch Laplace-Matrix modelliert. Die Auswirkungen von Eingangs Verzerrungen auf Konsens Werte wurden ebenfalls untersucht. Quadrocopter k{\"o}nnen durch gegenseitigen Konsens Flugbahnen verfolgen und die Zielpunkte erreichen. Die vorgeschlagenen Regelungssysteme wurden unter verschiedenen Kommunikationstopologien in Matlab / Simulink-Umgebung durch umfangreiche Simulationen validiert. Die Ergebnisse bescheinigen die Wirksamkeit der pr{\"a}sentierten Schemata mit dem zus{\"a}tzlichen Vorteil der Einfachheit der Umsetzung. Das vorgeschlagene Regelungssystem ist skalierbar f{\"u}r große Gruppen von MUAVs. F{\"u}r Formationsflug sind die Anforderungen an die Positionsgenauigkeit sehr hoch. GPS-Signale allein bieten keine ausreichend hohe Positionsgenauigkeit um die Anforderung zu erf{\"u}llen; eine Technik f{\"u}r die genauere Positionsbestimmung ist daher erforderlich, beispielsweise DGPS. Es existiert eine Anzahl von {\"o}ffentlichen Codes f{\"u}r die GPS-Positionsbestimmung und Baseline-Bestimmung im Offline-Modus. 
Es existiert jedoch keine Software f{\"u}r DGPS, die Korrekturfaktoren der Basisstationen nutzt, ohne auf Doppel Differenz Informationen zu vertrauen. Um dies zu erreichen, wurde eine Methodik in MATLAB-Umgebung f{\"u}r DGPS mit C/A Pseudoranges nur auf einzelne Frequenz L1 eingef{\"u}hrt es machbar f{\"u}r Empf{\"a}nger kosteng{\"u}nstig GPS zu nutzen. Unsere Basisstation wird an einem genau vermessen Referenzpunkt aufgestellt. Pseudoranges und geometrische Abst{\"a}nde werden an der Basisstation verglichen, um die Korrekturfaktoren zu berechnen. Diese Korrekturfaktoren, f{\"u}r aller g{\"u}ltigen Satelliten w{\"a}hrend einer Epoche, werden dann an einen Rover {\"u}bergeben. Das Rover ber{\"u}cksichtigt innerhalb der entsprechenden Epoche diese f{\"u}r seine eigene wahre Positionsbestimmung. Zur Validierung der vorgeschlagenen Algorithmen wird unsere Rover ebenfalls an einer vorbestimmten Stelle platziert. Der vorgeschlagene Code ist ein geeignetes und einfaches Werkzeug f{\"u}r die Nachbearbeitung von GPS-Rohdaten f{\"u}r eine genaue Positionsbestimmung eines Rover, z.B. eines UAV w{\"a}hrend der Post-Missionsanalyse.}, subject = {Micro Air Vehicle}, language = {en} } @book{OPUS4-5759, title = {W{\"u}rzburger Hochschulschriften : 1581 - 1803 ; Bestandsverzeichnis}, organization = {Universit{\"a}tsbibliothek W{\"u}rzburg}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69739}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {1992}, abstract = {Die Universit{\"a}tsbibliothek W{\"u}rzburg hat f{\"u}r ihre umfangreiche Sammlung alter W{\"u}rzburger Hochschulschriften einen Katalog erarbeitet, der haupts{\"a}chlich Dissertationen und Thesen verzeichnet, aber auch andere Pr{\"u}fungsarbeiten, die f{\"u}r den Erwerb unterschiedlicher akademischer Grade und Titel ausgearbeitet und publiziert worden sind und die aus der f{\"u}rstbisch{\"o}flichen Zeit unserer Universit{\"a}t stammen (1582 - 1803).}, subject = {Universit{\"a}t}, language = {de} } @book{OPUS4-5760, title = {W{\"u}rzburger Hochschulschriften : 1804 - 1885 ; Bestandsverzeichnis}, editor = {M{\"a}lzer, Gottfried and Baumann, Brigitte}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69743}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {1994}, abstract = {Die Universit{\"a}tsbibliothek W{\"u}rzburg hat f{\"u}r ihre umfangreiche Sammlung alter W{\"u}rzburger Hochschulschriften einen Katalog erarbeitet, der haupts{\"a}chlich Dissertationen und Thesen verzeichnet, aber auch andere Pr{\"u}fungsarbeiten, die f{\"u}r den Erwerb unterschiedlicher akademischer Grade und Titel ausgearbeitet und publiziert worden sind. Dies ist der 2. 
Band der Nachweise f{\"u}r die Jahre 1804 bis 1885 mit 2510 Titeln.}, subject = {Universit{\"a}t}, language = {de} } @misc{OPUS4-6452, title = {R{\"u}ckBLICK - Der Jahresbericht 2012 der Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, volume = {2012}, organization = {Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76762}, year = {2013}, abstract = {Die Entwicklung der Universit{\"a}t W{\"u}rzburg im Jahr 2012.}, subject = {W{\"u}rzburg}, language = {de} } @phdthesis{Roth2020, author = {Roth, Daniel}, title = {Intrapersonal, Interpersonal, and Hybrid Interactions in Virtual Reality}, doi = {10.25972/OPUS-18862}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188627}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Virtual reality and related media and communication technologies have a growing impact on professional application fields and our daily life. Virtual environments have the potential to change the way we perceive ourselves and how we interact with others. In comparison to other technologies, virtual reality allows for the convincing display of a virtual self-representation, an avatar, to oneself and also to others. This is referred to as user embodiment. Avatars can be of varying realism and abstraction in their appearance and in the behaviors they convey. Such user-embodying interfaces, in turn, can impact the perception of the self as well as the perception of interactions. For researchers, designers, and developers it is of particular interest to understand these perceptual impacts, to apply them to therapy, assistive applications, social platforms, or games, for example. The present thesis investigates and relates these impacts with regard to three areas: intrapersonal effects, interpersonal effects, and effects of social augmentations provided by the simulation. With regard to intrapersonal effects, we specifically explore which simulation properties impact the illusion of owning and controlling a virtual body, as well as a perceived change in body schema. Our studies lead to the construction of an instrument to measure these dimensions, and our results indicate that these dimensions are especially affected by the level of immersion, the simulation latency, as well as the level of personalization of the avatar. With regard to interpersonal effects, we compare physical and user-embodied social interactions, as well as different degrees of freedom in the replication of nonverbal behavior. Our results suggest that functional levels of interaction are maintained, whereas aspects of presence can be affected by avatar-mediated interactions, and collaborative motor coordination can be disturbed by immersive simulations. Social interaction is composed of many unknown symbols and harmonic patterns that define our understanding and interpersonal rapport. For successful virtual social interactions, a mere replication of physical-world behaviors to virtual environments may seem feasible. However, the potential of mediated social interactions goes beyond this mere replication. In a third vein of research, we propose and evaluate alternative concepts of how computers can be used to actively engage in mediating social interactions, namely hybrid avatar-agent technologies. Specifically, we investigated the possibilities of augmenting social behaviors by modifying and transforming user input according to social phenomena and behavior, such as nonverbal mimicry, directed gaze, joint attention, and grouping.
Based on our results, we argue that such technologies could be beneficial for computer-mediated social interactions, for example to compensate for lacking sensory input and disturbances in data transmission, or to increase aspects of social presence by visual substitution or amplification of social behaviors. Based on related work and the presented findings, the present thesis proposes the perspective of considering computers as social mediators. Concluding from prototypes and empirical studies, the potential of technology to be an active mediator of social perception, with regard to the perception of the self as well as the perception of social interactions, may benefit our society by enabling further methods for diagnosis, treatment, and training, as well as the inclusion of individuals with social disorders. In this regard, we discuss implications for our society and ethical aspects. This thesis extends previous empirical work and further presents novel instruments, concepts, and implications to open up new perspectives for the development of virtual reality, mixed reality, and augmented reality applications.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @phdthesis{Wick2020, author = {Wick, Christoph}, title = {Optical Medieval Music Recognition}, doi = {10.25972/OPUS-21434}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-214348}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {In recent years, great progress has been made in the area of Artificial Intelligence (AI) due to the possibilities of Deep Learning, which has steadily yielded new state-of-the-art results, especially in many image recognition tasks. Currently, in some areas, human performance is achieved or already exceeded. This great development has already had an impact on the area of Optical Music Recognition (OMR), as several novel methods relying on Deep Learning have succeeded in specific tasks. Musicologists are interested in large-scale musical analysis and in publishing digital transcriptions in a collection that enables the development of tools for searching and data retrieval. The application of OMR promises to simplify and thus speed up the transcription process by providing either fully automatic or semi-automatic approaches. This thesis focuses on the automatic transcription of Medieval music with a focus on square notation, which poses a challenging task due to complex layouts, highly varying handwritten notations, and degradation. However, since handwritten music notations are quite complex to read, even for an experienced musicologist, it is to be expected that, even with new OMR techniques, manual corrections are required to obtain the transcriptions. This thesis presents several new approaches and open source software solutions for layout analysis and Automatic Text Recognition (ATR) of early documents and for OMR of Medieval manuscripts, providing state-of-the-art technology. Fully Convolutional Networks (FCNs) are applied for the segmentation of historical manuscripts and early printed books, to detect staff lines, and to recognize neume notations. The ATR engine Calamari is presented, which allows for ATR of early prints and also the recognition of lyrics. Configurable CNN/LSTM network architectures, which are trained with the segmentation-free CTC loss, are applied to the sequential recognition of text but also of monophonic music. Finally, a syllable-to-neume assignment algorithm is presented, which represents the final step towards obtaining a complete transcription of the music.
The evaluations show that the performance of any algorithm is highly dependent on the material at hand and the number of training instances. The presented staff line detection correctly identifies staff lines and staves with an \$F_1\$-score of above \$99.5\\%\$. The symbol recognition yields a diplomatic Symbol Accuracy Rate (dSAR) of above \$90\\%\$ by counting the number of correct predictions in the symbol sequence, normalized by its length. The ATR of lyrics achieved a Character Accuracy Rate (CAR) (equivalently, the number of correct predictions normalized by the sentence length) of above \$93\\%\$ when trained on 771 lyric lines of Medieval manuscripts and of \$99.89\\%\$ when training on around 3.5 million lines of contemporary printed fonts. The assignment of syllables and their corresponding neumes reached \$F_1\$-scores of up to \$99.2\\%\$. A direct comparison to previously published performances is difficult due to different materials and metrics. However, estimations show that the reported values of this thesis exceed the state of the art in the area of square notation. A further goal of this thesis is to enable musicologists without a technical background to apply the developed algorithms in a complete workflow by providing a user-friendly and comfortable Graphical User Interface (GUI) encapsulating the technical details. For this purpose, this thesis presents the web application OMMR4all. Its fully functional workflow includes the proposed state-of-the-art machine-learning algorithms and optionally allows for manual intervention at any stage to correct the output, preventing error propagation. To simplify the manual (post-)correction, OMMR4all provides an overlay editor that superimposes the annotations on a scan of the original manuscripts so that errors can easily be spotted. The workflow is designed to be iteratively improvable by training better models as soon as new Ground Truth (GT) is available.}, subject = {Neumenschrift}, language = {en} } @phdthesis{SchauerMarinRodrigues2020, author = {Schauer Marin Rodrigues, Johannes}, title = {Detecting Changes and Finding Collisions in 3D Point Clouds : Data Structures and Algorithms for Post-Processing Large Datasets}, isbn = {978-3-945459-32-4}, doi = {10.25972/OPUS-21428}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-214285}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Affordable prices for 3D laser range finders and mature software solutions for registering multiple point clouds in a common coordinate system paved the way for new areas of application for 3D point clouds. Nowadays we see 3D laser scanners being used not only by digital surveying experts but also by law enforcement officials, construction workers or archaeologists. Whether the purpose is digitizing factory production lines, preserving historic sites as digital heritage or recording environments for gaming or virtual reality applications -- it is hard to imagine a scenario in which the final point cloud must also contain the points of "moving" objects like factory workers, pedestrians, cars or flocks of birds. For most post-processing tasks, moving objects are undesirable, not least because they will appear in scans multiple times or be distorted due to their motion relative to the scanner rotation. The main contributions of this work are two post-processing steps for already registered 3D point clouds.
The first method is a new change detection approach based on a voxel grid which allows partitioning the input points into static and dynamic points using explicit change detection and subsequently removing the latter to obtain a "cleaned" point cloud. The second method uses this cleaned point cloud as input for detecting collisions between points of the environment point cloud and a point cloud of a model that is moved through the scene. Our approach to explicit change detection is compared to the state of the art using multiple datasets, including the popular KITTI dataset. We show how our solution achieves similar or better F1-scores than an existing solution while at the same time being faster. To detect collisions we do not produce a mesh but approximate the raw point cloud data by spheres or cylindrical volumes. We show how our data structures allow efficient nearest neighbor queries that make our CPU-only approach comparable to a massively-parallel algorithm running on a GPU. The utilized algorithms and data structures are discussed in detail. All our software is freely available for download under the terms of the GNU General Public License. Most of the datasets used in this thesis are freely available as well. We provide shell scripts that allow one to directly reproduce the quantitative results shown in this thesis for easy verification of our findings.}, subject = {Punktwolke}, language = {en} } @phdthesis{Albert2019, author = {Albert, Michael}, title = {Intelligent analysis of medical data in a generic telemedicine infrastructure}, isbn = {978-3-945459-26-3 (Online)}, doi = {10.25972/OPUS-17421}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174213}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Telemedicine uses telecommunication and information technology to provide health care services over spatial distances. With the upcoming demographic change towards an older average population age, rural areas in particular suffer from a decreasing doctor-to-patient ratio as well as a limited number of available medical specialists within an acceptable distance. These areas could benefit the most from telemedicine applications, as these are known to improve access to medical services and medical expertise and can also help to mitigate critical or emergency situations. Although the possibilities of telemedicine applications exist in the entire range of healthcare, current systems focus on one specific disease while using dedicated hardware to connect the patient with the supervising telemedicine center. This thesis describes the development of a telemedical system which follows a new generic design approach. This bridges the gap left by existing approaches that only tackle one specific application. The proposed system, in contrast, aims at supporting as many diseases and use cases as possible by taking all the stakeholders into account at the same time. To address the usability and acceptance of the system, it is designed to use standardized hardware like commercial medical sensors and smartphones for collecting the patients' medical data and transmitting it to the telemedical center. The smartphone can also act as an interface to the patient for health questionnaires or feedback. The system can handle the collection and transport of medical data, the analysis and visualization of the data, as well as real-time video and audio communication between the users.
On top of the generic telemedical framework the issue of scalability is addressed by integrating a rule-based analysis tool for the medical data. Rules can be easily created by medical personnel via a visual editor and can be personalized for each patient. The rule-based analysis tool is extended by multiple options for visualization of the data, mechanisms to handle complex rules and options for performing actions like raising alarms or sending automated messages. It is sometimes hard for the medical experts to formulate their knowledge into rules and there may be information in the medical data that is not yet known. This is why a machine learning module was integrated into the system. It uses the incoming medical data of the patients to learn new rules that are then presented to the medical personnel for inspection. This is in line with European legislation where the human still needs to be in charge of such decisions. Overall, we were able to show the benefit of the generic approach by evaluating it in three completely different medical use cases derived from specific application needs: monitoring of COPD (chronic obstructive pulmonary disease) patients, support of patients performing dialysis at home and councils of intensive-care experts. In addition the system was used for a non-medical use case: monitoring and optimization of industrial machines and robots. In all of the mentioned cases, we were able to prove the robustness of the generic approach with real users of the corresponding domain. This is why we can propose this approach for future development of telemedical systems.}, subject = {Telemedizin}, language = {en} } @inproceedings{KleinehagenbrockPetersen2011, author = {Kleinehagenbrock, Frank and Petersen, Stefan}, title = {Geschichte studiert - und dann? Berufsfelder f{\"u}r Historikerinnen und Historiker sowie Studierende anderer Geisteswissenschaften. Ein Leitfaden}, isbn = {978-3-923959-80-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66703}, year = {2011}, abstract = {Viele Studierende der Geschichte und anderer Geisteswissenschaften streben das Lehramt an. Darin Fuß zu fassen, wird in den kommenden Jahren immer schwieriger. Andere Studierende haben sogar {\"u}berhaupt keine Vorstellungen von ihrer beruflichen Zukunft. Dieser Leitfaden m{\"o}chte Orientierung bei der Berufswahl vermitteln und mit Hilfe von Experten Perspektiven er{\"o}ffnen.}, subject = {Geschichtsstudium}, language = {de} } @misc{OPUS4-5621, title = {R{\"u}ckBLICK - Der Jahresbericht 2011 der Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, volume = {2011}, organization = {Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69544}, year = {2012}, abstract = {Die Entwicklung der Universit{\"a}t W{\"u}rzburg im Jahr 2011.}, subject = {W{\"u}rzburg}, language = {de} } @phdthesis{Xu2014, author = {Xu, Zhihao}, title = {Cooperative Formation Controller Design for Time-Delay and Optimality Problems}, isbn = {978-3-923959-96-9}, doi = {10.25972/OPUS-10555}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-105555}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {This dissertation presents controller design methodologies for a formation of cooperative mobile robots to perform trajectory tracking and convoy protection tasks. Two major problems related to multi-agent formation control are addressed, namely the time-delay and optimality problems. 
For the task of trajectory tracking, a leader-follower based system structure is adopted for the controller design, where the selection criteria for controller parameters are derived through analyses of characteristic polynomials. The resulting parameters ensure the stability of the system and overcome the steady-state error as well as the oscillation behavior under time-delay effect. In the convoy protection scenario, a decentralized coordination strategy for balanced deployment of mobile robots is first proposed. Based on this coordination scheme, optimal controller parameters are generated in both centralized and decentralized fashion to achieve dynamic convoy protection in a unified framework, where distributed optimization technique is applied in the decentralized strategy. This unified framework takes into account the motion of the target to be protected, and the desired system performance, for instance, minimal energy to spend, equal inter-vehicle distance to keep, etc. Both trajectory tracking and convoy protection tasks are demonstrated through simulations and real-world hardware experiments based on the robotic equipment at Department of Computer Science VII, University of W{\"u}rzburg.}, subject = {Optimalwertregelung}, language = {en} } @phdthesis{Buckel2014, author = {Buckel, Thomas}, title = {Verbesserung und {\"U}berwachung von RFID-Infrastrukturen im Einzelhandel - ein aktionsforschungsbasierter Ansatz}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-106719}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Die Grundlage f{\"u}r eine hohe Bestandsgenauigkeit ist die unternehmens{\"u}bergreifende Identifikation und Nachverfolgung von Waren, die mit automatisierten Identifizierungstechnologien (Auto-ID-Technologien) erm{\"o}glicht wird. Die Einf{\"u}hrung der Auto-ID-Technologie des Barcodes hat die Industrie vor mehr als 30 Jahren fundamental ver{\"a}ndert. Darauf aufbauend versprechen neuere Auto-ID-Technologien wie die „Radio Frequency Identification" (RFID) Probleme wie die Nichtverf{\"u}gbarkeit von Waren, eine intransparente Diebstahlrate oder Warenschwund durch eine bessere Nachverfolgung aller Waren und eine h{\"o}here Bestandsgenauigkeit zu l{\"o}sen. Die Vorteile von RFID gegen{\"u}ber dem Barcode sind unter anderem die h{\"o}here Datendichte, die gr{\"o}ßere Robustheit gegen{\"u}ber Umwelteinfl{\"u}ssen sowie die schnellere und mehrfache Erfassung von Gegenst{\"a}nden. Viele Unternehmen sehen sich jedoch vor allem nach der Implementierung einer RFID-Infrastruktur mit einer Vielzahl von Problemen konfrontiert. Aspekte wie wenig Unterst{\"u}tzung durch das Management, interner Widerstand durch Mitarbeiter, Probleme bei der Integration von Hardware und Software und vor allem eine mangelnde Datenqualit{\"a}t verhindern, dass die prognostizierten positiven Effekte erreicht werden k{\"o}nnen. Derartige Ph{\"a}nomene werden passend unter dem Begriff „Credibility Gap" zusammengefasst. Dieser beschreibt die Problematik, dass es insgesamt an Verfahren, Methoden und gezielter Unterst{\"u}tzung mangelt, um die in der Literatur umfangreich versprochenen positiven Effekte tats{\"a}chlich und nachhaltig zu realisieren. Passend werden die erwarteten Einsparungen und Verbesserungen durch den RFID-Einsatz oftmals als Expertensch{\"a}tzungen und sogar als gr{\"o}ßtenteils rein spekulativ bezeichnet. Das Ziel dieser Dissertation ist es, Praktikern das Erreichen der positiven RFID-Effekte zu erm{\"o}glichen. 
Hierzu wurden vielf{\"a}ltige Untersuchungen auf Basis einer langfristigen Kooperation mit einem der weltweit gr{\"o}ßten Bekleidungsh{\"a}ndler durchgef{\"u}hrt, indem ein RFID-Implementierungsprojekt begleitet und intensiv mitgestaltet wurde. Zun{\"a}chst wird best{\"a}tigt, dass die prognostizierten Vorteile der RFID-Technologie tats{\"a}chlich nicht allein durch die Implementierung der ben{\"o}tigten Infrastruktur erreicht werden k{\"o}nnen. Als Grund werden hohe Bestandsungenauigkeiten der verwendeten Bestandssysteme identifiziert, die sowohl auf technische als auch auf menschlich verursachte Fehler zur{\"u}ckzuf{\"u}hren sind. Als Folge ist die RFID-Datenqualit{\"a}t nicht verl{\"a}sslich. Die Dissertation setzt an den Problemen des Credibility Gap an und diagnostiziert bei einer bereits implementierten RFID-Infrastruktur zun{\"a}chst die Fehler und Ursachen der mangelnden Datenqualit{\"a}t. Darauf aufbauend werden Maßnahmen und Handlungsanweisungen vorgestellt, mit deren Hilfe die Fehler behoben und die Infrastruktur schließlich verbessert und {\"u}berwacht werden kann. Um insgesamt die Anforderungen der Praxis und der Wissenschaft erfolgreich miteinander zu verkn{\"u}pfen, wird als Forschungsmethode eine neuartige Kombination zweier Auspr{\"a}gungen der Aktionsforschung verwendet. Als Ergebnis werden einerseits f{\"u}r Praktiker hilfreiche Frameworks und Tests zur Fehlerbehebung, {\"U}berwachungskennzahlen sowie Regeln des effektiven RFID-Systemmanagements beschrieben. Alle durchgef{\"u}hrten und in der Dissertation vorgestellten Maßnahmen f{\"u}hren nachweislich zu einer erh{\"o}hten Datenqualit{\"a}t eines implementierten RFID-Systems und stellen M{\"o}glichkeiten zur kennzahlenbasierten Visualisierung der RFID-Prozessperformance bereit. Andererseits wird ein Modell f{\"u}r die Verwendung der Aktionsforschung vorgeschlagen sowie eine umfangreiche Validierung der Methodik durchgef{\"u}hrt. Auf diese Weise wird neben der Praxisrelevanz der Ergebnisse auch die Pr{\"a}zision der Forschungsergebnisse sichergestellt. S{\"a}mtliche Ergebnisse dienen als Basis f{\"u}r vielf{\"a}ltige Forschungsans{\"a}tze. So erm{\"o}glichen eine h{\"o}here Verl{\"a}sslichkeit und Datenqualit{\"a}t der RFID-Informationen aussagekr{\"a}ftigere Analysen. Weiter sind durch fehlerkorrigierte Prozessdaten neuartige Methoden des RFID-Data-Mining denkbar. Dieser Forschungsbereich ist nach wie vor gr{\"o}ßtenteils unber{\"u}hrt und bietet enormes Potential, weitere durch RFID in Aussicht gestellte Vorteile zu realisieren.}, subject = {RFID}, language = {de} } @techreport{Winkelmann2015, type = {Working Paper}, author = {Winkelmann, Axel}, title = {Proceedings of the Doctoral Consortium WI 2015}, editor = {Winkelmann, Axel}, issn = {2199-0328}, doi = {10.25972/OPUS-11171}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-111715}, pages = {213}, year = {2015}, abstract = {Bereits seit Anfang der 1990er Jahre wird jungen Wissenschaftlern im Vorfeld der Tagung "Wirtschaftsinformatik" ein Doctoral Consortium als unterst{\"u}tzendes Forum angeboten. Diese Einrichtung wurde auch zur gr{\"o}ßten Internationalen Konferenz der Wirtschaftsinformatik, der WI 2015 in Osnabr{\"u}ck fortgef{\"u}hrt. 
Dieser Band fasst die zum Vortrag ausgew{\"a}hlten Beitr{\"a}ge zusammen.}, subject = {Wirtschaftsinformatik}, language = {en} } @phdthesis{Witek2014, author = {Witek, Maximilian}, title = {Multiobjective Traveling Salesman Problems and Redundancy of Complete Sets}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-110740}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {The first part of this thesis deals with the approximability of the traveling salesman problem. This problem is defined on a complete graph with edge weights, and the task is to find a Hamiltonian cycle of minimum weight that visits each vertex exactly once. We study the most important multiobjective variants of this problem. In the multiobjective case, the edge weights are vectors of natural numbers with one component for each objective, and since weight vectors are typically incomparable, there is no single optimal Hamiltonian cycle. Instead we consider the Pareto set, which consists of those Hamiltonian cycles that are not dominated by some other, strictly better Hamiltonian cycles. The central goal in multiobjective optimization, and in the first part of this thesis in particular, is the approximation of such Pareto sets. We first develop improved approximation algorithms for the two-objective metric traveling salesman problem on multigraphs and for related Hamiltonian path problems that are inspired by the single-objective Christofides' heuristic. We further show arguments indicating that our algorithms are difficult to improve. Furthermore, we consider multiobjective maximization versions of the traveling salesman problem, where the task is to find Hamiltonian cycles with high weight in each objective. We generalize single-objective techniques to the multiobjective case, where we first compute a cycle cover with high weight and then remove an edge with low weight in each cycle. Since weight vectors are often incomparable, the choice of the edges of low weight is non-trivial. We develop a general lemma that solves this problem and enables us to generalize the single-objective maximization algorithms to the multiobjective case. We obtain improved, randomized approximation algorithms for the multiobjective maximization variants of the traveling salesman problem. We conclude the first part by developing deterministic algorithms for these problems. The second part of this thesis deals with redundancy properties of complete sets. We call a set autoreducible if for every input instance x we can efficiently compute some y that is different from x but that has the same membership to the set. If the set can be split into two equivalent parts, then it is called weakly mitotic, and if the splitting is obtained by an efficiently decidable separator set, then it is called mitotic. For different reducibility notions and complexity classes, we analyze how redundant their complete sets are. Previous research in this field concentrates on polynomial-time computable reducibility notions. The main contribution of this part of the thesis is a systematic study of the redundancy properties of complete sets for typical complexity classes and reducibility notions that are computable in logarithmic space. We use different techniques to show autoreducibility and mitoticity that depend on the size of the complexity class and the strength of the reducibility notion considered. For small complexity classes such as NL and P we use self-reducible complete sets to show that all complete sets are autoreducible.
For large complexity classes such as PSPACE and EXP we apply diagonalization methods to show that all complete sets are even mitotic. For intermediate complexity classes such as NP and the remaining levels of the polynomial-time hierarchy we establish autoreducibility of complete sets by locally checking computational transcripts. In many cases we can show autoreducibility of complete sets, while mitoticity is not known to hold. We conclude the second part by showing that in some cases, autoreducibility of complete sets at least implies weak mitoticity.}, subject = {Mehrkriterielle Optimierung}, language = {en} } @phdthesis{Schoeneberg2014, author = {Sch{\"o}neberg, Hendrik}, title = {Semiautomatische Metadaten-Extraktion und Qualit{\"a}tsmanagement in Workflow-Systemen zur Digitalisierung historischer Dokumente}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-104878}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Performing Named Entity Recognition on ancient documents is a time-consuming, complex and error-prone manual task. It is a prerequisite though to being able to identify related documents and correlate between named entities in distinct sources, helping to precisely recreate historic events. In order to reduce the manual effort, automated classification approaches could be leveraged. Classifying terms in ancient documents in an automated manner poses a difficult task due to the sources' challenging syntax and poor conservation states. This thesis introduces and evaluates approaches that can cope with complex syntactial environments by using statistical information derived from a term's context and combining it with domain-specific heuristic knowledge to perform a classification. Furthermore this thesis demonstrates how metadata generated by these approaches can be used as error heuristics to greatly improve the performance of workflow systems for digitizations of early documents.}, subject = {Klassifikation}, language = {de} } @phdthesis{Fehrmann2015, author = {Fehrmann, Sven}, title = {Ontologiebasiertes Cloud Computing}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-111929}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Die Dissertation „Ontologiebasiertes Cloud Computing" im Fachbereich Wirtschaftsinformatik behandelt das Thema Cloud Computing und veranschaulicht die M{\"o}glichkeiten der theoretischen und praktischen Nutzung einer Ontologie f{\"u}r das Cloud Computing. Neben den Private und Public Clouds sowie Hybrid-L{\"o}sungen wird vor allem eine ausgefeilte Virtualisierungstechnologie die Zukunft im IT-Bereich mitgestalten. Die Vielfalt und Anzahl der angebotenen Services nimmt besonders auf dem Sektor der Public Clouds weiterhin stark zu, w{\"a}hrend im Hybrid-Bereich ansprechende L{\"o}sungen noch ausstehen. Das Nutzen eines Cloud-Services ist in der Regel einfach und wird mit den fallenden Preisen zunehmend interessanter. Eine Reihe von Punkten, die im Vorfeld genau betrachtet und festgelegt werden m{\"u}ssen, wie Aspekte der IT-Sicherheit, des Datenschutzes und der Kosten, erm{\"o}glichen eine wirtschaftliche und rechtssichere Inanspruchnahme eines Cloud-Services. Vor der Nutzung eines Services m{\"u}ssen zudem der Wert, die Nutzungsh{\"a}ufigkeit und die Geheimhaltungsstufe der eigenen Daten bekannt sein, um sicher bestimmen zu k{\"o}nnen, ob alle Informationen oder nur ein Teil zum Auslagern geeignet sind. 
Dazu bedarf es einer klaren Festlegung der vertraglichen Rahmenbedingungen und einer Regelung bez{\"u}glich des Schadensersatzes bei einem Ausfall. Ein aktives Change Management sollte schon vor der Einf{\"u}hrung eines Services Akzeptanz f{\"u}r die sich im IT-Umfeld {\"a}ndernden Aufgabengebiete schaffen. Vergleichbare Alternativen zu finden, dies war die Zielvorgabe der durchgef{\"u}hrten, breiten Untersuchung von 15 Serviceanbietern, verbunden mit dem Aufbau einer Ontologie. Auf einem sehr dynamischen Cloud Computing Markt k{\"o}nnen diese Untersuchungen nat{\"u}rlich nur eine Momentaufnahme abbilden, denn neue Provider etablieren sich, schon l{\"a}nger bestehende ver{\"a}ndern und verbessern ihre Angebote. Damit diese Momentaufnahme nicht in einem statischen Endzustand verbleibt, wurde eine Ontologie aufgebaut, die die konsistente Einpflege ver{\"a}nderter Sachverhalte zul{\"a}sst. Die Idealvorstellung ist es, dass beim Bekanntwerden einer neuen Information diese auch immer in die Ontologie einfließt. Die Anbieteruntersuchung zeigt, dass Cloud-Services heute schon ein hohes Potential haben. Um sich einen Gesamt{\"u}berblick {\"u}ber die unterschiedlichen Services und ihre Angebote zu verschaffen, ist eine Ontologie besonders geeignet. Die aufgebaute Cloud-Ontologie beinhaltet eine Service-Auswahl, die auf die Literatur- und Anbieteruntersuchung aufbaut. {\"A}hnlich einer Suchmaschine hilft sie, sich {\"u}ber bestehende Angebote auf dem Markt zu informieren. Und sie vereinfacht die Selektion, definiert klar bekannte technische Details, erleichtert die Suche z. B. nach ben{\"o}tigten Zusatzdienstleistungen {\"u}ber standardisierte Schnittstellen, versucht Transparenz und Nachvollziehbarkeit bei den Abrechnungsmodellen herzustellen, um eine Vergleichbarkeit {\"u}berhaupt erst zu erm{\"o}glichen. Der gr{\"o}ßte Vorteil liegt in der Zeitersparnis: Die Recherche nach passenden Cloud-Services wird durch formalisierte und somit vergleichbare Kriterien verk{\"u}rzt. Bei mehreren passenden Anbietern l{\"a}sst sich {\"u}ber weitere Abfragen oder Kostenvergleiche der jeweils f{\"u}r den Nutzer beste Anbieter gezielt finden. Ebenso k{\"o}nnen Services mit signifikanten Ausschlusskriterien fr{\"u}hzeitig aus der Auswahl entfernt werden. Durch das Verbot bestimmter Zuweisungen oder durch die Forderung von Mindestbedingungen innerhalb der Ontologie wird die Einpflege falscher Sachverhalte verhindert und sie erweist sich damit wesentlich unempfindlicher als viele Programme. Die Aufgabenstellung bei der Modellerstellung lag darin, zu einer allgemeinen Aussagekraft der modellierten Abh{\"a}ngigkeiten zu kommen. Außerdem erf{\"u}llt die Cloud-Ontologie die vier typischen Anforderungen an eine Ontologie: Sie ist ausschließlich durch die standardisierte Sprache OWL beschrieben, kann durch einen Inferenzalgorithmus (z. B. Pellet) ausgewertet werden, unterscheidet eindeutig zwischen 80 Klassen und 342 Individuals und bildet zahlreiche Informationen {\"u}ber 2657 Verkn{\"u}pfungen ab. Die Ontologie kann mit geringem Aufwand auch in ein Programm mit einer ansprechenden Oberfl{\"a}che {\"u}berf{\"u}hrt werden, wie der programmierte Prototyp beweist. 
In der Praxis m{\"u}ssen f{\"u}r Unternehmen verst{\"a}rkt Hilfsmittel angeboten werden oder in den Vordergrund r{\"u}cken, wie Cloud-Ontologien, die die Auswahl von Services erleichtern, Vergleiche erst erm{\"o}glichen, die Suche verk{\"u}rzen und zum Schluss zu Ergebnissen f{\"u}hren, die den Vorstellungen des k{\"u}nftigen Nutzers entsprechen.}, subject = {Cloud Computing}, language = {de} } @phdthesis{Wamser2015, author = {Wamser, Florian}, title = {Performance Assessment of Resource Management Strategies for Cellular and Wireless Mesh Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-11151}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-111517}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The rapid growth in the field of communication networks has been truly amazing in the last decades. We are currently experiencing a continuation thereof with an increase in traffic and the emergence of new fields of application. In particular, the latter is interesting since due to advances in the networks and new devices, such as smartphones, tablet PCs, and all kinds of Internet-connected devices, new additional applications arise from different areas. What applies for all these services is that they come from very different directions and belong to different user groups. This results in a very heterogeneous application mix with different requirements and needs on the access networks. The applications within these networks typically use the network technology as a matter of course, and expect that it works in all situations and for all sorts of purposes without any further intervention. Mobile TV, for example, assumes that the cellular networks support the streaming of video data. Likewise, mobile-connected electricity meters rely on the timely transmission of accounting data for electricity billing. From the perspective of the communication networks, this requires not only the technical realization for the individual case, but a broad consideration of all circumstances and all requirements of special devices and applications of the users. Such a comprehensive consideration of all eventualities can only be achieved by a dynamic, customized, and intelligent management of the transmission resources. This management requires to exploit the theoretical capacity as much as possible while also taking system and network architecture as well as user and application demands into account. Hence, for a high level of customer satisfaction, all requirements of the customers and the applications need to be considered, which requires a multi-faceted resource management. The prerequisite for supporting all devices and applications is consequently a holistic resource management at different levels. At the physical level, the technical possibilities provided by different access technologies, e.g., more transmission antennas, modulation and coding of data, possible cooperation between network elements, etc., need to be exploited on the one hand. On the other hand, interference and changing network conditions have to be counteracted at physical level. On the application and user level, the focus should be on the customer demands due to the currently increasing amount of different devices and diverse applications (medical, hobby, entertainment, business, civil protection, etc.). The intention of this thesis is the development, investigation, and evaluation of a holistic resource management with respect to new application use cases and requirements for the networks. 
Therefore, different communication layers are investigated and corresponding approaches are developed using simulative methods as well as practical emulation in testbeds. The new approaches are designed with respect to different complexity and implementation levels in order to cover the design space of resource management in a systematic way. Since the approaches cannot be evaluated generally for all types of access networks, network-specific use cases and evaluations are finally carried out in addition to the conceptual design and the modeling of the scenario. The first part is concerned with management of resources at physical layer. We study distributed resource allocation approaches under different settings. Due to the ambiguous performance objectives, a high spectrum reuse is conducted in current cellular networks. This results in possible interference between cells that transmit on the same frequencies. The focus is on the identification of approaches that are able to mitigate such interference. Due to the heterogeneity of the applications in the networks, increasingly different application-specific requirements are experienced by the networks. Consequently, the focus is shifted in the second part from optimization of network parameters to consideration and integration of the application and user needs by adjusting network parameters. Therefore, application-aware resource management is introduced to enable efficient and customized access networks. As indicated before, approaches cannot be evaluated generally for all types of access networks. Consequently, the third contribution is the definition and realization of the application-aware paradigm in different access networks. First, we address multi-hop wireless mesh networks. Finally, we focus with the fourth contribution on cellular networks. Application-aware resource management is applied here to the air interface between user device and the base station. Especially in cellular networks, the intensive cost-driven competition among the different operators facilitates the usage of such a resource management to provide cost-efficient and customized networks with respect to the running applications.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{NavarroBullock2015, author = {Navarro Bullock, Beate}, title = {Privacy aware social information retrieval and spam filtering using folksonomies}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-120941}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Social interactions as introduced by Web 2.0 applications during the last decade have changed the way the Internet is used. Today, it is part of our daily lives to maintain contacts through social networks, to comment on the latest developments in microblogging services or to save and share information snippets such as photos or bookmarks online. Social bookmarking systems are part of this development. Users can share links to interesting web pages by publishing bookmarks and providing descriptive keywords for them. The structure which evolves from the collection of annotated bookmarks is called a folksonomy. The sharing of interesting and relevant posts enables new ways of retrieving information from the Web. Users can search or browse the folksonomy looking at resources related to specific tags or users. Ranking methods known from search engines have been adjusted to facilitate retrieval in social bookmarking systems. 
Hence, social bookmarking systems have become an alternative or complement to search engines. In order to better understand the commonalities and differences of social bookmarking systems and search engines, this thesis compares several aspects of the two systems' structure, usage behaviour and content. This includes the use of tags and query terms, the composition of the document collections and the rankings of bookmarks and search engine URLs. Searchers (recorded via session ids), their search terms and the clicked-on URLs can be extracted from a search engine query logfile. They form links similar to those found in folksonomies, where a user annotates a resource with tags. We use this analogy to build a tripartite hypergraph from query logfiles (a logsonomy), and compare structural and semantic properties of log- and folksonomies. Overall, we have found similar behavioural, structural and semantic characteristics in both systems. Driven by this insight, we investigate whether folksonomy data can be of use in web information retrieval in a similar way to query log data: we construct training data from query logs and a folksonomy to build models for a learning-to-rank algorithm. First experiments show a positive correlation between the ranking results generated from the ranking models of both systems. The research is based on various data collections from the social bookmarking systems BibSonomy and Delicious, Microsoft's search engine MSN (now Bing) and Google data. To maintain social bookmarking systems as a good source for information retrieval, providers need to fight spam. This thesis introduces and analyses different features derived from the specific characteristics of social bookmarking systems to be used in spam detection classification algorithms. The best results can be derived from a combination of profile, activity, semantic and location-based features. Based on the experiments, a spam detection framework which identifies and eliminates spam activities for the social bookmarking system BibSonomy has been developed. The storing and publication of user-related bookmarks and profile information raises questions about user data privacy. What kinds of personal information are collected, and how do systems handle user-related items? In order to answer these questions, the thesis looks into the handling of data privacy in the social bookmarking system BibSonomy. Legal guidelines about how to deal with the private data collected and processed in social bookmarking systems are also presented. Experiments show that the consideration of user data privacy in the process of feature design can be a first step towards strengthening data privacy.}, subject = {Information Retrieval}, language = {en} } @phdthesis{Oberdoerfer2021, author = {Oberd{\"o}rfer, Sebastian}, title = {Better Learning with Gaming: Knowledge Encoding and Knowledge Learning Using Gamification}, doi = {10.25972/OPUS-21970}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219707}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Computer games are highly immersive, engaging, and motivating learning environments. By providing a tutorial at the start of a new game, players learn the basics of the game's underlying principles as well as practice how to successfully play the game. During the actual gameplay, players repeatedly apply this knowledge, thus improving it through repetition. Computer games also challenge players with a constant stream of new challenges which increase in difficulty over time.
As a result, computer games even require players to transfer their knowledge to master these new challenges. A computer game consists of several game mechanics. Game mechanics are the rules of a computer game and encode the game's underlying principles. They create the virtual environments, generate a game's challenges and allow players to interact with the game. Game mechanics can also encode real-world knowledge. This knowledge may be acquired by players via gameplay. However, the actual process of knowledge encoding and knowledge learning using game mechanics has not been thoroughly defined yet. This thesis therefore proposes a theoretical model to define knowledge learning using game mechanics: the Gamified Knowledge Encoding. The model is applied to design a serious game for affine transformations, i.e., GEtiT, and to predict the learning outcome of playing a computer game that encodes orbital mechanics in its game mechanics, i.e., Kerbal Space Program. To assess the effects of different visualization technologies on the overall learning outcome, GEtiT visualizes the gameplay in desktop-3D and immersive virtual reality. The model's applicability for effective game design as well as GEtiT's overall design are evaluated in a usability study. The learning outcome of playing GEtiT and Kerbal Space Program is assessed in four additional user studies. The studies' results validate the use of the Gamified Knowledge Encoding for developing effective serious games and for predicting the learning outcome of existing serious games. Compared to a traditional learning method, GEtiT and Kerbal Space Program yield a similar training effect but a higher motivation to tackle the assignments. In conclusion, this thesis expands the understanding of using game mechanics for the effective learning of knowledge. The presented results are of high importance for researchers, educators, and developers as they also provide guidelines for the development of effective serious games.}, subject = {Serious game}, language = {en} } @incollection{Klawitter1989, author = {Klawitter, J{\"o}rg}, title = {Nachwort: Interdisziplinarit{\"a}t als Idee der Einheit in der Verschiedenheit der Wissenschaft}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-44303}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {1989}, abstract = {No abstract available}, subject = {Umwelt{\"o}konomie}, language = {de} } @phdthesis{Bauer2021, author = {Bauer, Andr{\'e}}, title = {Automated Hybrid Time Series Forecasting: Design, Benchmarking, and Use Cases}, doi = {10.25972/OPUS-22025}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-220255}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {These days, we are living in a digitalized world. Both our professional and private lives are pervaded by various IT services, which are typically operated using distributed computing systems (e.g., cloud environments). Due to the high level of digitalization, the operators of such systems are confronted with fast-paced and changing requirements. In particular, cloud environments have to cope with load fluctuations and respective rapid and unexpected changes in the computing resource demands. To face this challenge, so-called auto-scalers, such as the threshold-based mechanism in Amazon Web Services EC2, can be employed to enable elastic scaling of the computing resources.
However, despite this opportunity, business-critical applications are still run with highly overprovisioned resources to guarantee a stable and reliable service operation. This strategy is pursued due to the lack of trust in auto-scalers and the concern that inaccurate or delayed adaptations may result in financial losses. To adapt the resource capacity in time, the future resource demands must be "foreseen", as reacting to changes once they are observed introduces an inherent delay. In other words, accurate forecasting methods are required to adapt systems proactively. A powerful approach in this context is time series forecasting, which is also applied in many other domains. The core idea is to examine past values and predict how these values will evolve as time progresses. According to the "No-Free-Lunch Theorem", there is no algorithm that performs best for all scenarios. Therefore, selecting a suitable forecasting method for a given use case is a crucial task. Simply put, each method has its benefits and drawbacks, depending on the specific use case. The choice of the forecasting method is usually based on expert knowledge, which cannot be fully automated, or on trial-and-error. In both cases, this is expensive and prone to error. Although auto-scaling and time series forecasting are established research fields, existing approaches cannot fully address the mentioned challenges: (i) In our survey on time series forecasting, we found that publications on time series forecasting typically consider only a small set of (mostly related) methods and evaluate their performance on a small number of time series with only a few error measures while providing no information on the execution time of the studied methods. Therefore, such articles cannot be used to guide the choice of an appropriate method for a particular use case; (ii) Existing open-source hybrid forecasting methods that take advantage of at least two methods to tackle the "No-Free-Lunch Theorem" are computationally intensive, poorly automated, designed for a particular data set, or they lack a predictable time-to-result. Methods exhibiting a high variance in the time-to-result cannot be applied for time-critical scenarios (e.g., auto-scaling), while methods tailored to a specific data set introduce restrictions on the possible use cases (e.g., forecasting only annual time series); (iii) Auto-scalers typically scale an application either proactively or reactively. Even though some hybrid auto-scalers exist, they lack sophisticated solutions to combine reactive and proactive scaling. For instance, resources are only released proactively while resource allocation is entirely done in a reactive manner (inherently delayed); (iv) The majority of existing mechanisms do not take the provider's pricing scheme into account while scaling an application in a public cloud environment, which often results in excessive charged costs. Even though some cost-aware auto-scalers have been proposed, they only consider the current resource demands, neglecting their development over time. For example, resources are often shut down prematurely, even though they might be required again soon. 
To address the mentioned challenges and the shortcomings of existing work, this thesis presents three contributions: (i) The first contribution-a forecasting benchmark-addresses the problem of limited comparability between existing forecasting methods; (ii) The second contribution-Telescope-provides an automated hybrid time series forecasting method addressing the challenge posed by the "No-Free-Lunch Theorem"; (iii) The third contribution-Chamulteon-provides a novel hybrid auto-scaler for coordinated scaling of applications comprising multiple services, leveraging Telescope to forecast the workload intensity as a basis for proactive resource provisioning. In the following, the three contributions of the thesis are summarized: Contribution I - Forecasting Benchmark To establish a level playing field for evaluating the performance of forecasting methods in a broad setting, we propose a novel benchmark that automatically evaluates and ranks forecasting methods based on their performance in a diverse set of evaluation scenarios. The benchmark comprises four different use cases, each covering 100 heterogeneous time series taken from different domains. The data set was assembled from publicly available time series and was designed to exhibit much higher diversity than existing forecasting competitions. Besides proposing a new data set, we introduce two new measures that describe different aspects of a forecast. We applied the developed benchmark to evaluate Telescope. Contribution II - Telescope To provide a generic forecasting method, we introduce a novel machine learning-based forecasting approach that automatically retrieves relevant information from a given time series. More precisely, Telescope automatically extracts intrinsic time series features and then decomposes the time series into components, building a forecasting model for each of them. Each component is forecast by applying a different method and then the final forecast is assembled from the forecast components by employing a regression-based machine learning algorithm. In more than 1300 hours of experiments benchmarking 15 competing methods (including approaches from Uber and Facebook) on 400 time series, Telescope outperformed all methods, exhibiting the best forecast accuracy coupled with a low and reliable time-to-result. Compared to the competing methods that exhibited, on average, a forecast error (more precisely, the symmetric mean absolute forecast error) of 29\%, Telescope exhibited an error of 20\% while being 2556 times faster. In particular, the methods from Uber and Facebook exhibited an error of 48\% and 36\%, and were 7334 and 19 times slower than Telescope, respectively. Contribution III - Chamulteon To enable reliable auto-scaling, we present a hybrid auto-scaler that combines proactive and reactive techniques to scale distributed cloud applications comprising multiple services in a coordinated and cost-effective manner. More precisely, proactive adaptations are planned based on forecasts of Telescope, while reactive adaptations are triggered based on actual observations of the monitored load intensity. To solve occurring conflicts between reactive and proactive adaptations, a complex conflict resolution algorithm is implemented. Moreover, when deployed in public cloud environments, Chamulteon reviews adaptations with respect to the cloud provider's pricing scheme in order to minimize the charged costs. 
In more than 400 hours of experiments evaluating five competing auto-scaling mechanisms in scenarios covering five different workloads, four different applications, and three different cloud environments, Chamulteon exhibited the best auto-scaling performance and reliability while at the same time reducing the charged costs. The competing methods provided insufficient resources for (on average) 31\% of the experimental time; in contrast, Chamulteon cut this time to 8\% and the SLO (service level objective) violations from 18\% to 6\% while using up to 15\% less resources and reducing the charged costs by up to 45\%. The contributions of this thesis can be seen as major milestones in the domain of time series forecasting and cloud resource management. (i) This thesis is the first to present a forecasting benchmark that covers a variety of different domains with a high diversity between the analyzed time series. Based on the provided data set and the automatic evaluation procedure, the proposed benchmark contributes to enhance the comparability of forecasting methods. The benchmarking results for different forecasting methods enable the selection of the most appropriate forecasting method for a given use case. (ii) Telescope provides the first generic and fully automated time series forecasting approach that delivers both accurate and reliable forecasts while making no assumptions about the analyzed time series. Hence, it eliminates the need for expensive, time-consuming, and error-prone procedures, such as trial-and-error searches or consulting an expert. This opens up new possibilities especially in time-critical scenarios, where Telescope can provide accurate forecasts with a short and reliable time-to-result. Although Telescope was applied for this thesis in the field of cloud computing, there is absolutely no limitation regarding the applicability of Telescope in other domains, as demonstrated in the evaluation. Moreover, Telescope, which was made available on GitHub, is already used in a number of interdisciplinary data science projects, for instance, predictive maintenance in an Industry 4.0 context, heart failure prediction in medicine, or as a component of predictive models of beehive development. (iii) In the context of cloud resource management, Chamulteon is a major milestone for increasing the trust in cloud auto-scalers. The complex resolution algorithm enables reliable and accurate scaling behavior that reduces losses caused by excessive resource allocation or SLO violations. In other words, Chamulteon provides reliable online adaptations minimizing charged costs while at the same time maximizing user experience.}, subject = {Zeitreihenanalyse}, language = {en} } @phdthesis{Poehner2021, author = {P{\"o}hner, Nicolai}, title = {Educational robotics competitions as out-of-school learning setting for STEM education: An empirical study on students' learning of problem solving skills through participation in the World Robot Olympiad}, doi = {10.25972/OPUS-24317}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-243179}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Educational robotics is an innovative approach to teaching and learning a variety of different concepts and skills as well as motivating students in the field of Science, Technology, Engineering, and Mathematics (STEM) education. 
This especially applies to educational robotics competitions such as the FIRST LEGO League, the RoboCup Junior, or the World Robot Olympiad as an out-of-school and goal-oriented approach to educational robotics. These competitions have gained greatly in popularity in recent years, and thousands of students participate in them worldwide each year. Moreover, the corresponding technology has become more accessible for teachers and students to use in their classrooms and arguably has a high potential to impact the nature of science education at all levels. One skill that is said to benefit from educational robotics is problem solving. This thesis understands problem solving skills as engineering design skills (in contrast to scientific inquiry). Problem solving skills count among the important skills demanded by industry leaders and policy makers in the context of 21st century skills, which students need to be well-prepared for their future working life in today's world, shaped by an ongoing process of automation, globalization, and digitalization. The overall aim of this thesis is to answer the question of whether educational robotics competitions such as the World Robot Olympiad (WRO) have a positive impact on students' learning in terms of their problem solving skills (as part of 21st century skills). In detail, this thesis focuses on a) whether students can improve their problem solving skills through participation in educational robotics competitions, b) how this skill development is accomplished, and c) the teachers' support of their students during their learning process in the competition. The corresponding empirical studies were conducted throughout the 2018 and 2019 seasons of the WRO in Germany. The results show overall positive effects of participation in the WRO on students' learning of problem solving skills. They display an increase in students' problem solving skills, which is not moderated by other variables such as the competition's category or age group, the students' gender or experience, or the success of the teams at the competition. Moreover, the results indicate that students develop their problem solving skills by using a systematic engineering design process and sophisticated problem solving strategies. Lastly, the teacher's role in the educational robotics competitions as manager and guide (in terms of the constructionist learning theory) of the students' learning process (especially regarding the affective level) is underlined by the results of this thesis. All in all, this thesis contributes to closing the research gap concerning the lack of systematic evaluation of educational robotics to promote students' learning by providing more (methodologically) sophisticated research on this topic.
Thereby, this thesis follows the call for more rigorous (quantitative) research by the educational robotics community, which is necessary to validate the impact of educational robotics.}, subject = {Fachdidaktik}, language = {en} } @phdthesis{Leutert2021, author = {Leutert, Florian}, title = {Flexible Augmented Reality Systeme f{\"u}r robotergest{\"u}tzte Produktionsumgebungen}, isbn = {978-3-945459-39-3}, doi = {10.25972/OPUS-24972}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249728}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Produktionssysteme mit Industrierobotern werden zunehmend komplex; waren deren Arbeitsbereiche fr{\"u}her noch statisch und abgeschirmt, und die programmierten Abl{\"a}ufe gleichbleibend, so sind die Anforderungen an moderne Robotik-Produktionsanlagen gestiegen: Diese sollen sich jetzt mithilfe von intelligenter Sensorik auch in unstrukturierten Umgebungen einsetzen lassen, sich bei sinkenden Losgr{\"o}ßen aufgrund individualisierter Produkte und h{\"a}ufig {\"a}ndernden Produktionsaufgaben leicht rekonfigurieren lassen, und sogar eine direkte Zusammenarbeit zwischen Mensch und Roboter erm{\"o}glichen. Gerade auch bei dieser Mensch-Roboter-Kollaboration wird es damit notwendig, dass der Mensch die Daten und Aktionen des Roboters leicht verstehen kann. Aufgrund der gestiegenen Anforderungen m{\"u}ssen somit auch die Bedienerschnittstellen dieser Systeme verbessert werden. Als Grundlage f{\"u}r diese neuen Benutzerschnittstellen bietet sich Augmented Reality (AR) als eine Technologie an, mit der sich komplexe r{\"a}umliche Daten f{\"u}r den Bediener leicht verst{\"a}ndlich darstellen lassen. Komplexe Informationen werden dabei in der Arbeitsumgebung der Nutzer visualisiert und als virtuelle Einblendungen sichtbar gemacht, und so auf einen Blick verst{\"a}ndlich. Die diversen existierenden AR-Anzeigetechniken sind f{\"u}r verschiedene Anwendungsfelder unterschiedlich gut geeignet, und sollten daher flexibel kombinier- und einsetzbar sein. Auch sollen diese AR-Systeme schnell und einfach auf verschiedenartiger Hardware in den unterschiedlichen Arbeitsumgebungen in Betrieb genommen werden k{\"o}nnen. In dieser Arbeit wird ein Framework f{\"u}r Augmented Reality Systeme vorgestellt, mit dem sich die genannten Anforderungen umsetzen lassen, ohne dass daf{\"u}r spezialisierte AR-Hardware notwendig wird. Das Flexible AR-Framework kombiniert und b{\"u}ndelt daf{\"u}r verschiedene Softwarefunktionen f{\"u}r die grundlegenden AR-Anzeigeberechnungen, f{\"u}r die Kalibrierung der notwendigen Hardware, Algorithmen zur Umgebungserfassung mittels Structured Light sowie generische AR-Visualisierungen und erlaubt es dadurch, verschiedene AR-Anzeigesysteme schnell und flexibel in Betrieb zu nehmen und parallel zu betreiben. Im ersten Teil der Arbeit werden Standard-Hardware f{\"u}r verschiedene AR-Visualisierungsformen sowie die notwendigen Algorithmen vorgestellt, um diese flexibel zu einem AR-System zu kombinieren. Dabei m{\"u}ssen die einzelnen verwendeten Ger{\"a}te pr{\"a}zise kalibriert werden; hierf{\"u}r werden verschiedene M{\"o}glichkeiten vorgestellt, und die mit ihnen dann erreichbaren typischen Anzeige-Genauigkeiten in einer Evaluation charakterisiert.
Nach der Vorstellung der grundlegenden AR-Systeme des Flexiblen AR-Frameworks wird dann eine Reihe von Anwendungen vorgestellt, bei denen das entwickelte System in konkreten Praxis-Realisierungen als AR-Benutzerschnittstelle zum Einsatz kam, unter anderem zur {\"U}berwachung von, Zusammenarbeit mit und einfachen Programmierung von Industrierobotern, aber auch zur Visualisierung von komplexen Sensordaten oder zur Fernwartung. Im Verlauf der Arbeit werden dadurch die Vorteile, die sich durch Verwendung der AR-Technologie in komplexen Produktionssystemen ergeben, herausgearbeitet und in Nutzerstudien belegt.}, subject = {Erweiterte Realit{\"a}t }, language = {de} } @phdthesis{Flederer2021, author = {Flederer, Frank}, title = {CORFU - An Extended Model-Driven Framework for Small Satellite Software with Code Feedback}, doi = {10.25972/OPUS-24981}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249817}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Corfu is a framework for satellite software, not only for the onboard part but also for the ground. Developing software with Corfu follows an iterative model-driven approach. The basis of the process is an engineering model. Engineers formally describe the basic structure of the onboard software in configuration files, which form the engineering model. In the first step, Corfu verifies the model at different levels: not only syntactically and semantically, but also on a higher level such as the scheduling. Based on the model, Corfu generates a software scaffold, which follows an application-centric approach. Software images onboard consist of a list of applications connected through communication channels called topics. Corfu's generic and generated code covers this fundamental communication, telecommand, and telemetry handling. All users have to do is inherit from a generated class and implement the behavior in overridden methods. For each application, the generator creates an abstract class with pure virtual methods. Those methods are callback functions, e.g., for handling telecommands or executing code in threads. However, from the model, one cannot foresee the software implementation by users. Therefore, as an innovation compared to other frameworks, Corfu introduces feedback from the user code back to the model. In this way, we extend the engineering model with information about functions/methods, their invocations, their stack usage, and information about events and telemetry emission. Indeed, it would be possible to add further information extraction for additional use cases. We extract the information in two ways: assembly and source code analysis. The assembly analysis collects information about the stack usage of functions and methods. On the one hand, Corfu uses the gathered information to accomplish additional verification steps, e.g., checking whether stack usages exceed the stack sizes of threads. On the other hand, we use the gathered information to improve the performance of onboard software.
In a use case, we show how the compiled binary and the bandwidth towards the ground can be reduced by exploiting source code information at run-time.}, subject = {FRAMEWORK }, language = {en} } @phdthesis{Dombrovski2022, author = {Dombrovski, Veaceslav}, title = {Software Framework to Support Operations of Nanosatellite Formations}, isbn = {978-3-945459-38-6}, doi = {10.25972/OPUS-24931}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249314}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Since the first CubeSat launch in 2003, the hardware and software complexity of nanosatellites has been continuously increasing. To keep up with the continuously increasing mission complexity and to retain the primary advantages of a CubeSat mission, a new approach for the overall space and ground software architecture and protocol configuration is elaborated in this work. The aim of this thesis is to propose a uniform software and protocol architecture as a basis for software development, test, simulation and operation of multiple pico-/nanosatellites based on ultra-low power components. In contrast to single-CubeSat missions, current and upcoming nanosatellite formation missions require faster and more straightforward development, pre-flight testing and calibration procedures as well as simultaneous operation of multiple satellites. A dynamic and decentralized Compass mission network was established in multiple active CubeSat missions, consisting of uniformly accessible nodes. The Compass middleware was developed to unify the communication and functional interfaces between all involved mission-related software and hardware components. All systems can access each other via dynamic routes to perform service-based M2M communication. With the proposed model-based communication approach, all states, abilities and functionalities of a system are accessed in a uniform way. The Tiny scripting language was designed to allow dynamic code execution on ultra-low power components as a basis for a constraint-based in-orbit scheduler and experiment execution. The implemented Compass Operations front-end enables far-reaching monitoring and control capabilities of all ground and space systems. Its integrated constraint-based operations task scheduler allows the recording of complex satellite operations, which are conducted automatically during the overpasses. The outcome of this thesis became an enabling technology for the UWE-3, UWE-4 and NetSat CubeSat missions.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Kryven2022, author = {Kryven, Myroslav}, title = {Optimizing Crossings in Circular-Arc Drawings and Circular Layouts}, isbn = {978-3-95826-174-7}, doi = {10.25972/WUP-978-3-95826-175-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-245960}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {viii, 129}, year = {2022}, abstract = {A graph is an abstract network that represents a set of objects, called vertices, and relations between these objects, called edges. Graphs can model various networks. For example, a graph can model a social network where the vertices correspond to users of the network and the edges represent relations between the users. To better see the structure of a graph, it is helpful to visualize it. The research field of visualizing graphs is called Graph Drawing. A standard visualization is a node-link diagram in the Euclidean plane. In such a representation, the vertices are drawn as points in the plane and the edges are drawn as Jordan curves between every two vertices connected by an edge.
Edge crossings decrease the readability of a drawing; therefore, Crossing Optimization is a fundamental problem in Graph Drawing. Graphs that can be drawn with few crossings are called beyond-planar graphs. The topic that deals with the definition and analysis of beyond-planar graphs is called Beyond Planarity and it is an important and relatively new research area in Graph Drawing. In general, beyond-planar graphs possess drawings where edge crossings are restricted in some way. For example, the number of crossings may be bounded by a constant independent of the size of the graph. Crossings can also be restricted locally by, for example, restricting the number of crossings per edge, restricting the number of pairwise crossing edges, or bounding the crossing angle of two edges in the drawing from below. This PhD thesis defines and analyses beyond-planar graph classes that arise from such local restrictions on edge crossings.}, subject = {Graphenzeichnen}, language = {en} } @phdthesis{Koenigbauer2021, author = {K{\"o}nigbauer, Martina}, title = {Adaptives Referenzmodell f{\"u}r hybrides Projektmanagement}, issn = {1432-8801}, doi = {10.25972/OPUS-24751}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-247519}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Das Management von Projekten, welche sowohl einmalige und interdisziplin{\"a}re Aufgabenstellungen als auch individuelle Rahmenbedingungen und Einschr{\"a}nkungen umfassen, stellt eine anspruchsvolle Aufgabe dar. Es gibt einige standardisierte Vorgehensmodelle, die einen organisatorischen Rahmen aus Phasen, Prozessen, Rollen und anzuwendenden Methoden anbieten. Traditionellen Vorgehensmodellen wird in der Regel gefolgt, wenn die zu erzielenden Ergebnisse und der Ablauf eines Projektes auf Basis der zur Verf{\"u}gung stehenden Informationen geplant werden k{\"o}nnen. Agile Vorgehensmodelle werden vorrangig genutzt, wenn keine ausreichenden Informationen zur Verf{\"u}gung stehen, um eine vollst{\"a}ndige Planung aufzusetzen. Ihr Fokus liegt darauf, flexibel auf sich {\"a}ndernde Anforderungen einzugehen. Im direkten Austausch mit Kunden werden in meist mehreren aufeinander folgenden Zyklen Zwischenergebnisse bewertet und darauf basierend die jeweils n{\"a}chsten Entwicklungsschritte geplant und umgesetzt. Hybride Vorgehensmodelle werden genutzt, wenn Methoden aus mehreren unterschiedlichen Vorgehensmodellen erforderlich sind, um ein Projekt zu bearbeiten. Die Bedeutung hybrider Vorgehensmodelle hat {\"u}ber die Jahre immer weiter zugenommen. Ihr besonderer Nutzen liegt darin, dass die Methodenauswahl auf den individuellen Kontext eines Projektes angepasst werden kann. Da es in der Praxis aber eine sehr große Anzahl an Methoden gibt, ist die Auswahl der zum Kontext passenden und deren Kombination zu einem individuellen Vorgehensmodell selbst f{\"u}r Experten/-innen eine Herausforderung. Die Forschungsergebnisse der vorliegenden Arbeit zeigen, dass es bisher auch kein Schema zur Unterst{\"u}tzung dieses Prozesses gab. Um diese Forschungsl{\"u}cke zu schließen, wurde ein adaptives Referenzmodell f{\"u}r hybrides Projektmanagement (ARHP) entwickelt. Der wissenschaftliche Beitrag besteht zum einen in der Entwicklung eines Ablaufs zur Selektion und Kombination von zum Kontext passenden Methoden und zum anderen in der Umsetzung des Ablaufs als semi-automatisches Werkzeug. 
Referenzmodellnutzer/-innen k{\"o}nnen darin ihren individuellen Projektkontext durch die Auswahl zutreffender Kriterien (sogenannter Parameterauspr{\"a}gungen) erfassen. Das ARHP bietet ihnen dann ein Vorgehensmodell an, welches aus miteinander anwendbaren und verkn{\"u}pfbaren Methoden besteht. Da in der Projektmanagement-Community h{\"a}ufig schnelle Entscheidungen f{\"u}r ein geeignetes Vorgehensmodell erforderlich sind und selbst Experten/-innen nicht alle Methoden kennen, wird der Nutzen der ''digitalen Beratung'', die das semi-automatische ARHP bietet, als hoch eingestuft. Sowohl die f{\"u}r die Erfassung des Kontextes erforderlichen Parameter als auch die Methoden mit der h{\"o}chsten Praxisrelevanz wurden anhand einer umfangreichen Umfrage erforscht. Ihr wissenschaftlicher Beitrag besteht unter anderem in der erstmaligen Erfassung von Begr{\"u}ndungen f{\"u}r die Verwendung von Methoden im Rahmen individueller, hybrider Vorgehensmodelle. Zudem erlauben die gesammelten Daten einen direkten Vergleich der Methodennutzung in funktionierenden und nicht funktionierenden hybriden Vorgehensmodellen. Mit der so vorhandenen Datengrundlage wird in drei Design Science Research Zyklen ein Algorithmus entwickelt, der den Adaptionsmechanismus des ARHP bildet. Die Evaluation des ARHP erfolgt anhand des entwickelten semi-automatischen Prototypen unter Einbeziehung von Projektmanagementexperten/-innen. Ausf{\"u}hrungen zur Pflege des ARHP k{\"o}nnen als Handlungsanleitung f{\"u}r Referenzmodellkonstrukteure/-innen verstanden werden. Sie bilden den letzten Teil der Arbeit und zeigen, wie das ARHP kontinuierlich weiterentwickelt werden kann. Zudem wird ein Ausblick darauf gegeben, um welche Themen das ARHP im Rahmen weiterf{\"u}hrender Forschung erweitert werden kann. Dabei handelt es sich zum Beispiel um eine noch st{\"a}rkere Automatisierung und Empfehlungen f{\"u}r das Change Management, welche beide bereits in Vorbereitung sind.}, subject = {Projektmanagement}, language = {de} } @phdthesis{Appold2015, author = {Appold, Christian}, title = {Symbolische BDD-basierte Modellpr{\"u}fung asynchroner nebenl{\"a}ufiger Systeme}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-137029}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Today, information and communication systems are ubiquitous and very often consist of several interacting and communicating components. One reason is the widespread use of multi-core processors and the increasing amount of concurrent software for the efficient usage of multi-core processors. Also, the dissemination of distributed emerging technologies like sensor networks or the internet of things is growing. Additionally, a lot of internet protocols are client-server architectures with clients which execute computations in parallel and servers that can handle requests of several clients in parallel. Systems which consist of several interacting and communicating components are often very complex and due to their complexity also prone to errors. Errors in systems can have dramatic consequences, especially in safety-critical areas where human life can be endangered by incorrect system behavior. Hence, it is indispensable to have methods that ensure the proper functioning of such systems. This thesis aims at improving the verifiability of asynchronous concurrent systems using symbolic model checking based on Binary Decision Diagrams (BDDs). 
An asynchronous concurrent system is a system that consists of several components, of which only one component can execute a transition at a time. Model checking is a formal verification technique. For a given system description and a set of desired properties, the validity of the properties for the system is decided automatically by software tools called model checkers. The main problem of model checking is the state-space explosion problem. One approach to reduce this problem is the use of symbolic model checking. There, system states and transitions are not stored explicitly as in explicit model checking. Instead, in symbolic model checking sets of states and sets of transitions are stored and also manipulated together. The data structure used in this thesis to store those sets is the BDD. BDD-based symbolic model checking has already been used successfully in industry several times. Nevertheless, BDD-based symbolic model checking still suffers from the state-space explosion problem and further improvements are necessary to improve its applicability. Central operations in BDD-based symbolic model checking are the computation of successor and predecessor states of a given set of states. Those computations are called image computations. They are applied repeatedly in BDD-based symbolic model checking to decide the validity of properties for a given system description. Hence, their efficient execution is crucial for the memory and runtime requirements of a model checker. In an image computation a BDD for a set of transitions and a BDD for a set of states are combined to compute a set of successor or predecessor states. Often, the size of the BDDs representing the transition relation is also critical for the successful use of model checking. To further improve the applicability of symbolic model checking, we present in this thesis new data structures to store the transition relation of asynchronous concurrent systems. Additionally, we present new image computation algorithms. Both can lead to large runtime and memory reductions for BDD-based symbolic model checking. Asynchronous concurrent systems often contain symmetries. A technique to exploit those symmetries to diminish the state-space explosion problem is symmetry reduction. In this thesis we also present a new efficient algorithm for symmetry reduction in BDD-based symbolic model checking.}, subject = {Programmverifikation}, language = {de} } @phdthesis{Mautner2012, author = {Mautner, Romana}, title = {Datenintegration f{\"u}r Bauprojekte der Gebietsk{\"o}rperschaften}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-138611}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Bei der Durchf{\"u}hrung {\"o}ffentlicher Bauprojekte ist eine intensive Zusammenarbeit zwischen vielen Beteiligten erforderlich: die in der Bauverwaltung des Bauherren angesiedelte Projektleitung, Bedarfstr{\"a}ger (z. B. Universit{\"a}t oder Beh{\"o}rde), Gremien des Bauherrn (Kommunal-, Kreis- oder Bundesparlament), dessen Haushaltsabteilung, Objekt- und Fachplaner (freiberuflich oder als Mitarbeiter der Bauverwaltung), Gutachter, Bauunternehmen, Lieferanten und Dienstleister, Raumordnungs-, Planfeststellungs- und Genehmigungsbeh{\"o}rden. Der Planungs-, Genehmigungs- und Realisationsprozess erstreckt sich meist {\"u}ber mehrere Jahre. W{\"a}hrenddessen ist ein intensiver Informations- und Kommunikationsaustausch zwischen den Beteiligten erforderlich. 
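To make the image computation described in the Appold2015 entry above more tangible, the following minimal sketch computes successor and predecessor states over an explicitly enumerated transition relation; BDD-based model checkers perform the same operation on symbolically encoded sets via conjunction and quantification, which is only hinted at in the comments.

# Explicit-set sketch of the image operation used in symbolic model checking.
# In BDD-based tools, `states` and `transitions` are BDDs and the image is
# computed symbolically; the Python sets here only illustrate the semantics.

def image(states, transitions):
    """Successor states reachable in one step from `states`."""
    return {t for (s, t) in transitions if s in states}

def preimage(states, transitions):
    """Predecessor states that can reach `states` in one step."""
    return {s for (s, t) in transitions if t in states}

def reachable(initial, transitions):
    """Fixed point of repeated image computations (the core of reachability analysis)."""
    reached, frontier = set(initial), set(initial)
    while frontier:
        frontier = image(frontier, transitions) - reached
        reached |= frontier
    return reached

# Tiny two-component example: states are (c1, c2); only one component moves at a time.
transitions = {((0, 0), (1, 0)), ((0, 0), (0, 1)), ((1, 0), (1, 1)), ((0, 1), (1, 1))}
print(reachable({(0, 0)}, transitions))  # -> {(0, 0), (1, 0), (0, 1), (1, 1)}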
Baupl{\"a}ne, Leistungsverzeichnisse, Ange-bote, Vertr{\"a}ge, Protokolle, Bauzeitenpl{\"a}ne und Rechnungen werden immer noch per E-Mail oder in Papierform ausgetauscht. Wegen der meist gr{\"o}ßeren Zahl zeit-gleich betreuter Bauprojekte f{\"u}hrt dies bei fast allen Beteiligten regelm{\"a}ßig zu einer herausfordernd großen Korrespondenz und einem als mangelhaft zu be-zeichnenden {\"U}berblick {\"u}ber die aktuellen Projektdaten. Wegen der hochgradigen Interdependenz der Teilprozesse {\"u}ber alle Phasen hin-weg sind aber eine m{\"o}glichst reibungslose Koordination und die st{\"a}ndige Verf{\"u}g-barkeit aktueller Daten bei allen Beteiligten unabdingbare Voraussetzungen, um eine Baumaßnahme z{\"u}gig und im vorgesehenen Kostenrahmen auszuf{\"u}hren. W{\"a}hrend Datenaustausch und Koordination bei großen gewerblichen Bauprojek-ten bereits mit Erfolg durch virtuelle Projektr{\"a}ume unterst{\"u}tzt werden, sind die {\"o}ffentlichen Bauverwaltungen hier noch z{\"o}gerlich. Die Erstellung eines einheitli-chen und prozess{\"u}bergreifenden Datenmodells speziell f{\"u}r die Abl{\"a}ufe {\"o}ffentli-cher Auftraggeber als Ziel der Arbeit k{\"o}nnte helfen, die Vorteile eines zentralen, f{\"u}r alle Beteiligten zug{\"a}nglichen Datenbestandes auch f{\"u}r die Bauverwaltungen und ihre Projekte nutzbar zu machen und vormals getrennt gehaltene Datenbe-st{\"a}nde zu einem einzigen zusammenzuf{\"u}hren (Datenintegration). Die gr{\"u}ndliche Analyse der Abl{\"a}ufe und Informationsfl{\"u}sse zwischen den Beteiligten {\"u}ber alle Phasen eines {\"o}ffentlichen Bauprojekts hinweg sowie eine Bestandsaufnahme der gegenw{\"a}rtig am Markt verf{\"u}gbaren virtuellen Projektr{\"a}ume im ersten Teil der Arbeit bilden die Grundlage f{\"u}r die Modellierung der Daten sowie ihrer Zusam-menh{\"a}nge im zweiten Teil. Mit der Gesamtdarstellung der Beteiligten, ihrer Rollen und Aufgaben, der Do-kumente und der zugeh{\"o}rigen Metadaten {\"u}ber alle Phasen und Baufachbereiche hinweg wurde ein neuer Forschungsbeitrag erarbeitet. Die unterschiedlichen Be-zeichnungen z. B. in Hoch- und Tiefbauprojekten wurden im Interesse der Ver-st{\"a}ndlichkeit erhalten, aber in einer gemeinsamen Struktur zusammengef{\"u}hrt. Diese Modellierung ist die Voraussetzung f{\"u}r eine verbesserte informationstech-nische Unterst{\"u}tzung {\"o}ffentlicher Bauprojekte und zugleich die ureigenste Aufga-be des Wirtschaftsinformatikers als Mittler zwischen Anwendern und Entwick-lern. Das in dieser Arbeit entwickelte Datenmodell erlaubt wegen seiner verwaltungs- und baufachbereichs{\"u}bergreifenden Konzeption im Sinne eines Referenzmodells den Einsatz als Basis einer Standardanwendungssoftware, die mit geringem An-passungsaufwand bei einer großen Zahl an Kunden im {\"o}ffentlichen Bereich einge-setzt werden kann. Beispiele sind Projektraumanwendungen sowie Workflow-Management-Systeme. Es ist zugleich ein Referenzvorschlag an die Entwickler bestehender Anwendungen zur Definition von Schnittstellen und schließlich zur Umsetzung applikations{\"u}bergreifender Integrationsans{\"a}tze.}, subject = {Bauvorhaben}, language = {de} } @phdthesis{Schwartz2016, author = {Schwartz, Christian}, title = {Modeling and Evaluation of Multi-Stakeholder Scenarios in Communication Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-13388}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-133887}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Today's Internet is no longer only controlled by a single stakeholder, e.g. 
a standard body or a telecommunications company. Rather, the interests of a multitude of stakeholders, e.g. application developers, hardware vendors, cloud operators, and network operators, collide during the development and operation of applications in the Internet. Each of these stakeholders considers different KPIs to be important and attempts to optimise scenarios in its favour. This results in different, often opposing views and can cause problems for the complete network ecosystem. Signalling Storms in the mobile Internet are one example of such a scenario, with one of the largest occurring in Japan in 2012 due to the release and high popularity of a free instant messaging application. The network traffic generated by the application caused a high number of connections to the Internet being established and terminated. This resulted in a similarly high number of signalling messages in the mobile network, causing overload and a loss of service for 2.5 million users over 4 hours. While the network operator suffers the largest impact of this signalling overload, it does not control the application. Thus, the network operator cannot change the application traffic characteristics to generate less network signalling traffic. The stakeholders who could prevent, or at least reduce, such behaviour, i.e. application developers or hardware vendors, have no direct benefit from modifying their products in such a way. This results in a clash of interests which negatively impacts the network performance for all participants. The goal of this monograph is to provide an overview of the complex structures of stakeholder relationships in today's Internet applications in mobile networks. To this end, we study different scenarios where such interests clash and suggest methods where tradeoffs can be optimised for all participants. If such an optimisation is not possible or attempts at it might lead to adverse effects, we discuss the reasons.}, subject = {Leistungsbewertung}, language = {en} } @unpublished{Dandekar2007, author = {Dandekar, Thomas}, title = {Some general system properties of a living observer and the environment he explores}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-33537}, year = {2007}, abstract = {In a nice essay published in Nature in 1993, the physicist Richard Gott III started from a human observer and made a number of witty conclusions about our future prospects giving estimates for the existence of the Berlin Wall, the human race and all the rest of the universe. In the same spirit, we derive implications for "the meaning of life, the universe and all the rest" from a few principles. Adams' absurd answer "42" tells the lesson "garbage in / garbage out" - or suggests that the question is not calculable. We show that experiencing "meaning" and deciding fundamental questions which cannot be decided by formal systems imply central properties of life: Ever higher levels of internal representation of the world and an escalating tendency to become more complex. An observer, "collecting observations" and three measures for complexity are examined. A theory on living systems is derived focussing on their internal representation of information. Living systems are more complex than Kolmogorov complexity ("life is NOT simple") and overcome decision limits (G{\"o}del theorem) for formal systems as illustrated for the cell cycle. Only a world with very fine-tuned environments allows life. 
Such a world is itself rather complex and hence excessively large in its space of different states - a living observer thus has a high probability of residing in a complex and fine-tuned universe.}, subject = {Komplex }, language = {en} } @misc{DandekarDandekar1994, author = {Dandekar, Thomas and Dandekar, G.}, title = {Schlange als Attribut des {\"A}skulap}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-29822}, year = {1994}, abstract = {No abstract available}, language = {de} } @phdthesis{Dannemann2015, author = {Dannemann, Frank}, title = {Unified Monitoring of Spacecrafts}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115934}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Within this thesis a new philosophy in monitoring spacecrafts is presented: the unification of the various kinds of monitoring techniques used during the different lifecycle phases of a spacecraft. The challenging requirements being set for this monitoring framework are: - "separation of concerns" as a design principle (dividing the steps of logging from registered sources, sending to connected sinks and displaying of information), - usage during all mission phases, - usage by all actors (EGSE engineers, groundstation operators, etc.), - configurable at runtime, especially regarding the level of detail of logging information, and - very low resource consumption. First, a prototype of the monitoring framework was developed as a support library for the real-time operating system RODOS. This prototype was tested on dedicated hardware platforms relevant for space, and also on a satellite demonstrator used for educational purposes. As a second step, the results and lessons learned from the development and usage of this prototype were transferred to a real space mission: the first satellite of the DLR compact satellite series - a space-based platform for DLR's own research activities. Within this project, the software of the avionic subsystem was supplemented by a powerful logging component, which enhances the traditional housekeeping capabilities and offers extensive filtering and debugging techniques for monitoring and FDIR needs. This logging component is the major part of the flight version of the monitoring framework. It is completed by counterparts running on the development computers as well as on the EGSE hardware in the integration room, making it most valuable already in the earliest stages of traditional spacecraft development. Future plans in terms of adding support from the groundstation as well will lead to a seamless integration of the monitoring framework not only into the spacecraft itself, but into the whole space system.}, subject = {Raumfahrzeug}, language = {en} } @phdthesis{Schwartges2015, author = {Schwartges, Nadine}, title = {Dynamic Label Placement in Practice}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115003}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The general map-labeling problem is as follows: given a set of geometric objects to be labeled, or features, in the plane, and for each feature a set of label positions, maximize the number of placed labels such that there is at most one label per feature and no two labels overlap. There are three types of features in a map: point, line, and area features. Unfortunately, one cannot expect to find efficient algorithms that solve the labeling problem optimally. 
Interactive maps are digital maps that only show a small part of the entire map whereas the user can manipulate the shown part, the view, by continuously panning, zooming, rotating, and tilting (that is, changing the perspective between a top-down and a bird's-eye view). An example of the application of interactive maps is in navigational devices. Interactive maps are challenging in that the labeling must be updated whenever labels leave the view and, while zooming, the label size must be constant on the screen (which either makes space for further labels or makes labels overlap when zooming in or out, respectively). These updates must be computed in real time, that is, the computation must be so fast that the user does not notice that we spend time on the computation. Additionally, labels must not jump or flicker, that is, labels must not suddenly change their positions or, while zooming out, a vanished label must not appear again. In this thesis, we present efficient algorithms that dynamically label point and line features in interactive maps. We try to label as many features as possible while we prohibit labels that overlap, jump, and flicker. We have implemented all our approaches and tested them on real-world data. We conclude that our algorithms are indeed real-time capable.}, subject = {Computerkartografie}, language = {en} } @article{GageikReinthalBenzetal.2014, author = {Gageik, Nils and Reinthal, Eric and Benz, Paul and Montenegro, Sergio}, title = {Complementary Vision based Data Fusion for Robust Positioning and Directed Flight of an Autonomous Quadrocopter}, doi = {10.5121/ijaia.2014.5501}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-113621}, year = {2014}, abstract = {The present paper describes an improved 4 DOF (x/y/z/yaw) vision-based positioning solution for fully 6 DOF autonomous UAVs, optimised in terms of computation and development costs as well as robustness and performance. The positioning system combines Fourier transform-based image registration (Fourier Tracking) and differential optical flow computation to overcome the drawbacks of a single approach. The first method is capable of recognizing movement in four degrees of freedom under variable lighting conditions, but suffers from a low sample rate and high computational costs. Differential optical flow computation, on the other hand, enables a very high sample rate to gain control robustness. This method, however, is limited to translational movement only and performs poorly in bad lighting conditions. A reliable positioning system for autonomous flights with free heading is obtained by fusing both techniques. Although the vision system can measure the variable altitude during flight, infrared and ultrasonic sensors are used for robustness. This work is part of the AQopterI8 project, which aims to develop an autonomous flying quadrocopter for indoor application and makes autonomous directed flight possible.}, language = {en} } @article{AliMontenegro2014, author = {Ali, Quasim and Montenegro, Sergio}, title = {A Matlab Implementation of Differential GPS for Low-cost GPS Receivers}, doi = {10.12716/1001.08.03.03}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-113618}, year = {2014}, abstract = {A number of public codes exist for GPS positioning and baseline determination in off-line mode. However, no software code exists for DGPS exploiting correction factors at base stations, without relying on double difference information. 
In order to accomplish it, a methodology is introduced in the MATLAB environment for DGPS using C/A pseudoranges on the single frequency L1 only, to make it feasible for low-cost GPS receivers. Our base station is at an accurately surveyed reference point. Pseudoranges and geometric ranges are compared at the base station to compute the correction factors. These correction factors are then handed over to the rover for all valid satellites observed during an epoch. The rover takes them into account for its own position determination for the corresponding epoch. In order to validate the proposed algorithm, our rover is also placed at a pre-determined location. The proposed code is an appropriate and simple-to-use tool for post-processing of GPS raw data for accurate position determination of a rover, e.g. an Unmanned Aerial Vehicle, during post-mission analysis.}, language = {en} } @article{MontenegroAliGageik2014, author = {Montenegro, Sergio and Ali, Qasim and Gageik, Nils}, title = {A review on Distributed Control of Cooperating MINI UAVs}, doi = {10.5121/ijaia.2014.5401}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-113009}, year = {2014}, abstract = {Mini Unmanned Aerial Vehicles (MUAVs) are becoming a popular research platform and drawing considerable attention, particularly during the last decade due to their multi-dimensional applications in almost every walk of life. MUAVs range from simple toys found at electronic supermarkets for entertainment purposes to highly sophisticated commercial platforms performing novel assignments like offshore wind power station inspection and 3D modelling of buildings. This paper presents an overview of the main aspects in the domain of distributed control of cooperating MUAVs to facilitate the potential users in this fascinating field. Furthermore, it gives an overview of the state of the art in MUAV technologies, e.g. the Photonic Mixer Device (PMD) camera, distributed control methods and on-going work and challenges, which motivate many researchers all over the world to work in this field.}, language = {en} } @article{SchultzBaier2014, author = {Schultz, J{\"o}rg and Baier, Herbert}, title = {ISAAC - InterSpecies Analysing Application using Containers}, doi = {10.1186/1471-2105-15-18}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-110124}, year = {2014}, abstract = {Background Information about genes, transcripts and proteins is spread over a wide variety of databases. Different tools have been developed using these databases to identify biological signals in gene lists from large-scale analysis. Mostly, they search for enrichments of specific features. But these tools do not allow an explorative walk through different views and changing the gene lists according to newly arising questions. Results To fill this niche, we have developed ISAAC, the InterSpecies Analysing Application using Containers. The central idea of this web-based tool is to enable the analysis of sets of genes, transcripts and proteins under different biological viewpoints and to interactively modify these sets at any point of the analysis. Detailed history and snapshot information allows tracing each action. Furthermore, one can easily switch back to previous states and perform new analyses. Currently, sets can be viewed in the context of genomes, protein functions, protein interactions, pathways, regulation, diseases and drugs. Additionally, users can switch between species with an automatic, orthology-based translation of existing gene sets. 
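As an illustration of the single-frequency DGPS correction scheme outlined in the AliMontenegro2014 entry above, here is a minimal Python sketch; the original work is a MATLAB implementation, and the data layout, satellite IDs, and numbers below are hypothetical.

# Minimal sketch of pseudorange-based DGPS corrections (illustrative only).
# At the base station, whose position is accurately known, the correction per
# satellite is the difference between the geometric range and the measured
# C/A pseudorange; the rover applies these corrections to its own pseudoranges
# of the same epoch before estimating its position.

from math import dist  # Euclidean distance, Python 3.8+

def base_corrections(base_pos, sat_positions, base_pseudoranges):
    """Per-satellite correction = geometric range - measured pseudorange [m]."""
    return {
        sat: dist(base_pos, sat_positions[sat]) - base_pseudoranges[sat]
        for sat in base_pseudoranges
        if sat in sat_positions
    }

def correct_rover_pseudoranges(rover_pseudoranges, corrections):
    """Apply base-station corrections to the rover's pseudoranges of the same epoch."""
    return {
        sat: pr + corrections[sat]
        for sat, pr in rover_pseudoranges.items()
        if sat in corrections          # only satellites observed at both stations
    }

# Hypothetical single-epoch example (positions in ECEF metres, made-up values).
base_pos = (4027894.0, 307045.7, 4919474.9)
sat_positions = {"G05": (15600e3, 7540e3, 20140e3), "G12": (18760e3, 2750e3, 18610e3)}
base_pr = {"G05": 21136752.1, "G12": 20980424.9}
rover_pr = {"G05": 21137012.7, "G12": 20980683.2}

corr = base_corrections(base_pos, sat_positions, base_pr)
print(correct_rover_pseudoranges(rover_pr, corr))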
As today's research is usually performed in larger teams and consortia, ISAAC provides group-based functionalities. Here, sets as well as results of analyses can be exchanged between members of groups. Conclusions ISAAC fills the gap between primary databases and tools for the analysis of large gene lists. With its highly modular, JavaEE-based design, the implementation of new modules is straightforward. Furthermore, ISAAC comes with an extensive web-based administration interface including tools for the integration of third-party data. Thus, a local installation is easily feasible. In summary, ISAAC is tailor-made for highly explorative interactive analyses of gene, transcript and protein sets in a collaborative environment.}, language = {en} } @article{KrupitzerTemizerPrantletal.2020, author = {Krupitzer, Christian and Temizer, Timur and Prantl, Thomas and Raibulet, Claudia}, title = {An Overview of Design Patterns for Self-Adaptive Systems in the Context of the Internet of Things}, series = {IEEE Access}, volume = {8}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2020.3031189}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-229984}, pages = {187384-187399}, year = {2020}, abstract = {The Internet of Things (IoT) requires the integration of all available, highly specialized, and heterogeneous devices, ranging from embedded sensor nodes to servers in the cloud. The self-adaptive research domain provides adaptive capabilities that can support the integration in IoT systems. However, developing such systems is a challenging, error-prone, and time-consuming task. In this context, design patterns propose already used and optimized solutions to specific problems in various contexts. Applying design patterns might help to reuse existing knowledge about similar development issues. However, so far, there is a lack of taxonomies on design patterns for self-adaptive systems. To tackle this issue, in this paper, we provide a taxonomy on design patterns for self-adaptive systems that can be transferred to support adaptivity in IoT systems. Besides describing the taxonomy and the design patterns, we discuss their applicability in an Industrial IoT case study.}, language = {en} } @article{FreimannDierkesPetermannetal.2021, author = {Freimann, A. and Dierkes, M. and Petermann, T. and Liman, C. and Kempf, F. and Schilling, K.}, title = {ESTNeT: a discrete event simulator for space-terrestrial networks}, series = {CEAS Space Journal}, volume = {13}, journal = {CEAS Space Journal}, issn = {1868-2502}, doi = {10.1007/s12567-020-00316-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-235835}, pages = {39-49}, year = {2021}, abstract = {The capabilities of small satellites have improved significantly in recent years. Specifically, multi-satellite systems have become increasingly popular, since they allow the support of new applications. The development and testing of these multi-satellite systems is a new challenge for engineers and requires the implementation of appropriate development and testing environments. In this paper, a modular network simulation framework for space-terrestrial systems is presented. It enables discrete event simulations for the development and testing of communication protocols, as well as mission-based analysis of other satellite system aspects, such as power supply and attitude control. 
ESTNeT is based on the discrete event simulator OMNeT++ and will be released under an open source license.}, language = {en} } @article{DuekingHolmbergKunzetal.2020, author = {D{\"u}king, Peter and Holmberg, Hans‑Christer and Kunz, Philipp and Leppich, Robert and Sperlich, Billy}, title = {Intra-individual physiological response of recreational runners to different training mesocycles: a randomized cross-over study}, series = {European Journal of Applied Physiology}, volume = {120}, journal = {European Journal of Applied Physiology}, issn = {1439-6319}, doi = {10.1007/s00421-020-04477-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-235022}, pages = {2705-2713}, year = {2020}, abstract = {Purpose Pronounced differences in individual physiological adaptation may occur following various training mesocycles in runners. Here we aimed to assess the individual changes in performance and physiological adaptation of recreational runners performing mesocycles with different intensity, duration and frequency. Methods Employing a randomized cross-over design, the intra-individual physiological responses [i.e., peak (\(\dot{VO}_{2peak}\)) and submaximal (\(\dot{VO}_{2submax}\)) oxygen uptake, velocity at lactate thresholds (V\(_2\), V\(_4\))] and performance (time-to-exhaustion (TTE)) of 13 recreational runners who performed three 3-week sessions of high-intensity interval training (HIIT), high-volume low-intensity training (HVLIT) or more but shorter sessions of HVLIT (high-frequency training; HFT) were assessed. Results \(\dot{VO}_{2submax}\), V\(_2\), V\(_4\) and TTE were not altered by HIIT, HVLIT or HFT (p > 0.05). \(\dot{VO}_{2peak}\) improved to the same extent following HVLIT (p = 0.045) and HFT (p = 0.02). The number of moderately negative responders was higher following HIIT (15.4\%) and HFT (15.4\%) than following HVLIT (7.6\%). The number of very positive responders was higher following HVLIT (38.5\%) than HFT (23\%) or HIIT (7.7\%). 46\% of the runners responded positively to two mesocycles, while 23\% did not respond to any. Conclusion On a group level, none of the interventions altered \(\dot{VO}_{2submax}\), V\(_2\), V\(_4\) or TTE, while HVLIT and HFT improved \(\dot{VO}_{2peak}\). The mean adaptation index indicated similar numbers of positive, negative and non-responders to HIIT, HVLIT and HFT, but more very positive responders to HVLIT than HFT or HIIT. 46\% responded positively to two mesocycles, while 23\% did not respond to any. These findings indicate that the magnitude of responses to HIIT, HVLIT and HFT is highly individual and no pattern was apparent.}, language = {en} } @article{FreyGassenmaierHofmannetal.2020, author = {Frey, Anna and Gassenmaier, Tobias and Hofmann, Ulrich and Schmitt, Dominik and Fette, Georg and Marx, Almuth and Heterich, Sabine and Boivin-Jahns, Val{\'e}rie and Ertl, Georg and Bley, Thorsten and Frantz, Stefan and Jahns, Roland and St{\"o}rk, Stefan}, title = {Coagulation factor XIII activity predicts left ventricular remodelling after acute myocardial infarction}, series = {ESC Heart Failure}, volume = {7}, journal = {ESC Heart Failure}, number = {5}, doi = {10.1002/ehf2.12774}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-236013}, pages = {2354-2364}, year = {2020}, abstract = {Aims Acute myocardial infarction (MI) is the major cause of chronic heart failure. 
The activity of blood coagulation factor XIII (FXIIIa) plays an important role in rodents as a healing factor after MI, whereas its role in healing and remodelling processes in humans remains unclear. We prospectively evaluated the relevance of FXIIIa after acute MI as a potential early prognostic marker for adequate healing. Methods and results This monocentric prospective cohort study investigated cardiac remodelling in patients with ST-elevation MI and followed them up for 1 year. Serum FXIIIa was serially assessed during the first 9 days after MI and after 2, 6, and 12 months. Cardiac magnetic resonance imaging was performed within 4 days after MI (Scan 1), after 7 to 9 days (Scan 2), and after 12 months (Scan 3). The FXIII valine-to-leucine (V34L) single-nucleotide polymorphism rs5985 was genotyped. One hundred forty-six patients were investigated (mean age 58 ± 11 years, 13\% women). Median FXIIIa was 118\% (quartiles, 102-132\%) and dropped to a trough on the second day after MI: 109\% (98-109\%; P < 0.001). FXIIIa recovered slowly over time, reaching the baseline level after 2 to 6 months and surpassing baseline levels only after 12 months: 124\% (110-142\%). The development of FXIIIa after MI was independent of the genotype. FXIIIa on Day 2 was strongly and inversely associated with the relative size of MI in Scan 1 (Spearman's ρ = -0.31; P = 0.01) and Scan 3 (ρ = -0.39; P < 0.01) and positively associated with left ventricular ejection fraction: ρ = 0.32 (P < 0.01) and ρ = 0.24 (P = 0.04), respectively. Conclusions FXIII activity after MI is highly dynamic, exhibiting a significant decline in the early healing period, with reconstitution 6 months later. Depressed FXIIIa early after MI predicted a greater size of MI and lower left ventricular ejection fraction after 1 year. The clinical relevance of these findings remains to be tested in a randomized trial.}, language = {en} } @article{MietchenHagedornFoerstneretal.2011, author = {Mietchen, Daniel and Hagedorn, Gregor and F{\"o}rstner, Konrad U. and Kubke, M Fabiana and Koltzenburg, Claudia and Hahnel, Mark J. and Penev, Lyubomir}, title = {Wikis in scholarly publishing}, doi = {10.3233/ISU-2011-0621}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-87770}, year = {2011}, abstract = {Scientific research is a process concerned with the creation, collective accumulation, contextualization, updating and maintenance of knowledge. Wikis provide an environment that makes it possible to collectively accumulate, contextualize, update and maintain knowledge in a coherent and transparent fashion. Here, we examine the potential of wikis as platforms for scholarly publishing. In the hope of stimulating further discussion, the article itself was drafted on Species-ID - a wiki that hosts a prototype for wiki-based scholarly publishing - where it can be updated, expanded or otherwise improved.}, subject = {Elektronisches Publizieren}, language = {en} } @masterthesis{Busch2022, type = {Bachelor Thesis}, author = {Busch, Marlene Corinna}, title = {Developing a virtual Control Room for future satellite missions}, doi = {10.25972/OPUS-25826}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258261}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {This thesis deals with the first part of a larger project that follows the ultimate goal of implementing a software tool that creates a Mission Control Room in Virtual Reality. 
The software is to be used for the operation of spacecraft and is specially developed for the unique real-time requirements of unmanned satellite missions. Beginning from launch, throughout the whole mission up to the recovery or disposal of the satellite, all systems need to be monitored and controlled continuously to ensure the mission's success. Mission Operation is an essential part of every space mission and has been undertaken for decades. Recent technological advancements in the realm of immersive technologies pave the way for innovative methods to operate spacecraft. Virtual Reality has the capability to resolve the physical constraints set by traditional Mission Control Rooms and thereby delivers novel opportunities. The paper highlights underlying theoretical aspects of Virtual Reality, Mission Control and IP Communication. However, the focus lies upon the practical part of this thesis which revolves around the first steps of the implementation of the virtual Mission Control Room in the Unity Game Engine. Overall, this paper serves as a demonstration of Virtual Reality technology and shows its possibilities with respect to the operation of spacecraft.}, subject = {Control room}, language = {en} } @masterthesis{Hofmann2020, type = {Bachelor Thesis}, author = {Hofmann, Jan}, title = {Deep Reinforcement Learning for Configuration of Time-Sensitive-Networking}, doi = {10.25972/OPUS-21595}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215953}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Reliable, deterministic real-time communication is fundamental to most industrial systems today. In many other domains Ethernet has become the most common platform for communication networks, but for a long time it was unable to satisfy the requirements of industrial networks. This has changed with the introduction of Time-Sensitive-Networking (TSN), a set of standards utilizing Ethernet to implement deterministic real-time networks. This makes Ethernet a viable alternative to the expensive fieldbus systems commonly used in industrial environments. However, TSN is not a silver bullet. Industrial networks are a complex and highly dynamic environment and the configuration of TSN, especially with respect to latency, is a challenging but crucial task. Various approaches have been pursued for the configuration of TSN in dynamic industrial environments. Optimization techniques like Linear Programming (LP) are able to determine an optimal configuration for a given network, but their time consumption increases exponentially with the complexity of the environment. Machine Learning (ML) has become widely popular in recent years and is able to approximate a near-optimal TSN configuration for networks of different complexity. Yet, ML models are usually trained in a supervised manner which requires large amounts of data that have to be generated for the specific environment. Therefore, supervised methods are not scalable and do not adapt to changing dynamics of the network environment. To address these issues, this work proposes a Deep Reinforcement Learning (DRL) approach to the configuration of TSN in industrial networks. DRL combines two different disciplines, Deep Learning (DL) and Reinforcement Learning (RL), and has gained considerable traction in recent years due to breakthroughs in various domains. RL is supposed to autonomously learn a challenging task like the configuration of TSN without requiring any training data. 
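As a sketch of the kind of interactive TSN configuration environment this entry describes next, a Gym-style interface could look as follows; the state, action, and reward definitions here are hypothetical and not taken from the thesis.

# Hypothetical Gym-style sketch of a TSN configuration environment.
# State: the next flow to configure; action: priority class assigned to it;
# reward: penalty if the assignment exceeds the flow's toy latency budget.
# Real environments would model gate control lists, topology, and per-hop
# delays in far more detail.

import random

class TinyTsnEnv:
    NUM_PRIORITIES = 4                      # assumed number of traffic classes

    def __init__(self, num_flows=8, seed=0):
        self.rng = random.Random(seed)
        self.num_flows = num_flows

    def reset(self):
        # Each flow has a latency budget in microseconds (made-up values).
        self.flows = [self.rng.choice((100, 250, 500, 1000)) for _ in range(self.num_flows)]
        self.index = 0
        return self._observation()

    def _observation(self):
        return (self.index, self.flows[self.index] if self.index < self.num_flows else 0)

    def step(self, action):
        """action in [0, NUM_PRIORITIES): priority assigned to the current flow."""
        budget = self.flows[self.index]
        worst_case_delay = 120 * (action + 1)   # toy model: higher priority -> lower delay
        reward = 0.0 if worst_case_delay <= budget else -1.0
        self.index += 1
        done = self.index >= self.num_flows
        return self._observation(), reward, done, {}

# Random-policy rollout, just to show the interface a DRL agent would use.
env = TinyTsnEnv()
obs, done, total = env.reset(), False, 0.0
while not done:
    obs, r, done, _ = env.step(random.randrange(TinyTsnEnv.NUM_PRIORITIES))
    total += r
print("episode reward:", total)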
The addition of DL allows well-studied RL methods to be applied to complex environments such as dynamic industrial networks. There are two major contributions made in this work. In the first step, an interactive environment is proposed which allows for the simulation and configuration of industrial networks using basic TSN mechanisms. The environment provides an interface that allows various DRL methods to be applied to the problem of TSN configuration. The second contribution of this work is an in-depth study on the application of two fundamentally different DRL methods to the proposed environment. Both methods are evaluated on networks of different complexity and the results are compared to the ground truth and to the results of two supervised ML approaches. Ultimately, this work investigates whether DRL can adapt to changing dynamics of the environment in a more scalable manner than supervised methods.}, subject = {Reinforcement Learning}, language = {en} } @phdthesis{Ifflaender2021, author = {Iffl{\"a}nder, Lukas}, title = {Attack-aware Security Function Management}, doi = {10.25972/OPUS-22421}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-224211}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Over the last decades, cybersecurity has become an increasingly important issue. Between 2011 and 2019 alone, the losses from cyberattacks in the United States grew by 6217\%. At the same time, attacks became not only more intensive but also more and more versatile and diverse. Cybersecurity has become everyone's concern. Today, service providers require sophisticated and extensive security infrastructures comprising many security functions dedicated to various cyberattacks. Still, attacks are becoming more intense, to a level where infrastructures can no longer keep up. Simply scaling up is no longer sufficient. To address this challenge, in a whitepaper, the Cloud Security Alliance (CSA) proposed multiple work packages for security infrastructure, leveraging the possibilities of Software-defined Networking (SDN) and Network Function Virtualization (NFV). Security functions require a more sophisticated modeling approach than regular network functions. Notably, the property of dropping packets deemed malicious has a significant impact on Security Service Function Chains (SSFCs)—service chains consisting of multiple security functions to protect against multiple attack vectors. Under attack, the order of these chains influences the end-to-end system performance depending on the attack type. Unfortunately, it is hard to predict the attack composition at system design time. Thus, we make a case for dynamic attack-aware SSFC reordering. Also, we tackle the issues of the lack of integration between security functions and the surrounding network infrastructure, the insufficient use of short-term CPU frequency boosting, and the lack of Intrusion Detection and Prevention Systems (IDPS) against database ransomware attacks. Current works focus on characterizing the performance of security functions and their behavior under overload without considering the surrounding infrastructure. Other works aim at replacing security functions using network infrastructure features but do not consider integrating security functions within the network. Further publications deal with using SDN for security or with how to deal with new vulnerabilities introduced through SDN. However, they do not take security function performance into account. 
NFV is a popular field for research dealing with frameworks, benchmarking methods, the combination with SDN, and implementing security functions as Virtualized Network Functions (VNFs). Research in this area brought forth the concept of Service Function Chains (SFCs) that chain multiple network functions after one another. Nevertheless, they still do not consider the specifics of security functions. The mentioned CSA whitepaper proposes many valuable ideas but leaves their realization open to others. This thesis presents solutions to increase the performance of single security functions using SDN, performance modeling, a framework for attack-aware SSFC reordering, a solution to make better use of CPU frequency boosting, and an IDPS against database ransomware. Specifically, the primary contributions of this work are: • We present approaches to dynamically bypass Intrusion Detection Systems (IDS) in order to increase their performance without reducing the security level. To this end, we develop and implement three SDN-based approaches (two dynamic and one static). We evaluate the proposed approaches regarding security and performance and show that they significantly increase the performance compared to an inline IDS without significant security deficits. We show that using software switches can further increase the performance of the dynamic approaches up to a point where they can eliminate any throughput drawbacks when using the IDS. • We design a DDoS Protection System (DPS) against TCP SYN flood attacks in the form of a VNF that works inside an SDN-enabled network. This solution eliminates known scalability and performance drawbacks of existing solutions for this attack type. Then, we evaluate this solution, showing that it correctly handles the connection establishment, and present solutions for an observed issue. Next, we evaluate the performance, showing that our solution increases performance up to three times. Parallelization and parameter tuning yield another 76\% performance boost. Based on these findings, we discuss optimal deployment strategies. • We introduce the idea of attack-aware SSFC reordering and explain its impact in a theoretical scenario. Then, we discuss the required information to perform this process. We validate our claim of the importance of the SSFC order by analyzing the behavior of single security functions and SSFCs. Based on the results, we conclude that there is a massive impact on the performance of up to three orders of magnitude, and we find contradictory optimal orders for different workloads. Thus, we demonstrate the need for dynamic reordering. Last, we develop a model for SSFCs regarding traffic composition and resource demands. We classify the traffic into multiple classes and model the effect of single security functions on the traffic and their generated resource demands as functions of the incoming network traffic. Based on our model, we propose three approaches to determine optimal orders for reordering. • We implement a framework for attack-aware SSFC reordering based on this knowledge. The framework places all security functions inside an SDN-enabled network and reorders them using SDN flows. Our evaluation shows that the framework can enforce all routes as desired. It correctly adapts to all attacks and returns to the original state after the attacks cease. We find possible security issues at the moment of reordering and present solutions to eliminate them. 
• Next, we design and implement an approach to load-balance servers while taking into account their ability to go into a state of Central Processing Unit (CPU) frequency boost. To this end, the approach collects temperature information from available hosts and places services on the host that can attain the boosted mode the longest. We evaluate this approach and show its effectiveness. For high-load scenarios, the approach increases the overall performance and the performance per watt. Even better results show up for low-load workloads, where not only all performance metrics improve but also the temperatures and total power consumption decrease. • Last, we design an IDPS protecting against database ransomware attacks that comprise multiple queries to attain their goal. Our solution models these attacks using a Colored Petri Net (CPN). A proof-of-concept implementation shows that our approach is capable of detecting attacks without creating false positives for benign scenarios. Furthermore, our solution creates only a small performance impact. Our contributions can help to improve the performance of security infrastructures. We see multiple application areas, ranging from data center operators through software and hardware developers to security and performance researchers. Most of the above-listed contributions found use in several research publications. Regarding future work, we see the need to better integrate SDN-enabled security functions and SSFC reordering in data center networks. Future SSFCs should discriminate between different traffic types, and security frameworks should support automatically learning models for security functions. We see the need to consider energy efficiency for SSFCs and to take CPU boosting technologies into account when designing performance models as well as placement, scaling, and deployment strategies. Last, for a faster adaptation to recent ransomware attacks, we propose machine-assisted learning for database IDPS signatures.}, subject = {Software-defined networking}, language = {en} } @phdthesis{Kluegl2015, author = {Kl{\"u}gl, Peter}, title = {Context-specific Consistencies in Information Extraction: Rule-based and Probabilistic Approaches}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-018-4 (print)}, doi = {10.25972/WUP-978-3-95826-019-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-108352}, school = {W{\"u}rzburg University Press}, year = {2015}, abstract = {Large amounts of communication, documentation as well as knowledge and information are stored in textual documents. Most often, these texts, like webpages, books, tweets or reports, are only available in an unstructured representation since they are created and interpreted by humans. In order to take advantage of this huge amount of concealed information and to include it in analytic processes, it needs to be transformed into a structured representation. Information extraction considers exactly this task. It tries to identify well-defined entities and relations in unstructured data and especially in textual documents. Interesting entities are often consistently structured within a certain context, especially in semi-structured texts. However, their actual composition varies and is possibly inconsistent among different contexts. Information extraction models fall short of their potential and return inferior results if they do not consider these consistencies during processing. 
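To illustrate the attack-aware SSFC reordering idea from the Ifflaender2021 entry above, here is a toy cost model in Python; the traffic classes, drop rates, and per-packet costs are invented, and the thesis' actual model and reordering approaches are considerably more elaborate.

# Illustrative sketch of why SSFC order matters: each security function drops
# part of the (malicious) traffic, so placing the function that filters the
# dominant attack class early reduces the load on all later functions.

from itertools import permutations

# Incoming traffic per class in packets/s (hypothetical attack mix).
traffic = {"benign": 1000.0, "syn_flood": 8000.0, "http_attack": 500.0}

# Per function: CPU cost per processed packet and fraction dropped per class.
functions = {
    "ddos_protection": {"cost": 1.0, "drop": {"syn_flood": 0.99}},
    "ids":             {"cost": 4.0, "drop": {"http_attack": 0.90}},
    "firewall":        {"cost": 0.5, "drop": {"http_attack": 0.20}},
}

def chain_cost(order, traffic):
    """Total CPU demand of the chain for the given traffic composition."""
    load = dict(traffic)
    total = 0.0
    for name in order:
        f = functions[name]
        total += f["cost"] * sum(load.values())     # every arriving packet is processed
        for cls, frac in f["drop"].items():          # dropped packets never reach later functions
            load[cls] = load.get(cls, 0.0) * (1.0 - frac)
    return total

for order in permutations(functions):
    print(order, round(chain_cost(order, traffic)))
best = min(permutations(functions), key=lambda order: chain_cost(order, traffic))
print("cheapest order:", best)

Under this made-up mix, placing the DDoS protection first is cheapest because it removes the SYN flood before the expensive IDS has to process it; a different attack mix would favor a different order, which is exactly the motivation for dynamic reordering.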
This work presents a selection of practical and novel approaches for exploiting these context-specific consistencies in information extraction tasks. The approaches direct their attention not only to one technique, but are based on handcrafted rules as well as probabilistic models. A new rule-based system called UIMA Ruta has been developed in order to provide optimal conditions for rule engineers. This system consists of a compact rule language with a high expressiveness and strong development support. Both elements facilitate rapid development of information extraction applications and improve the general engineering experience, which reduces the necessary efforts and costs when specifying rules. The advantages and applicability of UIMA Ruta for exploiting context-specific consistencies are illustrated in three case studies. They utilize different engineering approaches for including the consistencies in the information extraction task. Either the recall is increased by finding additional entities with similar composition, or the precision is improved by filtering inconsistent entities. Furthermore, another case study highlights how transformation-based approaches are able to correct preliminary entities using the knowledge about the occurring consistencies. The approaches of this work based on machine learning rely on Conditional Random Fields, popular probabilistic graphical models for sequence labeling. They take advantage of a consistency model, which is automatically induced during processing the document. The approach based on stacked graphical models utilizes the learnt descriptions as feature functions that have a static meaning for the model, but change their actual function for each document. The other two models extend the graph structure with additional factors dependent on the learnt model of consistency. They include feature functions for consistent and inconsistent entities as well as for additional positions that fulfill the consistencies. The presented approaches are evaluated in three real-world domains: segmentation of scientific references, template extraction in curricula vitae, and identification and categorization of sections in clinical discharge letters. They are able to achieve remarkable results and provide an error reduction of up to 30\% compared to usually applied techniques.}, subject = {Information Extraction}, language = {en} } @phdthesis{Ifland2014, author = {Ifland, Marianus}, title = {Feedback-Generierung f{\"u}r offene, strukturierte Aufgaben in E-Learning-Systemen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-106348}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Bei Lernprozessen spielt das Anwenden der zu erlernenden T{\"a}tigkeit eine wichtige Rolle. Im Kontext der Ausbildung an Schulen und Hochschulen bedeutet dies, dass es wichtig ist, Sch{\"u}lern und Studierenden ausreichend viele {\"U}bungsm{\"o}glichkeiten anzubieten. Die von Lehrpersonal bei einer "Korrektur" erstellte R{\"u}ckmeldung, auch Feedback genannt, ist jedoch teuer, da der zeitliche Aufwand je nach Art der Aufgabe betr{\"a}chtlich ist. Eine L{\"o}sung dieser Problematik stellen E-Learning-Systeme dar. Geeignete Systeme k{\"o}nnen nicht nur Lernstoff pr{\"a}sentieren, sondern auch {\"U}bungsaufgaben anbieten und nach deren Bearbeitung quasi unmittelbar entsprechendes Feedback generieren. 
Es ist jedoch im Allgemeinen nicht einfach, maschinelle Verfahren zu implementieren, die Bearbeitungen von {\"U}bungsaufgaben korrigieren und entsprechendes Feedback erstellen. F{\"u}r einige Aufgabentypen, wie beispielsweise Multiple-Choice-Aufgaben, ist dies zwar trivial, doch sind diese vor allem dazu gut geeignet, sogenanntes Faktenwissen abzupr{\"u}fen. Das Ein{\"u}ben von Lernzielen im Bereich der Anwendung ist damit kaum m{\"o}glich. Die Behandlung dieser nach g{\"a}ngigen Taxonomien h{\"o}heren kognitiven Lernziele erlauben sogenannte offene Aufgabentypen, deren Bearbeitung meist durch die Erstellung eines Freitexts in nat{\"u}rlicher Sprache erfolgt. Die Information bzw. das Wissen, das Lernende eingeben, liegt hier also in sogenannter „unstrukturierter" Form vor. Dieses unstrukturierte Wissen ist maschinell nur schwer verwertbar, sodass sich Trainingssysteme, die Aufgaben dieser Art stellen und entsprechende R{\"u}ckmeldung geben, bisher nicht durchgesetzt haben. Es existieren jedoch auch offene Aufgabentypen, bei denen Lernende das Wissen in strukturierter Form eingeben, so dass es maschinell leichter zu verwerten ist. F{\"u}r Aufgaben dieser Art lassen sich somit Trainingssysteme erstellen, die eine gute M{\"o}glichkeit darstellen, Sch{\"u}lern und Studierenden auch f{\"u}r praxisnahe Anwendungen viele {\"U}bungsm{\"o}glichkeiten zur Verf{\"u}gung zu stellen, ohne das Lehrpersonal zus{\"a}tzlich zu belasten. In dieser Arbeit wird beschrieben, wie bestimmte Eigenschaften von Aufgaben ausgenutzt werden, um entsprechende Trainingssysteme konzipieren und implementieren zu k{\"o}nnen. Es handelt sich dabei um Aufgaben, deren L{\"o}sungen strukturiert und maschinell interpretierbar sind. Im Hauptteil der Arbeit werden vier Trainingssysteme bzw. deren Komponenten beschrieben und es wird von den Erfahrungen mit deren Einsatz in der Praxis berichtet: Eine Komponente des Trainingssystems „CaseTrain" kann Feedback zu UML-Klassendiagrammen erzeugen. Das neuartige Trainingssystem „WARP" generiert zu UML-Aktivit{\"a}tsdiagrammen Feedback auf mehreren Ebenen, u.a. indem es das durch Aktivit{\"a}tsdiagramme definierte Verhalten von Robotern in virtuellen Umgebungen visualisiert. Mit „{\"U}PS" steht ein Trainingssystem zur Verf{\"u}gung, mit welchem die Eingabe von SQL-Anfragen einge{\"u}bt werden kann. Eine weitere in „CaseTrain" implementierte Komponente f{\"u}r Bildmarkierungsaufgaben erm{\"o}glicht eine unmittelbare, automatische Bewertung entsprechender Aufgaben. Die Systeme wurden im Zeitraum zwischen 2011 und 2014 an der Universit{\"a}t W{\"u}rzburg in Vorlesungen mit bis zu 300 Studierenden eingesetzt und evaluiert. Die Evaluierung ergab eine hohe Nutzung und eine gute Bewertung der eingesetzten Konzepte durch die Studierenden, womit belegt wurde, dass elektronische Trainingssysteme f{\"u}r offene Aufgaben in der Praxis eingesetzt werden k{\"o}nnen.}, subject = {E-Learning}, language = {de} } @misc{Vorbach2014, type = {Master Thesis}, author = {Vorbach, Paul}, title = {Analysen und Heuristiken zur Verbesserung von OCR-Ergebnissen bei Frakturtexten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-106527}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Zahlreiche Digitalisierungsprojekte machen das Wissen vergangener Jahrhunderte jederzeit verf{\"u}gbar. Das volle Potenzial der Digitalisierung von Dokumenten entfaltet sich jedoch erst, wenn diese als durchsuchbare Volltexte verf{\"u}gbar gemacht werden. 
Mithilfe von OCR-Software kann die Erfassung weitestgehend automatisiert werden. Fraktur war ab dem 16. Jahrhundert bis zur Mitte des 20. Jahrhunderts die verbreitete Schrift des deutschen Sprachraums. Durch einige Besonderheiten von Fraktur bleiben die Erkennungsraten bei Frakturtexten aber meist deutlich hinter den Erkennungsergebnissen bei Antiquatexten zur{\"u}ck. Diese Arbeit konzentriert sich auf die Verbesserung der Erkennungsergebnisse der OCR-Software Tesseract bei Frakturtexten. Dazu wurden die Software und bestehende Sprachpakete gesondert auf die Eigenschaften von Fraktur hin analysiert. Durch spezielles Training und Anpassungen an der Software wurde anschließend versucht, die Ergebnisse zu verbessern und Erkenntnisse {\"u}ber die Effektivit{\"a}t verschiedener Ans{\"a}tze zu gewinnen. Die Zeichenfehlerraten konnten durch verschiedene Experimente von zuvor 2,5 Prozent auf 1,85 Prozent gesenkt werden. Außerdem werden Werkzeuge vorgestellt, die das Training neuer Schriftarten f{\"u}r Tesseract erleichtern und eine Evaluation der erzielten Verbesserungen erm{\"o}glichen.}, subject = {Optische Zeichenerkennung}, language = {de} } @inproceedings{FoerstnerHagedornKoltzenburgetal.2011, author = {F{\"o}rstner, Konrad and Hagedorn, Gregor and Koltzenburg, Claudia and Kubke, Fabiana and Mietchen, Daniel}, title = {Collaborative platforms for streamlining workflows in Open Science}, series = {Proceedings of the 6th Open Knowledge Conference}, booktitle = {Proceedings of the 6th Open Knowledge Conference}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-101678}, year = {2011}, abstract = {Despite the internet's dynamic and collaborative nature, scientists continue to produce grant proposals, lab notebooks, data files, conclusions etc. that stay in static formats or are not published online and therefore not always easily accessible to the interested public. Because of limited adoption of tools that seamlessly integrate all aspects of a research project (conception, data generation, data evaluation, peer-reviewing and publishing of conclusions), much effort is later spent on reproducing or reformatting individual entities before they can be repurposed independently or as parts of articles. We propose that workflows - performed both individually and collaboratively - could potentially become more efficient if all steps of the research cycle were coherently represented online and the underlying data were formatted, annotated and licensed for reuse. Such a system would accelerate the process of taking projects from conception to publication stages and allow for continuous updating of the data sets and their interpretation as well as their integration into other independent projects. A major advantage of such workflows is the increased transparency, both with respect to the scientific process and to the contribution of each participant. The latter point is important from a perspective of motivation, as it enables the allocation of reputation, which creates incentives for scientists to contribute to projects.
Such workflow platforms offering possibilities to fine-tune the accessibility of their content could gradually pave the way from the current static mode of research presentation to a more coherent practice of open science.}, language = {en} } @phdthesis{Lemmerich2014, author = {Lemmerich, Florian}, title = {Novel Techniques for Efficient and Effective Subgroup Discovery}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97812}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Large volumes of data are collected today in many domains. Often, there is so much data available that it is difficult to identify the relevant pieces of information. Knowledge discovery seeks to obtain novel, interesting and useful information from large datasets. One key technique for that purpose is subgroup discovery. It aims at identifying descriptions for subsets of the data, which have an interesting distribution with respect to a predefined target concept. This work improves the efficiency and effectiveness of subgroup discovery in different directions. For efficient exhaustive subgroup discovery, algorithmic improvements are proposed for three important variations of the standard setting: First, novel optimistic estimate bounds are derived for subgroup discovery with numeric target concepts. These allow for skipping the evaluation of large parts of the search space without influencing the results. Additionally, necessary adaptations to data structures for this setting are discussed. Second, for exceptional model mining, that is, subgroup discovery with a model over multiple attributes as target concept, a generic extension of the well-known FP-tree data structure is introduced. The modified data structure stores intermediate condensed data representations, which depend on the chosen model class, in the nodes of the trees. This allows the application for many popular model classes. Third, subgroup discovery with generalization-aware measures is investigated. These interestingness measures compare the target share or mean value in the subgroup with the respective maximum value in all its generalizations. For this setting, a novel method for deriving optimistic estimates is proposed. In contrast to previous approaches, the novel measures are not exclusively based on the anti-monotonicity of instance coverage, but also take the difference of coverage between the subgroup and its generalizations into account. In all three areas, the advances lead to runtime improvements of more than an order of magnitude. The second part of the contributions focuses on the \emph{effectiveness} of subgroup discovery. These improvements aim to identify more interesting subgroups in practical applications. For that purpose, the concept of expectation-driven subgroup discovery is introduced as a new family of interestingness measures. It computes the score of a subgroup based on the difference between the actual target share and the target share that could be expected given the statistics for the separate influence factors that are combined to describe the subgroup. In doing so, previously undetected interesting subgroups are discovered, while other, partially redundant findings are suppressed.
Furthermore, this work also approaches practical issues of subgroup discovery: In that direction, the VIKAMINE II tool is presented, which extends its predecessor with a rebuilt user interface, novel algorithms for automatic discovery, new interactive mining techniques, as well as novel options for result presentation and introspection. Finally, some real-world applications are described that utilized the presented techniques. These include the identification of influence factors on the success and satisfaction of university students and the description of locations using tagging data of geo-referenced images.}, subject = {Data Mining}, language = {en} } @misc{Sieber2013, type = {Master Thesis}, author = {Sieber, Christian}, title = {Holistic Evaluation of Novel Adaptation Logics for DASH and SVC}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-92362}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {Streaming of videos has become the major traffic generator in today's Internet and the video traffic share is still increasing. According to Cisco's annual Visual Networking Index report, in 2012, 60\% of the global Internet IP traffic was generated by video streaming services. Furthermore, the study predicts a further increase to 73\% by 2017. At the same time, advances in the fields of mobile communications and embedded devices have led to a widespread adoption of Internet video enabled mobile and wireless devices (e.g. Smartphones). The report predicts that by 2017, the traffic originating from mobile and wireless devices will exceed the traffic from wired devices and states that mobile video traffic was the source of roughly half of the mobile IP traffic at the end of 2012. With the increasing importance of Internet video streaming in today's world, video content providers find themselves in a highly competitive market where user expectations are high and customer loyalty depends strongly on the user's satisfaction with the provided service. In particular, paying customers expect their viewing experience to be the same across all their viewing devices and independently of their currently utilized Internet access technology. However, providing video streaming services is costly in terms of storage space, required bandwidth and generated traffic. Therefore, content providers face a trade-off between the user perceived Quality of Experience (QoE) and the costs for providing the service. Today, a variety of transport and application protocols exist for providing video streaming services, but the one utilized depends on the scenario in mind. Video streaming services can be divided into three categories: Video conferencing, IPTV and Video-on-Demand services. IPTV and video-conferencing have severe real-time constraints and thus utilize mostly datagram-based protocols like the RTP/UDP protocol for the video transmission. Video-on-Demand services, in contrast, can profit from pre-encoded content, buffers at the end user's device, and mostly utilize TCP-based protocols in combination with progressive streaming for the media delivery. In recent years, the HTTP protocol on top of the TCP protocol has gained widespread popularity as a cost-efficient way to distribute pre-encoded video content to customers via progressive streaming. This is due to the fact that HTTP-based video streaming profits from a well-established infrastructure which was originally implemented to efficiently satisfy the increasing demand for web browsing and file downloads.
Large Content Delivery Networks (CDNs) are the key components of that distribution infrastructure. CDNs prevent expensive long-haul data traffic and delays by distributing HTTP content to world-wide locations close to the customers. As of 2012, already 53\% of the global video traffic in the Internet originates from Content Delivery Networks and that percentage is expected to increase to 65\% by the year 2017. Furthermore, HTTP media streaming profits from existing HTTP caching infrastructure, ease of NAT and proxy traversal and firewall friendliness. Video delivery through heterogeneous wired and wireless communications networks is prone to distortions due to insufficient network resources. This is especially true in wireless scenarios, where user mobility and insufficient signal strength can result in a very poor transport service performance (e.g. high packet loss, delays and low and varying bandwidth). A poor performance of the transport in turn may degrade the Quality of Experience as perceived by the user, either due to buffer underruns (i.e. playback interruptions) for TCP-based delivery or image distortions for datagram-based real-time video delivery. In order to overcome QoE degradations due to insufficient network resources, content providers have to consider adaptive video streaming. One possibility to implement this for HTTP/TCP streaming is to partition the content into small segments, encode the segments into different quality levels, and provide access to the segments and the quality level details (e.g. resolution, average bitrate). During the streaming session, a client-centric adaptation algorithm can use the supplied details to adapt the playback to the current environment. However, a lack of a common HTTP adaptive streaming standard led to multiple proprietary solutions developed by major Internet companies like Microsoft (Smooth Streaming), Apple (HTTP Live Streaming) and Adobe (HTTP Dynamic Streaming), loosely based on the aforementioned principle. In 2012, the ISO/IEC published the Dynamic Adaptive Streaming over HTTP (MPEG-DASH) standard. As of today, DASH is becoming widely accepted, with major companies announcing their support or having already implemented the standard into their products. MPEG-DASH is typically used with single layer codecs like H.264/AVC, but recent publications show that scalable video coding can use the existing HTTP infrastructure more efficiently. Furthermore, the layered approach of scalable video coding extends the adaptation options for the client, since already downloaded segments can be enhanced at a later time. The influence of distortions on the perceived QoE for non-adaptive video streaming is well reviewed and published. For HTTP streaming, the QoE of the user is influenced by the initial delay (i.e. the time the client pre-buffers video data) and the length and frequency of playback interruptions due to a depleted video playback buffer. Studies highlight that even low stalling times and frequencies have a negative impact on the QoE of the user and should therefore be avoided. The first contribution of this thesis is the identification of QoE influence factors of adaptive video streaming by means of crowd-sourcing and a laboratory study. MPEG-DASH does not specify how to adapt the playback to the available bandwidth and therefore the design of a download/adaptation algorithm is left to the developer of the client logic. The second contribution of this thesis is the design of a novel user-centric adaptation logic for DASH with SVC.
Other download algorithms for segmented HTTP streaming with single layer and scalable video coding have been published lately. However, there is little information about the behavior of these algorithms regarding the identified QoE-influence factors. The third contribution is a user-centric performance evaluation of three existing adaptation algorithms and a comparison to the proposed algorithm. In the performance evaluation we also evaluate the fairness of the algorithms. In one fairness scenario, two clients deploy the same adaptation algorithm and share one Internet connection. For a fair adaptation algorithm, we expect the behavior of the two clients to be identical. In a second fairness scenario, one client shares the Internet connection with a large HTTP file download and we expect an even bandwidth distribution between the video streaming and the file download. The fourth contribution of this thesis is an evaluation of the behavior of the algorithms in a two-client and HTTP cross traffic scenario. The remainder of this thesis is structured as follows. Chapter II gives a brief introduction to video coding with H.264, the HTTP adaptive streaming standard MPEG-DASH, the investigated adaptation algorithms and metrics of Quality of Experience (QoE) for video streaming. Chapter III presents the methodology and results of the subjective studies conducted in the course of this thesis to identify the QoE influence factors of adaptive video streaming. In Chapter IV, we introduce the proposed adaptation algorithm and the methodology of the performance evaluation. Chapter V highlights the results of the performance evaluation and compares the investigated adaptation algorithms. Section VI summarizes the main findings and gives an outlook towards QoE-centric management of DASH with SVC.}, subject = {DASH}, language = {en} } @phdthesis{LyTung2017, author = {Ly Tung, Nam}, title = {Toward an Intelligent Long-Term Assistance for People with Dementia In the Context of Navigation in Indoor Environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-155235}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Dementia is a complex neurodegenerative syndrome that by 2050 could affect about 135 million people worldwide. People with dementia experience a progressive decline in their cognitive abilities and have serious problems coping with activities of daily living, including orientation and wayfinding tasks. They even experience difficulties in finding their way in a familiar environment. Being lost or fear of getting lost may consequently develop into other psychological deficits such as anxiety, suspicions, illusions, and aggression. Frequent results are social isolation and a reduced quality of life. Moreover, the lives of relatives and caregivers of people with dementia are also negatively affected. Regarding navigation and orientation, most existing approaches focus on outdoor environments and people with mild dementia, who have the capability to use mobile devices. However, Rasquin (2007) observes that even a device with three buttons may be too complicated for people with moderate to severe dementia. In addition, people who are living in care homes mainly perform indoor activities. Given this background, we decided to focus on designing a system for indoor environments for people with moderate to severe dementia, who are unable or reluctant to use smartphone technology.
Adopting a user-centered design approach, the context and requirements of people with dementia were gathered as a first step to understand the needs and difficulties (especially spatial disorientation and wayfinding problems) experienced in dementia care facilities. Then, an "Implicit Interactive Intelligent (III) Environment" for people with dementia was proposed, emphasizing implicit interaction and natural interfaces. The backbone of this III Environment is based on supporting orientation and navigation tasks with three systems: a monitoring system, an intelligent system, and a guiding system. The monitoring system and intelligent system automatically detect and interpret the locations and activities performed by the users, i.e. people with dementia. This approach (implicit input) reduces the cognitive as well as the physical workload on the user for providing input. The intelligent system is also aware of context, predicts next situations (location, activity), and decides when to provide an appropriate service to the users. The guiding system with intuitive and dynamic environmental cues (lighting with color) is responsible for guiding the users to the places they need to be. Overall, three types of monitoring systems based on Ultra-Wideband and iBeacon technologies, using different techniques and algorithms, were implemented for different contexts of use. They showed high user acceptance at a reasonable price as well as decent accuracy and precision. In the intelligent system, models were built to recognize the users' current activity, detect erroneous activities, predict the next location and activity, analyze the history data, detect issues, and notify caregivers and suggest solutions via visualized web interfaces. Regarding the guiding system, five studies were conducted to test and evaluate the effect of lighting with color on people with dementia. The results were promising. Although several components of the III Environment in general and the three systems in particular are in place (implemented and tested separately), integrating them all and employing them in the dementia context in a full, proper evaluation with formal stakeholders (people with dementia and caregivers) remain necessary future steps.}, language = {en} } @article{ColvillBoothNilletal.2016, author = {Colvill, Emma and Booth, Jeremy and Nill, Simeon and Fast, Martin and Bedford, James and Oelfke, Uwe and Nakamura, Mitsuhiro and Poulsen, Per and Worm, Esben and Hansen, Rune and Ravkilde, Thomas and Rydh{\"o}g, Jonas Scherman and Pommer, Tobias and af Rosenschold, Per Munck and Lang, Stephanie and Guckenberger, Matthias and Groh, Christian and Herrmann, Christian and Verellen, Dirk and Poels, Kenneth and Wang, Lei and Hadsell, Michael and Sothmann, Thilo and Blanck, Oliver and Keall, Paul}, title = {A dosimetric comparison of real-time adaptive and non-adaptive radiotherapy: a multi-institutional study encompassing robotic, gimbaled, multileaf collimator and couch tracking}, series = {Radiotherapy and Oncology}, volume = {119}, journal = {Radiotherapy and Oncology}, number = {1}, doi = {10.1016/j.radonc.2016.03.006}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-189605}, pages = {159-165}, year = {2016}, abstract = {Purpose: A study of real-time adaptive radiotherapy systems was performed to test the hypothesis that, across delivery systems and institutions, the dosimetric accuracy is improved with adaptive treatments over non-adaptive radiotherapy in the presence of patient-measured tumor motion.
Methods and materials: Ten institutions with robotic (2), gimbaled (2), MLC (4) or couch tracking (2) used common materials including CT and structure sets, motion traces and planning protocols to create a lung and a prostate plan. For each motion trace, the plan was delivered twice to a moving dosimeter: with and without real-time adaptation. Each measurement was compared to a static measurement and the percentage of failed points for gamma-tests recorded. Results: For all lung traces all measurement sets show improved dose accuracy with a mean 2\%/2 mm gamma-fail rate of 1.6\% with adaptation and 15.2\% without adaptation (p < 0.001). For all prostate traces the mean 2\%/2 mm gamma-fail rate was 1.4\% with adaptation and 17.3\% without adaptation (p < 0.001). The difference between the four systems was small with an average 2\%/2 mm gamma-fail rate of <3\% for all systems with adaptation for lung and prostate. Conclusions: The investigated systems all accounted for realistic tumor motion accurately and performed to a similar high standard, with real-time adaptation significantly outperforming non-adaptive delivery methods.}, language = {en} } @phdthesis{Dorin2022, author = {Dorin, Michael}, title = {The Relationship Between Software Complicacy and Software Reliability}, doi = {10.25972/OPUS-28308}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-283085}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {An enduring engineering problem is the creation of unreliable software leading to unreliable systems. One reason for this is that source code is written in a complicated manner, making it too hard for humans to review and understand. Complicated code leads to other issues beyond dependability, such as expanded development efforts and ongoing difficulties with maintenance, ultimately costing developers and users more money. There are many ideas regarding where blame lies in the creation of buggy and unreliable systems. One prevalent idea is that the selected life cycle model is to blame. The oft-maligned "waterfall" life cycle model is a particularly popular recipient of blame. In response, many organizations changed their life cycle model in hopes of addressing these issues. Agile life cycle models have become very popular, and they promote communication between team members and end users. In theory, this communication leads to fewer misunderstandings and should lead to less complicated and more reliable code. Changing the life cycle model can indeed address communication issues, which can resolve many problems with understanding requirements. However, most life cycle models do not specifically address coding practices or software architecture. Since life cycle models do not address the structure of the code, they are often ineffective at addressing problems related to code complicacy. This dissertation answers several research questions concerning software complicacy, beginning with an investigation of traditional metrics and static analysis to evaluate their usefulness as measurement tools. This dissertation also establishes a new concept in applied linguistics by creating a measurement of software complicacy based on linguistic economy. Linguistic economy describes the efficiencies of speech, and this thesis shows the applicability of linguistic economy to software. Embedded in each topic is a discussion of the ramifications of overly complicated software, including the relationship of complicacy to software faults.
Image recognition using machine learning is also investigated as a potential method of identifying problematic source code. The central part of the work focuses on analyzing the source code of hundreds of different projects from different areas. A static analysis was performed on the source code of each project, and traditional software metrics were calculated. Programs were also analyzed using techniques developed by linguists to measure expression and statement complicacy and identifier complicacy. Professional software engineers were also directly surveyed to understand mainstream perspectives. This work shows that it is possible to use traditional metrics as indicators of potential project bugginess. This work also discovered that it is possible to use image recognition to identify problematic pieces of source code. Finally, this work discovered that it is possible to use linguistic methods to determine which statements and expressions are least desirable and more complicated for programmers. This work's principal conclusion is that there are multiple ways to discover traits indicating that a project or a piece of source code has characteristics of being buggy. Traditional metrics and static analysis can be used to gain some understanding of software complicacy and bugginess potential. Linguistic economy demonstrates a new tool for measuring software complicacy, and machine learning can predict where bugs may lie in source code. The significant implication of this work is that developers can recognize when a project is becoming buggy and take practical steps to avoid creating buggy projects.}, subject = {Softwareentwicklung}, language = {en} } @phdthesis{Balagurin2022, author = {Balagurin, Oleksii}, title = {Designoptimierung von Sternsensoren f{\"u}r Pico- und Nanosatelliten}, doi = {10.25972/OPUS-25896}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258966}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Die Raumfahrt ist eine der konservativsten Industriebranchen. Neue Entwicklungen von Komponenten und Systemen beruhen auf existierenden Standards und eigenen Erfahrungen der Entwickler. Die Systeme sollen in einem vorgegebenen engen Zeitrahmen projektiert, in sehr kleiner St{\"u}ckzahl gefertigt und schließlich aufwendig qualifiziert werden. Erfahrungsgem{\"a}ß reicht die Zeit f{\"u}r Entwicklungsiterationen und weitgehende Perfektionierung des Systems oft nicht aus. Fertige Sensoren, Subsysteme und Systeme sind Unikate, die nur f{\"u}r eine bestimmte Funktion und in manchen F{\"a}llen sogar nur f{\"u}r bestimmte Missionen konzipiert sind. Eine Neuentwicklung solcher Komponenten ist extrem teuer und risikobehaftet. Deswegen werden flugerprobte Systeme ohne {\"A}nderungen und Optimierung mehrere Jahre eingesetzt, ohne Technologiefortschritte zu ber{\"u}cksichtigen. Aufgrund des enormen finanziellen Aufwandes und der Tr{\"a}gheit ist die konventionelle Vorgehensweise in der Entwicklung nicht direkt auf Kleinsatelliten {\"u}bertragbar. Eine dynamische Entwicklung im Low Cost Bereich ben{\"o}tigt eine universale und f{\"u}r unterschiedliche Anwendungsbereiche leicht modifizierbare Strategie. Diese Strategie soll nicht nur flexibel sein, sondern auch zu einer m{\"o}glichst optimalen und effizienten Hardwarel{\"o}sung f{\"u}hren. Diese Arbeit stellt ein Software-Tool f{\"u}r eine zeit- und kosteneffiziente Entwicklung von Sternsensoren f{\"u}r Kleinsatelliten vor.
Um eine maximale Leistung des Komplettsystems zu erreichen, soll der Sensor die Anforderungen und Randbedingungen vorgegebener Anwendungen erf{\"u}llen und dar{\"u}ber hinaus f{\"u}r diese Anwendungen optimiert sein. Wegen der komplexen Zusammenh{\"a}nge zwischen den Parametern optischer Sensorsysteme ist keine „straightforward" L{\"o}sung des Problems m{\"o}glich. Nur durch den Einsatz computerbasierter Optimierungsverfahren kann schnell und effizient ein bestm{\"o}gliches Systemkonzept f{\"u}r die gegebenen Randbedingungen ausgearbeitet werden.}, subject = {Sternsensor}, language = {de} } @phdthesis{Schloer2022, author = {Schl{\"o}r, Daniel}, title = {Detecting Anomalies in Transaction Data}, doi = {10.25972/OPUS-29856}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-298569}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Detecting anomalies in transaction data is an important task with a high potential to avoid financial loss due to irregularities deliberately or inadvertently carried out, such as credit card fraud, occupational fraud in companies or ordering and accounting errors. With the ongoing digitization of our world, data-driven approaches, including machine learning, can draw benefit from data with less manual effort and feature engineering. A large variety of machine learning-based anomaly detection methods approach this by learning a precise model of normality from which anomalies can be distinguished. Modeling normality in transactional data, however, requires capturing distributions and dependencies within the data precisely, with special attention to numerical dependencies such as quantities, prices or amounts. To implicitly model numerical dependencies, Neural Arithmetic Logic Units have been proposed as a neural architecture. In practice, however, these have stability and precision issues. Therefore, we first develop an improved neural network architecture, iNALU, which is designed to better model numerical dependencies as found in transaction data. We compare this architecture to the previous approach and show in several experiments of varying complexity that our novel architecture provides better precision and stability. We integrate this architecture into two generative neural network models adapted for transaction data and investigate how well normal behavior is modeled. We show that both architectures can successfully model normal transaction data, with our neural architecture improving generative performance for one model. Since categorical and numerical variables are common in transaction data, but many machine learning methods only process numerical representations, we explore different representation learning techniques to transform categorical transaction data into dense numerical vectors. We extend this approach by proposing an outlier-aware discretization, thus incorporating numerical attributes into the computation of categorical embeddings, and investigate latent spaces, as well as quantitative performance for anomaly detection. Next, we evaluate different scenarios for anomaly detection on transaction data. We extend our iNALU architecture to a neural layer that can model both numerical and non-numerical dependencies and evaluate it in a supervised and one-class setting. We investigate the stability and generalizability of our approach and show that it outperforms a variety of models in the balanced supervised setting and performs comparably in the one-class setting.
Finally, we evaluate three approaches to using a generative model as an anomaly detector and compare the anomaly detection performance.}, subject = {Anomalieerkennung}, language = {en} } @phdthesis{Stauffert2022, author = {Stauffert, Jan-Philipp}, title = {Temporal Confounding Effects in Virtual and Extended Reality Systems}, doi = {10.25972/OPUS-29060}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-290609}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Latency is an inherent problem of computing systems. Each computation takes time until the result is available. Virtual reality systems use elaborated computer resources to create virtual experiences. The latency of those systems is often ignored or assumed as small enough to provide a good experience. This cumulative thesis is comprised of published peer reviewed research papers exploring the behaviour and effects of latency. Contrary to the common description of time invariant latency, latency is shown to fluctuate. Few other researchers have looked into this time variant behaviour. This thesis explores time variant latency with a focus on randomly occurring latency spikes. Latency spikes are observed both for small algorithms and as end to end latency in complete virtual reality systems. Most latency measurements gather close to the mean latency with potentially multiple smaller clusters of larger latency values and rare extreme outliers. The latency behaviour differs for different implementations of an algorithm. Operating system schedulers and programming language environments such as garbage collectors contribute to the overall latency behaviour. The thesis demonstrates these influences on the example of different implementations of message passing. The plethora of latency sources result in an unpredictable latency behaviour. Measuring and reporting it in scientific experiments is important. This thesis describes established approaches to measuring latency and proposes an enhanced setup to gather detailed information. The thesis proposes to dissect the measured data with a stacked z-outlier-test to separate the clusters of latency measurements for better reporting. Latency in virtual reality applications can degrade the experience in multiple ways. The thesis focuses on cybersickness as a major detrimental effect. An approach to simulate time variant latency is proposed to make latency available as an independent variable in experiments to understand latency's effects. An experiment with modified latency shows that latency spikes can contribute to cybersickness. A review of related research shows that different time invariant latency behaviour also contributes to cybersickness.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @phdthesis{Freimann2022, author = {Freimann, Andreas}, title = {Efficient Communication in Networks of Small Low Earth Orbit Satellites and Ground Stations}, isbn = {978-3-945459-41-6}, doi = {10.25972/OPUS-28052}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280521}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {With the miniaturization of satellites a fundamental change took place in the space industry. Instead of single big monolithic satellites nowadays more and more systems are envisaged consisting of a number of small satellites to form cooperating systems in space. The lower costs for development and launch as well as the spatial distribution of these systems enable the implementation of new scientific missions and commercial services. 
With this paradigm shift new challenges constantly emerge for satellite developers, particularly in the area of wireless communication systems and network protocols. Satellites in low Earth orbits and ground stations form dynamic space-terrestrial networks. The characteristics of these networks differ fundamentally from those of other networks. The resulting challenges with regard to communication system design, system analysis, packet forwarding, routing and medium access control as well as challenges concerning the reliability and efficiency of wireless communication links are addressed in this thesis. The physical modeling of space-terrestrial networks is addressed by analyzing existing satellite systems and communication devices, by evaluating measurements and by implementing a simulator for space-terrestrial networks. The resulting system and channel models were used as a basis for the prediction of the dynamic network topologies, link properties and channel interference. These predictions allowed for the implementation of efficient routing and medium access control schemes for space-terrestrial networks. Further, the implementation and utilization of software-defined ground stations is addressed, and a data upload scheme for the operation of small satellite formations is presented.}, subject = {Satellitenfunk}, language = {en} } @phdthesis{Becker2018, author = {Becker, Martin}, title = {Understanding Human Navigation using Bayesian Hypothesis Comparison}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-163522}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Understanding human navigation behavior has implications for a wide range of application scenarios. For example, insights into geo-spatial navigation in urban areas can impact city planning or public transport. Similarly, knowledge about navigation on the web can help to improve web site structures or service experience. In this work, we focus on a hypothesis-driven approach to address the task of understanding human navigation: We aim to formulate and compare ideas — for example stemming from existing theory, literature, intuition, or previous experiments — based on a given set of navigational observations. For example, we may compare whether tourists exploring a city walk "short distances" before taking their next photo vs. they tend to "travel long distances between points of interest", or whether users browsing Wikipedia "navigate semantically" vs. "click randomly". For this, the Bayesian method HypTrails has recently been proposed. However, while HypTrails is a straightforward and flexible approach, several major challenges remain: i) HypTrails does not account for heterogeneity (e.g., incorporating differently behaving user groups such as tourists and locals is not possible), ii) HypTrails does not support the user in conceiving novel hypotheses when confronted with a large set of possibly relevant background information or influence factors, e.g., points of interest, popularity of locations, time of the day, or user properties, and finally iii) formulating hypotheses can be technically challenging depending on the application scenario (e.g., due to continuous observations or temporal constraints). In this thesis, we address these limitations by introducing various novel methods and tools and explore a wide range of case studies. 
In particular, our main contributions are the methods MixedTrails and SubTrails which specifically address the first two limitations: MixedTrails is an approach for hypothesis comparison that extends the previously proposed HypTrails method to allow formulating and comparing heterogeneous hypotheses (e.g., incorporating differently behaving user groups). SubTrails is a method that supports hypothesis conception by automatically discovering interpretable subgroups with exceptional navigation behavior. In addition, our methodological contributions also include several tools consisting of a distributed implementation of HypTrails, a web application for visualizing geo-spatial human navigation in the context of background information, as well as a system for collecting, analyzing, and visualizing mobile participatory sensing data. Furthermore, we conduct case studies in many application domains, which encompass — among others — geo-spatial navigation based on photos from the photo-sharing platform Flickr, browsing behavior on the social tagging system BibSonomy, and task choosing behavior on a commercial crowdsourcing platform. In the process, we develop approaches to cope with application specific subtleties (like continuous observations and temporal constraints). The corresponding studies illustrate the variety of domains and facets in which navigation behavior can be studied and, thus, showcase the expressiveness, applicability, and flexibility of our methods. Using these methods, we present new aspects of navigational phenomena which ultimately help to better understand the multi-faceted characteristics of human navigation behavior.}, subject = {Bayes-Verfahren}, language = {en} } @phdthesis{Griebel2022, author = {Griebel, Matthias}, title = {Applied Deep Learning: from Data to Deployment}, doi = {10.25972/OPUS-27765}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-277650}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Novel deep learning (DL) architectures, better data availability, and a significant increase in computing power have enabled scientists to solve problems that were considered unassailable for many years. A case in point is the "protein folding problem", a 50-year-old grand challenge in biology that was recently solved by the DL-system AlphaFold. Other examples comprise the development of large DL-based language models that, for instance, generate newspaper articles that hardly differ from those written by humans. However, developing unbiased, reliable, and accurate DL models for various practical applications remains a major challenge - and many promising DL projects get stuck in the piloting stage, never to be completed. In light of these observations, this thesis investigates the practical challenges encountered throughout the life cycle of DL projects and proposes solutions to develop and deploy rigorous DL models. The first part of the thesis is concerned with prototyping DL solutions in different domains. First, we conceptualize guidelines for applied image recognition and showcase their application in a biomedical research project. Next, we illustrate the bottom-up development of a DL backend for an augmented intelligence system in the manufacturing sector. We then turn to the fashion domain and present an artificial curation system for individual fashion outfit recommendations that leverages DL techniques and unstructured data from social media and fashion blogs. 
After that, we showcase how DL solutions can assist fashion designers in the creative process. Finally, we present our award-winning DL solution for the segmentation of glomeruli in human kidney tissue images that was developed for the Kaggle data science competition HuBMAP - Hacking the Kidney. The second part continues the development path of the biomedical research project beyond the prototyping stage. Using data from five laboratories, we show that ground truth estimation from multiple human annotators and training of DL model ensembles help to establish objectivity, reliability, and validity in DL-based bioimage analyses. In the third part, we present deepflash2, a DL solution that addresses the typical challenges encountered during training, evaluation, and application of DL models in bioimaging. The tool facilitates the objective and reliable segmentation of ambiguous bioimages through multi-expert annotations and integrated quality assurance. It is embedded in an easy-to-use graphical user interface and offers best-in-class predictive performance for semantic and instance segmentation under economical usage of computational resources.}, language = {en} } @phdthesis{Tscharn2019, author = {Tscharn, Robert}, title = {Innovative And Age-Inclusive Interaction Design with Image-Schematic Metaphors}, doi = {10.25972/OPUS-17576}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-175762}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The field of human-computer interaction (HCI) strives for innovative user interfaces. Innovative and novel user interfaces are a challenge for a growing population of older users and put older adults at risk of being excluded from an increasingly digital world. This is because older adults often have lower cognitive abilities and little prior experience with technology. This thesis aims at resolving the tension between innovation and age-inclusiveness by developing user interfaces that can be used regardless of cognitive abilities and technology-dependent prior knowledge. The method of image-schematic metaphors holds promise for innovative and age-inclusive interaction design. Image-schematic metaphors represent a form of technology-independent prior knowledge. They reveal basic mental models and can be gathered in language (e.g. bank account is container from "I put money into my bank account"). Based on a discussion of previous applications of image-schematic metaphors in HCI, the present work derives three empirical research questions regarding image-schematic metaphors for innovative and age-inclusive interaction design. The first research question addresses the yet untested assumption that younger and older adults overlap in their technology-independent prior knowledge and, therefore, their usage of image-schematic metaphors. In study 1, a total of 41 participants described abstract concepts from the domains of online banking and everyday life. In study 2, ten contextual interviews were conducted. In both studies, younger and older adults showed a substantial overlap of 70\% to 75\%, indicating that their mental models also overlap substantially. The second research question addresses the applicability and potential of image-schematic metaphors for innovative design from the perspective of designers. In study 3, 18 student design teams completed an ideation process with either an affinity diagram as the industry standard, image-schematic metaphors, or both methods in combination, and created paper prototypes.
The image-schematic metaphor method alone, but not the combination of both methods, was readily adopted and applied just as well as the more familiar standard method. In study 4, professional interaction designers created prototypes either with or without image-schematic metaphors. In both studies, the method of image-schematic metaphors was perceived as applicable and creativity-stimulating. The third research question addresses whether designs that explicitly follow image-schematic metaphors are more innovative and age-inclusive regarding differences in cognitive abilities and prior technological knowledge. In two experimental studies (study 5 and 6) involving a total of 54 younger and 53 older adults, prototypes that were designed with image-schematic metaphors were perceived as more innovative compared to those that were designed without image-schematic metaphors. Moreover, the impact of prior technological knowledge on interaction was reduced for prototypes that had been designed with image-schematic metaphors. However, participants' cognitive abilities and age still influenced the interaction significantly. The present work provides empirical as well as methodological findings that can help to promote the method of image-schematic metaphors in interaction design. As a result of these studies it can be concluded that image-schematic metaphors are an applicable and effective method for innovative user interfaces that can be used regardless of prior technological knowledge.}, subject = {Mensch-Maschine-Kommunikation}, language = {en} } @phdthesis{Furth2018, author = {Furth, Sebastian}, title = {Linkable Technical Documentation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174185}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {The success of semantic systems has been proven in recent years. Nowadays, Linked Data is the driver for the rapid development of ever new intelligent systems. Especially in enterprise environments semantic systems successfully support more and more business processes. This is especially true for after-sales service in the mechanical engineering domain. Here, service technicians need effective access to relevant technical documentation in order to diagnose and solve problems and defects. Therefore, the usage of semantic information retrieval systems has become the new system metaphor. Unlike classical retrieval software, Linked Enterprise Data graphs are exploited to grant targeted and problem-oriented access to relevant documents. However, huge parts of legacy technical documents have not yet been integrated into Linked Enterprise Data graphs. Additionally, a plethora of information models for the semantic representation of technical information exists. The semantic maturity of these information models can hardly be measured. This thesis motivates that there is an inherent need for a self-contained semantification approach for technical documents. This work introduces a maturity model that allows existing documentation to be assessed quickly. Additionally, the approach comprises an abstracting semantic representation for technical documents that is aligned to all major standard information models. The semantic representation combines structural and rhetorical aspects to provide access to so-called Core Documentation Entities. A novel and holistic semantification process describes how technical documents in different legacy formats can be transformed into a semantic and linked representation.
The practical significance of the semantification approach depends on tools supporting its application. This work presents an accompanying tool chain of semantification applications, especially the semantification framework CAPLAN, a highly integrated development and runtime environment for semantification processes. The complete semantification approach is evaluated in four real-life projects: a spare part augmentation project, semantification projects for earth moving technology and harvesting technology, and an ontology population project for special purpose vehicles. Three additional case studies underline the broad applicability of the presented ideas.}, subject = {Linked Data}, language = {en} } @phdthesis{Muehlberger2018, author = {M{\"u}hlberger, Clemens}, title = {Design of a Self-Organizing MAC Protocol for Dynamic Multi-Hop Topologies}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-158788}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Biologically inspired self-organization methods can help to manage the access control to the shared communication medium of Wireless Sensor Networks. One lightweight approach is the primitive of desynchronization, which relies on the periodic transmission of short control messages - similar to the periodical pulses of oscillators. This primitive of desynchronization has already been successfully implemented as a MAC protocol for single-hop topologies. Moreover, there are also some concepts of such a protocol for multi-hop topologies available. However, the existing implementations may handle just a certain class of multi-hop topologies or are not robust against topology dynamics. In addition to the sophisticated access control of the sensor nodes of a Wireless Sensor Network in arbitrary multi-hop topologies, the communication protocol has to be lightweight, applicable, and scalable. These characteristics are of particular interest for distributed and randomly deployed networks (e.g., by dropping nodes off an airplane). In this work we present the development of a self-organizing MAC protocol for dynamic multi-hop topologies. This implies the evaluation of related work, the conception of our new communication protocol based on the primitive of desynchronization as well as its implementation for sensor nodes. As a matter of course, we also analyze our realization with regard to our specific requirements. This analysis is based on several (simulative as well as real-world) scenarios. Since we are mainly interested in the convergence behavior of our protocol, we do not focus on the "classical" network issues, like routing behavior or data rate, within this work. Nevertheless, for this purpose we make use of several real-world testbeds, but also of our self-developed simulation framework. According to the results of our evaluation phase, our self-organizing MAC protocol for WSNs, which is based on the primitive of desynchronization, meets all our demands. In fact, our communication protocol operates in arbitrary multi-hop topologies and copes well with topology dynamics. To the best of our knowledge, our protocol is the first and only MAC protocol in this regard.
Moreover, due to its periodic transmission scheme, it may be an appropriate starting base for additional network services, like time synchronization or routing.}, language = {en} } @phdthesis{Seufert2017, author = {Seufert, Michael Thomas}, title = {Quality of Experience and Access Network Traffic Management of HTTP Adaptive Video Streaming}, issn = {1432-8801}, doi = {10.25972/OPUS-15413}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-154131}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {The thesis focuses on Quality of Experience (QoE) of HTTP adaptive video streaming (HAS) and traffic management in access networks to improve the QoE of HAS. First, the QoE impact of adaptation parameters and time on layer was investigated with subjective crowdsourcing studies. The results were used to compute a QoE-optimal adaptation strategy for given video and network conditions. This allows video service providers to develop and benchmark improved adaptation logics for HAS. Furthermore, the thesis investigated concepts to monitor video QoE on application and network layer, which can be used by network providers in the QoE-aware traffic management cycle. Moreover, an analytic and simulative performance evaluation of QoE-aware traffic management on a bottleneck link was conducted. Finally, the thesis investigated socially-aware traffic management for HAS via Wi-Fi offloading of mobile HAS flows. A model for the distribution of public Wi-Fi hotspots and a platform for socially-aware traffic management on private home routers was presented. A simulative performance evaluation investigated the impact of Wi-Fi offloading on the QoE and energy consumption of mobile HAS.}, subject = {Quality of Experience}, language = {en} } @phdthesis{Lange2019, author = {Lange, Stanislav}, title = {Optimization of Controller Placement and Information Flow in Softwarized Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-17457}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174570}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The Software Defined Networking (SDN) paradigm offers network operators numerous improvements in terms of flexibility, scalability, as well as cost efficiency and vendor independence. However, in order to maximize the benefit from these features, several new challenges in areas such as management and orchestration need to be addressed. This dissertation makes contributions towards three key topics from these areas. Firstly, we design, implement, and evaluate two multi-objective heuristics for the SDN controller placement problem. Secondly, we develop and apply mechanisms for automated decision making based on the Pareto frontiers that are returned by the multi-objective optimizers. Finally, we investigate and quantify the performance benefits for the SDN control plane that can be achieved by integrating information from external entities such as Network Management Systems (NMSs) into the control loop. 
Our evaluation results demonstrate the impact of optimizing various parameters of softwarized networks at different levels and are used to derive guidelines for an efficient operation.}, subject = {Leistungsbewertung}, language = {en} } @article{RingLandesHotho2018, author = {Ring, Markus and Landes, Dieter and Hotho, Andreas}, title = {Detection of slow port scans in flow-based network traffic}, series = {PLoS ONE}, volume = {13}, journal = {PLoS ONE}, number = {9}, doi = {10.1371/journal.pone.0204507}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-226305}, pages = {e0204507, 1-18}, year = {2018}, abstract = {Frequently, port scans are early indicators of more serious attacks. Unfortunately, the detection of slow port scans in company networks is challenging due to the massive amount of network data. This paper proposes an innovative approach for preprocessing flow-based data which is specifically tailored to the detection of slow port scans. The preprocessing chain generates new objects based on flow-based data aggregated over time windows while taking domain knowledge as well as additional knowledge about the network structure into account. The computed objects are used as input for the further analysis. Based on these objects, we propose two different approaches for detection of slow port scans. One approach is unsupervised and uses sequential hypothesis testing whereas the other approach is supervised and uses classification algorithms. We compare both approaches with existing port scan detection algorithms on the flow-based CIDDS-001 data set. Experiments indicate that the proposed approaches achieve better detection rates and exhibit fewer false alarms than similar algorithms.}, language = {en} } @phdthesis{Borchers2020, author = {Borchers, Kai}, title = {Decentralized and Pulse-based Clock Synchronization in SpaceWire Networks for Time-triggered Data Transfers}, doi = {10.25972/OPUS-21560}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215606}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Time-triggered communication is widely used throughout several industry domains, primarily for reliable and real-time capable data transfers. However, existing time-triggered technologies are designed for terrestrial usage and not directly applicable to space applications due to the harsh environment. Instead, specific hardware must be developed to deal with thermal, mechanical, and especially radiation effects. SpaceWire, as an event-triggered communication technology, has been used for years in a large number of space missions. Its moderate complexity, heritage, and transmission rates up to 400 MBits/s are among its main advantages, and it is often without alternative for the on-board computing systems of spacecraft. At present, real-time data transfers are either achieved by prioritization inside SpaceWire routers or by applying a simplified time-triggered approach. These solutions either imply problems if they are used inside distributed on-board computing systems or if networks with more than a single router are required. This work provides a solution for the real-time problem by developing a novel clock synchronization approach. This approach is focused on being compatible with distributed system structures and allows time-triggered data transfers. A significant difference to existing technologies is the remote clock estimation by the use of pulses.
They are transferred over the network and remove the need for latency accumulation, which allows the incorporation of standardized SpaceWire equipment. Additionally, local clocks are controlled in a decentralized manner and provide different correction capabilities in order to handle oscillator-induced uncertainties. All these functionalities are provided by a developed Network Controller (NC), able to isolate the attached network and to control accesses.}, subject = {Daten{\"u}bertragung}, language = {en} } @phdthesis{Sharan2017, author = {Sharan, Malvika}, title = {Bio-computational identification and characterization of RNA-binding proteins in bacteria}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153573}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {RNA-binding proteins (RBPs) have been extensively studied in eukaryotes, where they post-transcriptionally regulate many cellular events including RNA transport, translation, and stability. Experimental techniques, such as cross-linking and co-purification followed by either mass spectrometry or RNA sequencing, have enabled the identification and characterization of RBPs, their conserved RNA-binding domains (RBDs), and the regulatory roles of these proteins on a genome-wide scale. These developments in quantitative, high-resolution, and high-throughput screening techniques have greatly expanded our understanding of RBPs in human and yeast cells. In contrast, our knowledge of the number and potential diversity of RBPs in bacteria is comparatively poor, in part due to the technical challenges associated with existing global screening approaches developed in eukaryotes. Genome- and proteome-wide screening approaches performed in silico may circumvent these technical issues to obtain a broad picture of the RNA interactome of bacteria and identify strong RBP candidates for more detailed experimental study. Here, I report APRICOT ("Analyzing Protein RNA Interaction by Combined Output Technique"), a computational pipeline for the sequence-based identification and characterization of candidate RNA-binding proteins encoded in the genomes of all domains of life using RBDs known from experimental studies. The pipeline identifies functional motifs in protein sequences of an input proteome using position-specific scoring matrices and hidden Markov models of all conserved domains available in the databases and then statistically scores them based on a series of sequence-based features. Subsequently, APRICOT identifies putative RBPs and characterizes them according to functionally relevant structural properties. APRICOT performed better than other existing tools for the sequence-based prediction on the known RBP data sets. The applications and adaptability of the software were demonstrated on several large bacterial RBP data sets including the complete proteome of Salmonella Typhimurium strain SL1344. APRICOT reported 1068 Salmonella proteins as RBP candidates, which were subsequently categorized using the RBDs that have been reported in both eukaryotic and bacterial proteins. A set of 131 strong RBP candidates was selected for experimental confirmation and characterization of RNA-binding activity using RNA co-immunoprecipitation followed by high-throughput sequencing (RIP-Seq) experiments. Based on the relative abundance of transcripts across the RIP-Seq libraries, a catalogue of enriched genes was established for each candidate, which shows the RNA-binding potential of 90\% of these proteins.
Furthermore, the direct targets of a few of these putative RBPs were validated by means of cross-linking and co-immunoprecipitation (CLIP) experiments. This thesis presents the computational pipeline APRICOT for the global screening of protein primary sequences for potential RBPs in bacteria using RBD information from all kingdoms of life. Furthermore, it provides the first bio-computational resource of putative RBPs in Salmonella, which can now be further studied for their biological and regulatory roles. The command line tool and its documentation are available at https://malvikasharan.github.io/APRICOT/.}, language = {en} } @phdthesis{VoulgariKokota2020, author = {Voulgari Kokota, Anna}, title = {Microbiota interactions and dynamics in megachilid bee nests}, doi = {10.25972/OPUS-18249}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-182493}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Insect microbiota plays an essential role in the hosts' health and fitness, regulating their development, nutrition and immunity. The natural microbiota of bees, in particular, has been given much attention, largely because of the globally reported bee population declines. However, although the worker honey bee has been associated with distinctive and specialized microbiota, the microbiota of solitary bees has not been examined in detail, despite their enormous ecological importance. The main objectives of the present thesis were a) the bacterial community description for various solitary bee species, b) the association of the solitary bee microbiota with ecological factors such as landscape type, c) the relation of the bee foraging preferences with their nest bacterial microbiota, d) the examination of the nest building material contribution to the nest microbiota, e) the isolation of bacterial strains with beneficial or harmful properties for the solitary bee larvae and f) the pathological investigation of bacteria found in deceased solitary bee larvae. The findings of the present study revealed a high bacterial biodiversity in the solitary bee nests. At the same time, the bacterial communities were different for each bee host species. Furthermore, it was shown that the pollen bacterial communities underwent compositional shifts reflecting a reduction in floral bacteria with progressing larval development, while a clear landscape effect was absent. The examination of the nest pollen provisions showed different foraging preferences for each included bee species. Both the pollen composition and the host species identity had a strong effect on the pollen bacteria, indicating that the pollen bacterial communities are the result of a combinatory process. The introduced environmental material also contributed to the nest natural microbiome. However, although the larval microbiota was significantly influenced by the pollen microbiota, it was not much associated with that of the nest material. Two Paenibacillus strains isolated from O. bicornis nests showed strong antifungal activities, while several isolated strains were able to metabolize various oligosaccharides which are common in pollen and nectar. Screening for potential pathogenic bacteria in the nests of O. bicornis unveiled bacterial taxa, which dominated the bacterial community in deceased larvae, while at the same time they were undetectable in the healthy individuals. Finally, larvae which were raised in vitro developed distinct bacterial microbiomes according to their diet, while their life span was affected.
The present thesis described aspects of the microbiota dynamics in the nests of seven megachilid solitary bee species, by suggesting which transmission pathways shape the established bacterial communities and how these are altered with larval development. Furthermore, specific bacterial taxa were associated with possible services they might provide to the larvae, while others were related to possible harmful effects. Future studies should integrate microbiota examination of different bee generations and parallel investigation of the microbiota of the nests and their surrounding environment (plant community, soil) to elucidate the bacterial transmission paths which establish the nest microbiota of solitary bees. Functional assays will also allow future studies to characterize specific nest bacteria as beneficial or harmful and describe how they assist the development of healthy bees and the fitness of bee populations.}, subject = {Bienen }, language = {en} } @phdthesis{DinhXuan2018, author = {Dinh-Xuan, Lam}, title = {Quality of Experience Assessment of Cloud Applications and Performance Evaluation of VNF-Based QoE Monitoring}, issn = {1432-8801}, doi = {10.25972/OPUS-16918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-169182}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {In this thesis, various aspects of Quality of Experience (QoE) research are examined. The work is divided into three major blocks: QoE Assessment, QoE Monitoring, and VNF Performance Evaluation. First, prominent cloud applications such as Google Docs and a cloud-based photo album are explored. The QoE is characterized and the influence of packet loss and delay is studied. Afterwards, objective QoE monitoring for HTTP Adaptive Video Streaming (HAS) in the cloud is investigated. Additionally, by using a Virtual Network Function (VNF) for QoE monitoring in the cloud, the feasibility of an interworking of Network Function Virtualization (NFV) and the cloud paradigm is evaluated. To this end, a VNF that exploits deep packet inspection techniques was used to parse the video traffic. An algorithm is then designed accordingly to estimate video quality and QoE based on network and application layer parameters. To assess the accuracy of the estimation, the VNF is measured in different scenarios under different network QoS conditions and within the virtual environment of the cloud architecture. The insights show that the different geographical deployments of the VNF influence the accuracy of the video quality and QoE estimation. Various Service Function Chain (SFC) placement algorithms have been proposed and compared in the context of edge cloud networks. On the one hand, this research is aimed at cloud service providers by providing methods for evaluating QoE for cloud applications. On the other hand, network operators can learn the pitfalls and disadvantages of using the NFV paradigm for such a QoE monitoring mechanism.}, subject = {Quality of Experience}, language = {en} } @phdthesis{Burger2017, author = {Burger, Valentin}, title = {Performance Evaluation and Optimization of Content Delivery Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-15276}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-152769}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Content Delivery Networks (CDNs) are networks that distribute content in the Internet. CDNs are increasingly responsible for the largest share of traffic in the Internet.
CDNs distribute popular content to caches in many geographical areas to save bandwidth by avoiding unnecessary multihop retransmission. By bringing the content geographically closer to the user, CDNs also reduce the latency of the services. Besides end users and content providers, who require high availability of high quality content, CDN providers and Internet Service Providers (ISPs) are interested in an efficient operation of CDNs. In order to ensure an efficient replication of the content, CDN providers have a network of (globally) distributed interconnected datacenters at different points of presence (PoPs). ISPs aim to provide reliable and high speed Internet access. They try to keep the load on the network low and to reduce cost for connectivity with other ISPs. The increasing number of mobile devices such as smart phones and tablets, high definition video content and high resolution displays result in a continuous growth in mobile traffic. This growth in mobile traffic is further accelerated by newly emerging services, such as mobile live streaming and broadcasting services. The steep increase in mobile traffic was expected to reach roughly 60\% of total network traffic by 2018, the majority of which will be video. To handle the growth in mobile networks, the next generation of 5G mobile networks is designed to have higher access rates and an increased densification of the network infrastructure. With the explosion of access rates and the number of base stations, the backhaul of wireless networks will become congested. To reduce the load on the backhaul, the research community suggests installing local caches in gateway routers between the wireless network and the Internet, in base stations of different sizes, and in end-user devices. The local deployment of caches allows keeping the traffic within the ISP's network. The caches are organized in a hierarchy, where caches in the lowest tier are requested first. The request is forwarded to the next tier if the requested object is not found. Appropriate evaluation methods are required to optimally dimension the caches dependent on the traffic characteristics and the available resources. Additionally, methods are necessary that allow performance evaluation of backhaul bandwidth aggregation systems, which further reduce the load on the backhaul. This thesis analyses CDNs utilizing locally available resources and develops the following evaluations and optimization approaches: characterization of CDNs and distribution of resources in the Internet, analysis and optimization of hierarchical caching systems with bandwidth constraints, and performance evaluation of bandwidth aggregation systems.}, subject = {CDN-Netzwerk}, language = {en} } @article{LopezArreguinMontenegro2020, author = {Lopez-Arreguin, A. J. R. and Montenegro, S.}, title = {Towards bio-inspired robots for underground and surface exploration in planetary environments: An overview and novel developments inspired in sand-swimmers}, series = {Heliyon}, volume = {6}, journal = {Heliyon}, doi = {10.1016/j.heliyon.2020.e04148}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-230309}, year = {2020}, abstract = {Desert organisms like sandfish lizards (SLs) bend and generate thrust in granular media to escape heat and hunt for prey [1]. Further, SLs seem to have striking capabilities to swim in an undulatory form, keeping the same wavelength even in terrains with different volumetric densities, hence behaving as rigid bodies.
This paper tries to recommend new research directions for planetary robotics, adapting principles of sand swimmers to improve the robustness of surface exploration robots. First, we summarize previous efforts on bio-inspired hardware developed for granular terrains and accessing complex geological features. Later, a rigid wheel design is proposed to imitate the locomotion capabilities of SLs. In order to derive the force models to predict the performance of such a bio-inspired mobility system, different approaches such as RFT (Resistive Force Theory) and analytical terramechanics are introduced. Even though in typical wheeled robots the slip and sinkage increase with time, the new design intends to imitate the traversability capabilities of SLs, which seem to keep the same slip while displacing at subsurface levels.}, language = {en} } @article{HossfeldHeegaardSkrorinKapovetal.2020, author = {Hoßfeld, Tobias and Heegaard, Poul E. and Skrorin-Kapov, Lea and Varela, Mart{\´i}n}, title = {Deriving QoE in systems: from fundamental relationships to a QoE-based Service-level Quality Index}, series = {Quality and User Experience}, volume = {5}, journal = {Quality and User Experience}, issn = {2366-0139}, doi = {10.1007/s41233-020-00035-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-235597}, year = {2020}, abstract = {With Quality of Experience (QoE) research having made significant advances over the years, service and network providers aim at user-centric evaluation of the services provided in their system. The question arises how to derive QoE in systems. In the context of subjective user studies conducted to derive relationships between influence factors and QoE, user diversity leads to varying distributions of user rating scores for different test conditions. Such models are commonly exploited by providers to derive various QoE metrics in their system, such as expected QoE, or the percentage of users rating above a certain threshold. The question then becomes how to combine (a) user rating distributions obtained from subjective studies, and (b) system parameter distributions, so as to obtain the actual observed QoE distribution in the system? Moreover, how can various QoE metrics of interest in the system be derived? We prove fundamental relationships for the derivation of QoE in systems, thus providing an important link between the QoE community and the systems community. In our numerical examples, we focus mainly on QoE metrics. We furthermore provide a more generalized view on quantifying the quality of systems by defining a QoE-based Service-level Quality Index. This index exploits the fact that quality can be seen as a proxy measure for utility. Following the assumption that not all user sessions should be weighted equally, we aim to provide a generic framework that can be utilized to quantify the overall utility of a service delivered by a system.}, language = {en} } @article{StauffertNieblingLatoschik2020, author = {Stauffert, Jan-Philipp and Niebling, Florian and Latoschik, Marc Erich}, title = {Latency and Cybersickness: Impact, Causes, and Measures. A Review}, series = {Frontiers in Virtual Reality}, volume = {1}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2020.582204}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-236133}, year = {2020}, abstract = {Latency is a key characteristic inherent to any computer system.
Motion-to-Photon (MTP) latency describes the time between the movement of a tracked object and its corresponding movement rendered and depicted by computer-generated images on a graphical output screen. High MTP latency can cause a loss of performance in interactive graphics applications and, even worse, can provoke cybersickness in Virtual Reality (VR) applications. Here, cybersickness can degrade VR experiences or may render the experiences completely unusable. It can confound research findings of an otherwise sound experiment. Latency as a contributing factor to cybersickness needs to be properly understood. Its effects need to be analyzed, its sources need to be identified, good measurement methods need to be developed, and proper countermeasures need to be developed in order to reduce potentially harmful impacts of latency on the usability and safety of VR systems. Research shows that latency can exhibit intricate timing patterns with various spiking and periodic behavior. These timing behaviors may vary, yet most are found to provoke cybersickness. Overall, latency can differ drastically between different systems, interfering with the generalization of measurement results. This review article describes the causes and effects of latency with regard to cybersickness. We report on different existing approaches to measure and report latency. Hence, the article provides readers with the knowledge to understand and report latency for their own applications, evaluations, and experiments. It should also help to measure, identify, and finally control and counteract latency and hence gain confidence in the soundness of empirical data collected by VR exposures. Low latency increases the usability and safety of VR systems.}, language = {en} } @article{KramerBangertSchilling2020, author = {Kramer, Alexander and Bangert, Philip and Schilling, Klaus}, title = {UWE-4: First Electric Propulsion on a 1U CubeSat — In-Orbit Experiments and Characterization}, series = {Aerospace}, volume = {7}, journal = {Aerospace}, number = {7}, doi = {10.3390/aerospace7070098}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-236124}, year = {2020}, abstract = {The electric propulsion system NanoFEEP was integrated and tested in orbit on the UWE-4 satellite, which marks the first successful demonstration of an electric propulsion system on board a 1U CubeSat. In-orbit characterization measurements of the heating process of the propellant and the power consumption of the propulsion system at different thrust levels are presented. Furthermore, an analysis of the thrust vector direction based on its effect on the attitude of the spacecraft is described. The employed heater liquefies the propellant for a duration of 30 min per orbit and consumes 103 ± 4 mW. During this time, the respective thruster can be activated. The propulsion system including one thruster head, its corresponding heater, the neutralizer and the digital components of the power processing unit consumes 8.5 ± 0.1 mW⋅μA\(^{-1}\) + 184 ± 8.5 mW and thus scales with the emitter current. The estimated thrust directions of two thruster heads are at angles of 15.7 ± 7.6° and 13.2 ± 5.5° relative to their mounting direction in the CubeSat structure. In light of the very limited power on a 1U CubeSat, the NanoFEEP propulsion system represents a very viable option.
The heater of subsequent NanoFEEP thrusters has already been improved, such that the system can be activated during the whole orbit period.}, language = {en} } @phdthesis{Wolf2017, author = {Wolf, Beat}, title = {Reducing the complexity of OMICS data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153687}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {The field of genetics faces a lot of challenges and opportunities in both research and diagnostics due to the rise of next generation sequencing (NGS), a technology that allows DNA to be sequenced increasingly fast and cheaply. NGS is not only used to analyze DNA, but also RNA, which is a very similar molecule also present in the cell, in both cases producing large amounts of data. The large amount of data raises both infrastructure and usability problems, as powerful computing infrastructures are required and there are many manual steps in the data analysis which are complicated to execute. Both of those problems limit the use of NGS in the clinic and research, by producing a bottleneck both computationally and in terms of manpower, as for many analyses geneticists lack the required computing skills. Over the course of this thesis we investigated how computer science can help to improve this situation and reduce the complexity of this type of analysis. We looked at how to make the analysis more accessible to increase the number of people that can perform OMICS data analysis (OMICS groups various genomics data-sources). To approach this problem, we developed, in close collaboration with the Human Genetics Department at the University of W{\"u}rzburg, a graphical NGS data analysis pipeline aimed at a diagnostics environment while still being useful in research. The pipeline has been used in various research papers covering subjects in genomics, transcriptomics as well as epigenomics, including works with direct author participation. To further validate the graphical pipeline, a user survey was carried out which confirmed that it lowers the complexity of OMICS data analysis. We also studied how the data analysis can be improved in terms of computing infrastructure by improving the performance of certain analysis steps. We did this both in terms of speed improvements on a single computer (with notably variant calling being faster by up to 18 times), as well as with distributed computing to better use an existing infrastructure. The improvements were integrated into the previously described graphical pipeline, which itself was also focused on low resource usage. As a major contribution and to help with future development of parallel and distributed applications, for the usage in genetics or otherwise, we also looked at how to make it easier to develop such applications. Based on the parallel object programming model (POP), we created a Java language extension called POP-Java, which allows for easy and transparent distribution of objects. Through this development, we brought the POP model to the cloud and to Hadoop clusters, and we present a new collaborative distributed computing model called FriendComputing.
The advances made in the different domains of this thesis have been published in various works specified in this document.}, subject = {Bioinformatik}, language = {en} } @phdthesis{Herbst2018, author = {Herbst, Nikolas Roman}, title = {Methods and Benchmarks for Auto-Scaling Mechanisms in Elastic Cloud Environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-164314}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {A key functionality of cloud systems are automated resource management mechanisms at the infrastructure level. As part of this, elastic scaling of allocated resources is realized by so-called auto-scalers that are supposed to match the current demand in a way that the performance remains stable while resources are efficiently used. The process of rating cloud infrastructure offerings in terms of the quality of their achieved elastic scaling remains undefined. Clear guidance for the selection and configuration of an auto-scaler for a given context is not available. Thus, existing operating solutions are optimized in a highly application specific way and usually kept undisclosed. The common state of practice is the use of simplistic threshold-based approaches. Due to their reactive nature they incur performance degradation during the minutes of provisioning delays. In the literature, a high-number of auto-scalers has been proposed trying to overcome the limitations of reactive mechanisms by employing proactive prediction methods. In this thesis, we identify potentials in automated cloud system resource management and its evaluation methodology. Specifically, we make the following contributions: We propose a descriptive load profile modeling framework together with automated model extraction from recorded traces to enable reproducible workload generation with realistic load intensity variations. The proposed Descartes Load Intensity Model (DLIM) with its Limbo framework provides key functionality to stress and benchmark resource management approaches in a representative and fair manner. We propose a set of intuitive metrics for quantifying timing, stability and accuracy aspects of elasticity. Based on these metrics, we propose a novel approach for benchmarking the elasticity of Infrastructure-as-a-Service (IaaS) cloud platforms independent of the performance exhibited by the provisioned underlying resources. We tackle the challenge of reducing the risk of relying on a single proactive auto-scaler by proposing a new self-aware auto-scaling mechanism, called Chameleon, combining multiple different proactive methods coupled with a reactive fallback mechanism. Chameleon employs on-demand, automated time series-based forecasting methods to predict the arriving load intensity in combination with run-time service demand estimation techniques to calculate the required resource consumption per work unit without the need for a detailed application instrumentation. It can also leverage application knowledge by solving product-form queueing networks used to derive optimized scaling actions. The Chameleon approach is first in resolving conflicts between reactive and proactive scaling decisions in an intelligent way. We are confident that the contributions of this thesis will have a long-term impact on the way cloud resource management approaches are assessed. 
While this could result in an improved quality of autonomic management algorithms, we see and discuss arising challenges for future research in cloud resource management and its assessment methods: The adoption of containerization on top of virtual machine instances introduces another level of indirection. As a result, the nesting of virtual resources increases resource fragmentation and causes unreliable provisioning delays. Furthermore, virtualized compute resources tend to become more and more inhomogeneous associated with various priorities and trade-offs. Due to DevOps practices, cloud hosted service updates are released with a higher frequency which impacts the dynamics in user behavior.}, subject = {Cloud Computing}, language = {en} } @phdthesis{YazdaniRashvanlouei2017, author = {Yazdani Rashvanlouei, Kourosh}, title = {Developing a Framework for International Projects of ERP Implementation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-154000}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Enterprise Systeme werden immer mehr von Bedeutung, was sie in die Mitte der Aufmerksamkeit und der Ber{\"u}cksichtigung durch Organisationen in verschiedensten Formen r{\"u}ckt - seien es Unternehmen oder Industrien von riesigen {\"o}ffentlichen oder privaten Organisationen bis hin zu mittleren und kleinen Dienstleistungsunternehmen. Diese Systeme verbessern sich st{\"a}ndig, sowohl funktionell, als auch technologisch und sie sind unumg{\"a}nglich f{\"u}r Unternehmen, um ihre Produktivit{\"a}t zu vergr{\"o}ßern und um in dem nationalen und globalen Wettbewerb mitzuhalten. Da lokale Softwarel{\"o}sungen die Bedingungen, speziell von großen Betrieben, funktionell und technologisch nicht erf{\"u}llen konnten und da riesige globale Softwarehersteller, wie SAP, Oracle und Microsoft ihre L{\"o}sungen rapide verbessern und sie ihren Markt immer mehr {\"u}ber den Globus expandieren, nimmt die Nachfrage f{\"u}r diese globalen Marken und deren nahezu einwandfreien Softwarel{\"o}sungen t{\"a}glich zu. Die Zustimmung f{\"u}r internationale ERP Unternehmensberatungsanwendungen nimmt deswegen exponentiell zu, w{\"a}hrend die Forschung der beeinflussenden Faktoren und des Fachwissens wenig verbreitet ist. Deswegen ist es so dringlich, dieses Gebiet zu erforschen. Das schlussendliche f{\"u}nf-in-f{\"u}nf Framework dieser Studie sammelt zum ersten Mal in der Geschichte alle historisch erw{\"a}hnten, kritischen Erfolgsfaktoren und Projektaktivit{\"a}ten. Diese wurden in f{\"u}nf Phasen unterteilt und nach den f{\"u}nf Schwerpunkten der internationalen ERP Projektdurchf{\"u}hrung kategorisiert. Dieses Framework bietet einen {\"U}berblick und bildet einen umfassenden Fahrplan f{\"u}r solche Projekte.}, subject = {ERP}, language = {en} } @phdthesis{Ring2021, author = {Ring, Markus}, title = {Detektion sicherheitskritischer Ereignisse in Unternehmensnetzwerken mittels Data Mining}, doi = {10.25972/OPUS-21956}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219561}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {E-Mails, Online Banking und Videokonferenzen sind aus unserem heutigen Alltag nicht mehr wegzudenken. Bei all diesen Aktivit{\"a}ten werden zahlreiche personenbezogene Informationen und vertrauensw{\"u}rdige Daten digital {\"u}bertragen und gespeichert. 
Zur Sicherstellung der digitalen Daten vor unbefugten Zugriffen und Manipulationen existieren verschiedenste Konzepte, Methoden und Verfahren, die sich unter dem Begriff IT-Sicherheit zusammenfassen lassen. Klassische Sicherheitsl{\"o}sungen aus dem Bereich IT-Sicherheit sind Firewalls und Virenscanner. Derartige Ans{\"a}tze sind meist regelbasiert und pr{\"u}fen Dateien beziehungsweise eingehenden Netzwerkverkehr anhand einer Liste bekannter Angriffssignaturen. Folglich k{\"o}nnen diese Systeme nur bereits bekannte Angriffsszenarien detektieren und bieten keinen Schutz vor neuartigen Angriffen. Somit entsteht im Bereich IT-Sicherheit ein Wettlauf zwischen Hackern und IT-Sicherheitsexperten, bei dem die Hacker stets nach neuen Mitteln und Wegen suchen, die existierenden Sicherheitsl{\"o}sungen zu {\"u}berwinden, w{\"a}hrend IT-Sicherheitsexperten stetig ihre Schutzmechanismen verbessern. Die vorliegende Arbeit widmet sich der Detektion von Angriffsszenarien in Unternehmensnetzwerken mithilfe von Data Mining-Methoden. Diese Methoden sind in der Lage anhand von repr{\"a}sentativen Daten die darin enthaltenen Strukturen zu erlernen und zu generalisieren. Folglich k{\"o}nnen sich Data Mining-Methoden grunds{\"a}tzlich zur Detektion neuer Angriffsszenarien eignen, wenn diese Angriffsszenarien {\"U}berschneidungen mit bekannten Angriffsszenarien aufweisen oder sich wesentlich vom bekannten Normalverhalten unterscheiden. In dieser Arbeit werden netzwerkbasierte Daten im NetFlow Format analysiert, da diese einen aggregierten {\"U}berblick {\"u}ber das Geschehen im Netzwerk bieten. H{\"a}ufig k{\"o}nnen Netzwerkdaten aufgrund datenschutzrechtlicher Bedenken nicht ver{\"o}ffentlicht werden, was f{\"u}r die Erzeugung synthetischer, aber realistischer Netzwerkdaten spricht. Des Weiteren f{\"u}hrt die Beschaffenheit der Netzwerkdaten dazu, dass eine Kombination von kontinuierlichen und kategorischen Attributen analysiert werden muss, was vor allem das Vergleichen der Daten bez{\"u}glich ihrer {\"A}hnlichkeit erschwert. Diese Arbeit liefert methodische Beitr{\"a}ge zu jeder der drei genannten Herausforderungen. Im Bereich der Abstandsberechnung kategorischer Werte werden mit ConDist und IP2Vec zwei unterschiedliche Ans{\"a}tze entwickelt. ConDist ist ein universell einsetzbares Abstandsmaß zur Berechnung von Abst{\"a}nden zwischen Datenpunkten, die aus kontinuierlichen und kategorischen Attributen bestehen. IP2Vec ist auf Netzwerkdaten spezialisiert und transformiert kategorische Werte in kontinuierliche Vektoren. Im Bereich der Generierung realistischer Netzwerkdaten werden neben einer ausf{\"u}hrlichen Literaturrecherche zwei unterschiedliche Ans{\"a}tze vorgestellt. Zun{\"a}chst wird ein auf Simulation basierter Ansatz zur Generierung flowbasierter Datens{\"a}tze entwickelt. Dieser Ansatz basiert auf einer Testumgebung und simuliert typische Benutzeraktivit{\"a}ten durch automatisierte Python Skripte. Parallel hierzu wird ein zweiter Ansatz zur synthetischen Generierung flowbasierter Netzwerkdaten durch Modellierung mithilfe von Generative Adversarial Networks entwickelt. Dieser Ansatz erlernt die zugrundeliegenden Eigenschaften der Netzwerkdaten und ist anschließend in der Lage, neue Netzwerkdaten mit gleichen Eigenschaften zu generieren.W{\"a}hrend sich der erste Ansatz zur Erstellung neuer Datens{\"a}tze eignet, kann der zweite Ansatz zur Anreicherung existierender Datens{\"a}tze genutzt werden. Schließlich liefert diese Arbeit noch zwei Beitr{\"a}ge zur Detektion von Angriffsszenarien. 
Im ersten Beitrag wird ein Konzept zur Detektion von Angriffsszenarien entwickelt, welches sich an die typischen Phasen eines Angriffsszenarios orientiert. Im zweiten Beitrag werden eine {\"u}berwachte und eine un{\"u}berwachte Methode zur Detektion von langsamen Port Scans vorgestellt.}, subject = {Data Mining}, language = {de} } @techreport{MetzgerSchroederRafetseder2021, type = {Working Paper}, author = {Metzger, Florian and Schr{\"o}der, Svenja and Rafetseder, Albert}, title = {Subjective And Objective Assessment Of Video Game Context Factors}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-242471}, pages = {7}, year = {2021}, abstract = {The recently published ITU-T Recommendation G1.032 proposes a list of factors that may influence cloud and online gaming Quality of Experience (QoE). This paper provides two practical evaluations of proposed system and context influence factors: First, it investigates through an online survey (n=488) the popularity of platforms, preferred ways of distribution, and motivational aspects including subjective valuations of characteristics offered by today's prevalent gaming platforms. Second, the paper evaluates a large dataset of objective metrics for various gaming platforms: game lists, playthrough lengths, prices, etc., and contrasts these metrics against the gamers' opinions. The combined data-driven approach presented in this paper complements in-person and lab studies usually employed.}, subject = {Videospiel}, language = {en} } @techreport{MetzgerRafetsederSchroederetal.2016, type = {Working Paper}, author = {Metzger, Florian and Rafetseder, Albert and Schr{\"o}der, Svenja and Zwickl, Patrick}, title = {The Prospects of Cloud Gaming: Do the Benefits Outweigh the Costs?}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-242452}, pages = {10}, year = {2016}, abstract = {In recent years, cloud gaming has become a popular research topic and has claimed many benefits in the commercial domain over conventional gaming. While, cloud gaming platforms have frequently failed in the past, they have received a new impetus over the last years that brought it to the edge of commercial breakthrough. The fragility of the cloud gaming market may be caused by the high investment costs, offered pricing models or competition from existing "{\`a} la carte" platforms. This paper aims at investigating the costs and benefits of both platform types through a twofold approach. We first take on the perspective of the customers, and investigate several cloud gaming platforms and their pricing models in comparison to the costs of other gaming platforms. Then, we explore engagement metrics in order to assess the enjoyment of playing the offered games. Lastly, coming from the perspective of the service providers, we aim to identify challenges in cost-effectively operating a large-scale cloud gaming service while maintaining high QoE values. 
Our analysis provides initial, yet still comprehensive reasons and models for the prospects of cloud gaming in a highly competitive market.}, subject = {Cloud Computing}, language = {en} } @book{TranGiaHossfeld2021, author = {Tran-Gia, Phuoc and Hoßfeld, Tobias}, title = {Performance Modeling and Analysis of Communication Networks}, edition = {1st edition}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-152-5}, doi = {10.25972/WUP-978-3-95826-153-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241920}, pages = {xiii, 353}, year = {2021}, abstract = {This textbook provides an introduction to common methods of performance modeling and analysis of communication systems. These methods form the basis of traffic engineering, teletraffic theory, and analytical system dimensioning. The fundamentals of probability theory, stochastic processes, Markov processes, and embedded Markov chains are presented. Basic queueing models are described with applications in communication networks. Advanced methods are presented that have been frequently used in recent practice, especially discrete-time analysis algorithms, or which go beyond classical performance measures such as Quality of Experience or energy efficiency. Recent examples of modern communication networks include Software Defined Networking and the Internet of Things. Throughout the book, illustrative examples are used to provide practical experience in performance modeling and analysis. Target group: The book is aimed at students and scientists in computer science and technical computer science, operations research, electrical engineering and economics.}, language = {en} } @phdthesis{Pfitzner2019, author = {Pfitzner, Christian}, title = {Visual Human Body Weight Estimation with Focus on Clinical Applications}, isbn = {978-3-945459-27-0 (online)}, doi = {10.25972/OPUS-17484}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174842}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {It is the aim of this thesis to present a visual body weight estimation which is suitable for medical applications. A typical scenario where the estimation of the body weight is essential is the emergency treatment of stroke patients: In case of an ischemic stroke, the patient has to receive a body-weight-adapted drug to dissolve a blood clot in a vessel. The accuracy of the estimated weight influences the outcome of the therapy directly. However, the treatment has to start as early as possible after the arrival at a trauma room to provide sufficient treatment. Weighing a patient takes time, and the patient has to be moved. Furthermore, patients are often not able to communicate a value for their body weight due to their stroke symptoms. Therefore, it is state of the art that physicians guess the body weight. A patient receiving too low a dose has an increased risk that the blood clot does not dissolve and brain tissue is permanently damaged. Today, about one-third of patients receive an insufficient dosage. In contrast to that, an overdose can cause bleeding and further complications. Physicians are aware of this issue, but a reliable alternative is missing. The thesis presents state-of-the-art principles and devices for the measurement and estimation of body weight in the context of medical applications. While scales are common and available at a hospital, the process of weighing takes too long and can hardly be integrated into the process of stroke treatment.
Sensor systems and algorithms are presented in the section for related work and provide an overview of different approaches. The here presented system -- called Libra3D -- consists of a computer installed in a real trauma room, as well as visual sensors integrated into the ceiling. For the estimation of the body weight, the patient is on a stretcher which is placed in the field of view of the sensors. The three sensors -- two RGB-D and a thermal camera -- are calibrated intrinsically and extrinsically. Also, algorithms for sensor fusion are presented to align the data from all sensors which is the base for a reliable segmentation of the patient. A combination of state-of-the-art image and point cloud algorithms is used to localize the patient on the stretcher. The challenges in the scenario with the patient on the bed is the dynamic environment, including other people or medical devices in the field of view. After the successful segmentation, a set of hand-crafted features is extracted from the patient's point cloud. These features rely on geometric and statistical values and provide a robust input to a subsequent machine learning approach. The final estimation is done with a previously trained artificial neural network. The experiment section offers different configurations of the previously extracted feature vector. Additionally, the here presented approach is compared to state-of-the-art methods; the patient's own assessment, the physician's guess, and an anthropometric estimation. Besides the patient's own estimation, Libra3D outperforms all state-of-the-art estimation methods: 95 percent of all patients are estimated with a relative error of less than 10 percent to ground truth body weight. It takes only a minimal amount of time for the measurement, and the approach can easily be integrated into the treatment of stroke patients, while physicians are not hindered. Furthermore, the section for experiments demonstrates two additional applications: The extracted features can also be used to estimate the body weight of people standing, or even walking in front of a 3D camera. Also, it is possible to determine or classify the BMI of a subject on a stretcher. A potential application for this approach is the reduction of the radiation dose of patients being exposed to X-rays during a CT examination. During the time of this thesis, several data sets were recorded. These data sets contain the ground truth body weight, as well as the data from the sensors. They are available for the collaboration in the field of body weight estimation for medical applications.}, subject = {Punktwolke}, language = {en} } @phdthesis{Borchert2020, author = {Borchert, Kathrin Johanna}, title = {Estimating Quality of Experience of Enterprise Applications - A Crowdsourcing-based Approach}, issn = {1432-8801}, doi = {10.25972/OPUS-21697}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-216978}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Nowadays, employees have to work with applications, technical services, and systems every day for hours. Hence, performance degradation of such systems might be perceived negatively by the employees, increase frustration, and might also have a negative effect on their productivity. The assessment of the application's performance in order to provide a smooth operation of the application is part of the application management. 
Within this process it is not sufficient to assess the system performance solely based on technical performance parameters, e.g., response or loading times. These values have to be set in relation to the perceived performance quality on the user's side - the quality of experience (QoE). This dissertation focuses on the monitoring and estimation of the QoE of enterprise applications. As building models to estimate the QoE requires quality ratings from the users as ground truth, one part of this work addresses methods to collect such ratings. Besides the evaluation of approaches to improve the quality of results of tasks and studies completed on crowdsourcing platforms, a general concept for monitoring and estimating QoE in enterprise environments is presented. Here, relevant design dimensions of subjective studies are identified and their impact on the QoE is evaluated and discussed. By considering the findings, a methodology for collecting quality ratings from employees during their regular work is developed. The method is realized by implementing a tool to conduct short surveys and deployed in a cooperating company. As a foundation for learning QoE estimation models, this work investigates the relationship between user-provided ratings and technical performance parameters. This analysis is based on a data set collected in a user study in a cooperating company during a time span of 1.5 years. Finally, two QoE estimation models are introduced and their performance is evaluated.}, subject = {Quality of Experience}, language = {en} } @phdthesis{Koch2018, author = {Koch, Rainer}, title = {Sensor Fusion for Precise Mapping of Transparent and Specular Reflective Objects}, isbn = {978-3-945459-25-6}, doi = {10.25972/OPUS-16346}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-163462}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Almost once a week, broadcasts about earthquakes, hurricanes, tsunamis, or forest fires fill the news. While it is hard to watch such news, it is even harder for rescue troops to enter such areas. They need some skills to get a quick overview of the devastated area and find victims. Time is ticking, since the chance for survival shrinks the longer it takes till help is available. To coordinate the teams efficiently, all information needs to be collected at the command center. Therefore, teams investigate the destroyed houses and hollow spaces for victims. Doing so, they can never be sure that the building will not fully collapse while they are inside. Here, rescue robots are welcome helpers, as they are replaceable and make the work safer. Unfortunately, rescue robots are not usable off-the-shelf yet. There is no doubt that such a robot has to fulfil essential requirements to successfully accomplish a rescue mission. Apart from the mechanical requirements, it has to be able to build a 3D map of the environment. This is essential to navigate through rough terrain and fulfil manipulation tasks (e.g. open doors). To build a map and gather environmental information, robots are equipped with multiple sensors. Since laser scanners produce precise measurements and support a wide scanning range, they are common visual sensors utilized for mapping. Unfortunately, they produce erroneous measurements when scanning transparent (e.g. glass, transparent plastic) or specular reflective objects (e.g. mirror, shiny metal). It is understood that such objects can be everywhere and a pre-manipulation to prevent their influences is impossible.
Using additional sensors also bears risks. The problem is that these objects are only occasionally visible, depending on the incident angle of the laser beam, the surface, and the type of object. Hence, for transparent objects, measurements might result from the object surface or objects behind it. For specular reflective objects, measurements might result from the object surface or a mirrored object. These mirrored objects appear behind the surface, which is wrong. To obtain a precise map, the surfaces need to be recognised and mapped reliably. Otherwise, the robot navigates into them and crashes. Further, points behind the surface should be identified and treated based on the object type. Points behind a transparent surface should remain, as they represent real objects. In contrast, points behind a specular reflective surface should be erased. To do so, the object type needs to be classified. Unfortunately, none of the current approaches is capable of fulfilling these requirements. Therefore, this thesis addresses the problem of detecting transparent and specular reflective objects and identifying their influences. To give the reader a starting point, the first chapters describe: the theoretical background concerning propagation of light; sensor systems applied for range measurements; mapping approaches used in this work; and the state of the art concerning detection and identification of transparent and specular reflective objects. Afterwards, the Reflection-Identification-Approach, which is the core of this thesis, is presented. It describes a 2D and a 3D implementation to detect and classify such objects. Both are available as ROS nodes. In the next chapter, various experiments demonstrate the applicability and reliability of these nodes. They prove that transparent and specular reflective objects can be detected and classified. Therefore, a Pre- and Post-Filter module is required in 2D. In 3D, classification is possible solely with the Pre-Filter. This is due to the higher amount of measurements. An example shows that an updatable mapping module allows the robot navigation to rely on refined maps. Otherwise, two individual maps are built which require a fusion afterwards. Finally, the last chapter summarizes the results and proposes suggestions for future work.}, subject = {laserscanner}, language = {en} } @phdthesis{Gall2022, author = {Gall, Dominik}, title = {Increasing the effectiveness of human-computer interfaces for mental health interventions}, doi = {10.25972/OPUS-23012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-230120}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Human-computer interfaces have the potential to support mental health practitioners in alleviating mental distress. Adoption of this technology in practice is, however, slow. We provide means to extend the design space of human-computer interfaces for mitigating mental distress. To this end, we suggest three complementary approaches: using presentation technology, using virtual environments, and using communication technology to facilitate social interaction. We provide new evidence that elementary aspects of presentation technology affect the emotional processing of virtual stimuli, that perception of our environment affects the way we assess our environment, and that communication technologies affect social bonding between users.
By showing how interfaces modify emotional reactions and facilitate social interaction, we provide converging evidence that human-computer interfaces can help alleviate mental distress. These findings may advance the goal of adapting technological means to the requirements of mental health practitioners.}, subject = {Mensch-Maschine-Kommunikation}, language = {en} } @article{BorchertSeufertGamboaetal.2020, author = {Borchert, Kathrin and Seufert, Anika and Gamboa, Edwin and Hirth, Matthias and Hoßfeld, Tobias}, title = {In Vitro vs In Vivo: Does the Study's Interface Design Influence Crowdsourced Video QoE?}, series = {Quality and User Experience}, volume = {6}, journal = {Quality and User Experience}, issn = {2366-0139}, doi = {10.1007/s41233-020-00041-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-235586}, year = {2020}, abstract = {Evaluating the Quality of Experience (QoE) of video streaming and its influence factors has become paramount for streaming providers, as they want to maintain high satisfaction for their customers. In this context, crowdsourced user studies became a valuable tool to evaluate different factors which can affect the perceived user experience on a large scale. In general, most of these crowdsourcing studies either use, what we refer to, as an in vivo or an in vitro interface design. In vivo design means that the study participant has to rate the QoE of a video that is embedded in an application similar to a real streaming service, e.g., YouTube or Netflix. In vitro design refers to a setting, in which the video stream is separated from a specific service and thus, the video plays on a plain background. Although these interface designs vary widely, the results are often compared and generalized. In this work, we use a crowdsourcing study to investigate the influence of three interface design alternatives, an in vitro and two in vivo designs with different levels of interactiveness, on the perceived video QoE. Contrary to our expectations, the results indicate that there is no significant influence of the study's interface design in general on the video experience. Furthermore, we found that the in vivo design does not reduce the test takers' attentiveness. However, we observed that participants who interacted with the test interface reported a higher video QoE than other groups.}, language = {en} } @article{Seufert2021, author = {Seufert, Michael}, title = {Statistical methods and models based on quality of experience distributions}, series = {Quality and User Experience}, volume = {6}, journal = {Quality and User Experience}, issn = {2366-0139}, doi = {10.1007/s41233-020-00044-z}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-235733}, year = {2021}, abstract = {Due to biased assumptions on the underlying ordinal rating scale in subjective Quality of Experience (QoE) studies, Mean Opinion Score (MOS)-based evaluations provide results, which are hard to interpret and can be misleading. This paper proposes to consider the full QoE distribution for evaluating, reporting, and modeling QoE results instead of relying on MOS-based metrics derived from results based on ordinal rating scales. The QoE distribution can be represented in a concise way by using the parameters of a multinomial distribution without losing any information about the underlying QoE ratings, and even keeps backward compatibility with previous, biased MOS-based results. 
Considering QoE results as a realization of a multinomial distribution allows to rely on a well-established theoretical background, which enables meaningful evaluations also for ordinal rating scales. Moreover, QoE models based on QoE distributions keep detailed information from the results of a QoE study of a technical system, and thus, give an unprecedented richness of insights into the end users' experience with the technical system. In this work, existing and novel statistical methods for QoE distributions are summarized and exemplary evaluations are outlined. Furthermore, using the novel concept of quality steps, simulative and analytical QoE models based on QoE distributions are presented and showcased. The goal is to demonstrate the fundamental advantages of considering QoE distributions over MOS-based evaluations if the underlying rating data is ordinal in nature.}, language = {en} } @phdthesis{Moldovan2021, author = {Moldovan, Christian}, title = {Performance Modeling of Mobile Video Streaming}, issn = {1432-8801}, doi = {10.25972/OPUS-22871}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-228715}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {In the past two decades, there has been a trend to move from traditional television to Internet-based video services. With video streaming becoming one of the most popular applications in the Internet and the current state of the art in media consumption, quality expectations of consumers are increasing. Low quality videos are no longer considered acceptable in contrast to some years ago due to the increased sizes and resolution of devices. If the high expectations of the users are not met and a video is delivered in poor quality, they often abandon the service. Therefore, Internet Service Providers (ISPs) and video service providers are facing the challenge of providing seamless multimedia delivery in high quality. Currently, during peak hours, video streaming causes almost 58\\% of the downstream traffic on the Internet. With higher mobile bandwidth, mobile video streaming has also become commonplace. According to the 2019 Cisco Visual Networking Index, in 2022 79\% of mobile traffic will be video traffic and, according to Ericsson, by 2025 video is forecasted to make up 76\% of total Internet traffic. Ericsson further predicts that in 2024 over 1.4 billion devices will be subscribed to 5G, which will offer a downlink data rate of 100 Mbit/s in dense urban environments. One of the most important goals of ISPs and video service providers is for their users to have a high Quality of Experience (QoE). The QoE describes the degree of delight or annoyance a user experiences when using a service or application. In video streaming the QoE depends on how seamless a video is played and whether there are stalling events or quality degradations. These characteristics of a transmitted video are described as the application layer Quality of Service (QoS). In general, the QoS is defined as "the totality of characteristics of a telecommunications service that bear on its ability to satisfy stated and implied needs of the user of the service" by the ITU. The network layer QoS describes the performance of the network and is decisive for the application layer QoS. In Internet video, typically a buffer is used to store downloaded video segments to compensate for network fluctuations. If the buffer runs empty, stalling occurs. 
If the available bandwidth decreases temporarily, the video can still be played out from the buffer without interruption. There are different policies and parameters that determine how large the buffer is, at what buffer level to start the video, and at what buffer level to resume playout after stalling. These have to be finely tuned to achieve the highest QoE for the user. If the bandwidth decreases for a longer time period, a limited buffer will deplete and stalling cannot be avoided. An important research question is how to configure the buffer optimally for different users and situations. In this work, we tackle this question using analytic models and measurement studies. With HTTP Adaptive Streaming (HAS), video players have the capability to adapt the video bit rate at the client side according to the available network capacity. This way, the depletion of the video buffer, and thus stalling, can be avoided. In HAS, the quality in which the video is played and the number of quality switches also have an impact on the QoE. Thus, an important problem is the adaptation of video streaming so that these parameters are optimized. In a shared WiFi, multiple video users share a single bottleneck link and compete for bandwidth. In such a scenario, it is important that resources are allocated to users in a way that all can have a similar QoE. In this work, we therefore investigate the possible fairness gain when moving from network fairness towards application-layer QoS fairness. In mobile scenarios, the energy and data consumption of the user device are limited resources that must be managed besides the QoE. Therefore, it is also necessary to investigate solutions that conserve these resources in mobile devices. But how can resources be conserved without sacrificing application layer QoS? As an example of such a solution, this work presents a new probabilistic adaptation algorithm that uses abandonment statistics for its decision making, aiming at minimizing resource consumption while maintaining high QoS. With current protocol developments such as 5G, bandwidths are increasing, latencies are decreasing, and networks are becoming more stable, leading to higher QoS. This allows new real-time, data-intensive applications such as cloud gaming, virtual reality, and augmented reality to become feasible on mobile devices, which poses completely new research questions. The high energy consumption of such applications still remains an issue, as the energy capacity of devices is currently not increasing as quickly as the available data rates. In this work we compare the optimal performance of different strategies for adaptive 360-degree video streaming.}, subject = {Video{\"u}bertragung}, language = {en} } @phdthesis{Dose2021, author = {Dose, Titus}, title = {Balance Problems for Integer Circuits and Separations of Relativized Conjectures on Incompleteness in Promise Classes}, doi = {10.25972/OPUS-22220}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-222209}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {This thesis is divided into two parts. In the first part we contribute to a working program initiated by Pudl{\'a}k (2017), who lists several major complexity-theoretic conjectures relevant to proof complexity and asks for oracles that separate pairs of corresponding relativized conjectures. Among these conjectures are: - \(\mathsf{CON}\) and \(\mathsf{SAT}\): coNP (resp., NP) does not contain complete sets that have P-optimal proof systems.
- \(\mathsf{CON}^{\mathsf{N}}\): coNP does not contain complete sets that have optimal proof systems. - \(\mathsf{TFNP}\): there do not exist complete total polynomial search problems (also known as total NP search problems). - \(\mathsf{DisjNP}\) and \(\mathsf{DisjCoNP}\): there do not exist complete disjoint NP pairs (coNP pairs). - \(\mathsf{UP}\): UP does not contain complete problems. - \(\mathsf{NP}\cap\mathsf{coNP}\): \(\mathrm{NP}\cap\mathrm{coNP}\) does not contain complete problems. - \(\mathrm{P}\ne\mathrm{NP}\). We construct several of the oracles that Pudl{\'a}k asks for. In the second part we investigate the computational complexity of balance problems for \(\{-,\cdot\}\)-circuits computing finite sets of natural numbers (note that \(-\) denotes the set difference). These problems naturally build on problems for integer expressions and integer circuits studied by Stockmeyer and Meyer (1973), McKenzie and Wagner (2007), and Glaßer et al. (2010). Our work shows that the balance problem for \(\{-,\cdot\}\)-circuits is undecidable, making it the first natural problem for integer circuits or related constraint satisfaction problems that admits only one arithmetic operation and is proven to be undecidable. Starting from this result, we precisely characterize the complexity of balance problems for proper subsets of \(\{-,\cdot\}\). These problems turn out to be complete for one of the classes L, NL, and NP.}, subject = {NP-vollst{\"a}ndiges Problem}, language = {en} }