@article{MontenegroDannemann2011, author = {Montenegro, Sergio and Dannemann, Frank}, title = {Experiences and Best Practice Requirements Engineering for Small Satellites}, series = {Computing Science and Technology International Journal}, volume = {1}, journal = {Computing Science and Technology International Journal}, number = {2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153307}, year = {2011}, abstract = {The design and implementation of a satellite mission is divided into several different phases. Parallel to these phases an evolution of requirements will take place. Because so many people in different locations and from different backgrounds have to work in different subsystems concurrently the ideas and concepts of different subsystems and different locations will diverge. We have to bring them together again. To do this we introduce synchronization points. We bring representatives from all subsystems and all locations in a Concurrent Engineering Facility (CEF) room together. Between CEF sessions the different subsystems will diverge again, but each time the diversion will be smaller. Our subjective experience from test projects says these CEF sessions are most effective in the first phases of the development, from Requirements engineering until first coarse design. 
After Design and the concepts are fix, the developers are going to implementation and the concept divergences will be much smaller, therefore the CEF sessions are not a very big help any more.}, language = {en} } @phdthesis{Rehfeld2016, author = {Rehfeld, Stephan}, title = {Untersuchung der Nebenl{\"a}ufigkeit, Latenz und Konsistenz asynchroner Interaktiver Echtzeitsysteme mittels Profiling und Model Checking}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147431}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Im Rahmen dieser Arbeit werden die Nebenl{\"a}ufigkeit, Konsistenz und Latenz in asynchronen Interaktiven Echtzeitsystemen durch die Techniken des Profilings und des Model Checkings untersucht. Zu Beginn wird erl{\"a}utert, warum das asynchrone Modell das vielversprechendste f{\"u}r die Nebenl{\"a}ufigkeit in einem Interaktiven Echtzeitsystem ist. Hierzu wird ein Vergleich zu anderen Modellen gezogen. Dar{\"u}ber hinaus wird ein detaillierter Vergleich von Synchronisationstechnologien, welche die Grundlage f{\"u}r Konsistenz schaffen, durchgef{\"u}hrt. Auf der Grundlage dieser beiden Vergleiche und der Betrachtung anderer Systeme wird ein Synchronisationskonzept entwickelt. Auf dieser Basis wird die Nebenl{\"a}ufigkeit, Konsistenz und Latenz mit zwei Verfahren untersucht. Die erste Technik ist das Profiling, wobei einige neue Darstellungsformen von gemessenen Daten entwickelt werden. Diese neu entwickelten Darstellungsformen werden in der Implementierung eines Profilers verwendet. Als zweite Technik wird das Model Checking analysiert, welches bisher noch nicht im Kontext von Interaktiven Echtzeitsystemen verwendet wurde. Model Checking dient dazu, die Verhaltensweise eines Interaktiven Echtzeitsystems vorherzusagen. 
Diese Vorhersagen werden mit den Messungen aus dem Profiler verglichen.}, subject = {Model Checking}, language = {de} } @phdthesis{Gebert2017, author = {Gebert, Steffen Christian}, title = {Architectures for Softwarized Networks and Their Performance Evaluation}, issn = {1432-8801}, doi = {10.25972/OPUS-15063}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150634}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {This thesis contributes to several issues in the context of SDN and NFV, with an emphasis on performance and management. The main contributions are guidelines for operators migrating to software-based networks, as well as an analytical model for the packet processing in a Linux system using the Kernel NAPI.}, subject = {Telekommunikationsnetz}, language = {en} } @phdthesis{Runge2017, author = {Runge, Armin}, title = {Advances in Deflection Routing based Network on Chips}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-149700}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {The progress which has been made in semiconductor chip production in recent years enables a multitude of cores on a single die. However, due to further decreasing structure sizes, fault tolerance and energy consumption will represent key challenges. Furthermore, an efficient communication infrastructure is indispensable due to the high parallelism at those systems. The predominant communication system at such highly parallel systems is a Network on Chip (NoC). The focus of this thesis is on NoCs which are based on deflection routing. In this context, contributions are made to two domains, fault tolerance and dimensioning of the optimal link width. Both aspects are essential for the application of reliable, energy efficient, and deflection routing based NoCs. It is expected that future semiconductor systems have to cope with high fault probabilities. 
The inherently given high connectivity of most NoC topologies can be exploited to tolerate the breakdown of links and other components. In this thesis, a fault-tolerant router architecture has been developed, which stands out for the deployed interconnection architecture and the method to overcome complex fault situations. The presented simulation results show, all data packets arrive at their destination, even at high fault probabilities. In contrast to routing table based architectures, the hardware costs of the herein presented architecture are lower and, in particular, independent of the number of components in the network. Besides fault tolerance, hardware costs and energy efficiency are of great importance. The utilized link width has a decisive influence on these aspects. In particular, at deflection routing based NoCs, over- and under-sizing of the link width leads to unnecessary high hardware costs and bad performance, respectively. In the second part of this thesis, the optimal link width at deflection routing based NoCs is investigated. Additionally, a method to reduce the link width is introduced. Simulation and synthesis results show, the herein presented method allows a significant reduction of hardware costs at comparable performance.}, subject = {Network-on-Chip}, language = {en} } @phdthesis{Milenkoski2016, author = {Milenkoski, Aleksandar}, title = {Evaluation of Intrusion Detection Systems in Virtualized Environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-141846}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Virtualization allows the creation of virtual instances of physical devices, such as network and processing units. In a virtualized system, governed by a hypervisor, resources are shared among virtual machines (VMs). Virtualization has been receiving increasing interest as a way to reduce costs through server consolidation and to enhance the flexibility of physical infrastructures. 
Although virtualization provides many benefits, it introduces new security challenges; that is, the introduction of a hypervisor introduces threats since hypervisors expose new attack surfaces. Intrusion detection is a common cyber security mechanism whose task is to detect malicious activities in host and/or network environments. This enables timely reaction in order to stop an on-going attack, or to mitigate the impact of a security breach. The wide adoption of virtualization has resulted in the increasingly common practice of deploying conventional intrusion detection systems (IDSs), for example, hardware IDS appliances or common software-based IDSs, in designated VMs as virtual network functions (VNFs). In addition, the research and industrial communities have developed IDSs specifically designed to operate in virtualized environments (i.e., hypervisor-based IDSs), with components both inside the hypervisor and in a designated VM. The latter are becoming increasingly common with the growing proliferation of virtualized data centers and the adoption of the cloud computing paradigm, for which virtualization is a key enabling technology. To minimize the risk of security breaches, methods and techniques for evaluating IDSs in an accurate manner are essential. For instance, one may compare different IDSs in terms of their attack detection accuracy in order to identify and deploy the IDS that operates optimally in a given environment, thereby reducing the risks of a security breach. However, methods and techniques for realistic and accurate evaluation of the attack detection accuracy of IDSs in virtualized environments (i.e., IDSs deployed as VNFs or hypervisor-based IDSs) are lacking. That is, workloads that exercise the sensors of an evaluated IDS and contain attacks targeting hypervisors are needed. 
Attacks targeting hypervisors are of high severity since they may result in, for example, altering the hypervisor's memory and thus enabling the execution of malicious code with hypervisor privileges. In addition, there are no metrics and measurement methodologies for accurately quantifying the attack detection accuracy of IDSs in virtualized environments with elastic resource provisioning (i.e., on-demand allocation or deallocation of virtualized hardware resources to VMs). Modern hypervisors allow for hotplugging virtual CPUs and memory on the designated VM where the intrusion detection engine of hypervisor-based IDSs, as well as of IDSs deployed as VNFs, typically operates. Resource hotplugging may have a significant impact on the attack detection accuracy of an evaluated IDS, which is not taken into account by existing metrics for quantifying IDS attack detection accuracy. This may lead to inaccurate measurements, which, in turn, may result in the deployment of misconfigured or ill-performing IDSs, increasing the risk of security breaches. This thesis presents contributions that span the standard components of any system evaluation scenario: workloads, metrics, and measurement methodologies. The scientific contributions of this thesis are: A comprehensive systematization of the common practices and the state-of-the-art on IDS evaluation. This includes: (i) a definition of an IDS evaluation design space allowing to put existing practical and theoretical work into a common context in a systematic manner; (ii) an overview of common practices in IDS evaluation reviewing evaluation approaches and methods related to each part of the design space; (iii) and a set of case studies demonstrating how different IDS evaluation approaches are applied in practice. 
Given the significant amount of existing practical and theoretical work related to IDS evaluation, the presented systematization is beneficial for improving the general understanding of the topic by providing an overview of the current state of the field. In addition, it is beneficial for identifying and contrasting advantages and disadvantages of different IDS evaluation methods and practices, while also helping to identify specific requirements and best practices for evaluating current and future IDSs. An in-depth analysis of common vulnerabilities of modern hypervisors as well as a set of attack models capturing the activities of attackers triggering these vulnerabilities. The analysis includes 35 representative vulnerabilities of hypercall handlers (i.e., hypercall vulnerabilities). Hypercalls are software traps from a kernel of a VM to the hypervisor. The hypercall interface of hypervisors, among device drivers and VM exit events, is one of the attack surfaces that hypervisors expose. Triggering a hypercall vulnerability may lead to a crash of the hypervisor or to altering the hypervisor's memory. We analyze the origins of the considered hypercall vulnerabilities, demonstrate and analyze possible attacks that trigger them (i.e., hypercall attacks), develop hypercall attack models (i.e., systematized activities of attackers targeting the hypercall interface), and discuss future research directions focusing on approaches for securing hypercall interfaces. A novel approach for evaluating IDSs enabling the generation of workloads that contain attacks targeting hypervisors, that is, hypercall attacks. We propose an approach for evaluating IDSs using attack injection (i.e., controlled execution of attacks during regular operation of the environment where an IDS under test is deployed). The injection of attacks is performed based on attack models that capture realistic attack scenarios. 
We use the hypercall attack models developed as part of this thesis for injecting hypercall attacks. A novel metric and measurement methodology for quantifying the attack detection accuracy of IDSs in virtualized environments that feature elastic resource provisioning. We demonstrate how the elasticity of resource allocations in such environments may impact the IDS attack detection accuracy and show that using existing metrics in such environments may lead to practically challenging and inaccurate measurements. We also demonstrate the practical use of the metric we propose through a set of case studies, where we evaluate common conventional IDSs deployed as VNFs. In summary, this thesis presents the first systematization of the state-of-the-art on IDS evaluation, considering workloads, metrics and measurement methodologies as integral parts of every IDS evaluation approach. In addition, we are the first to examine the hypercall attack surface of hypervisors in detail and to propose an approach using attack injection for evaluating IDSs in virtualized environments. Finally, this thesis presents the first metric and measurement methodology for quantifying the attack detection accuracy of IDSs in virtualized environments that feature elastic resource provisioning. From a technical perspective, as part of the proposed approach for evaluating IDSs, this thesis presents hInjector, a tool for injecting hypercall attacks. We designed hInjector to enable the rigorous, representative, and practically feasible evaluation of IDSs using attack injection. We demonstrate the application and practical usefulness of hInjector, as well as of the proposed approach, by evaluating a representative hypervisor-based IDS designed to detect hypercall attacks. While we focus on evaluating the capabilities of IDSs to detect hypercall attacks, the proposed IDS evaluation approach can be generalized and applied in a broader context. 
For example, it may be directly used to also evaluate security mechanisms of hypervisors, such as hypercall access control (AC) mechanisms. It may also be applied to evaluate the capabilities of IDSs to detect attacks involving operations that are functionally similar to hypercalls, for example, the input/output control (ioctl) calls that the Kernel-based Virtual Machine (KVM) hypervisor supports. For IDSs in virtualized environments featuring elastic resource provisioning, our approach for injecting hypercall attacks can be applied in combination with the attack detection accuracy metric and measurement methodology we propose. Our approach for injecting hypercall attacks, and our metric and measurement methodology, can also be applied independently beyond the scenarios considered in this thesis. The wide spectrum of security mechanisms in virtualized environments whose evaluation can directly benefit from the contributions of this thesis (e.g., hypervisor-based IDSs, IDSs deployed as VNFs, and AC mechanisms) reflects the practical implication of the thesis.}, subject = {Eindringerkennung}, language = {en} } @article{KirchnerDittrichBeckenbaueretal.2016, author = {Kirchner, Felix and Dittrich, Marco and Beckenbauer, Phillip and N{\"o}th, Maximilian}, title = {OCR bei Inkunabeln - Offizinspezifischer Ansatz der Universit{\"a}tsbibliothek W{\"u}rzburg}, series = {ABI Technik}, volume = {36}, journal = {ABI Technik}, number = {3}, issn = {2191-4664}, doi = {10.1515/abitech-2016-0036}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-194002}, pages = {178-188}, year = {2016}, abstract = {Im Rahmen des BMBF-gef{\"o}rderten Projekts KALLIMACHOS an der Universit{\"a}t W{\"u}rzburg soll unter anderem die Textgrundlage f{\"u}r digitale Editionen per OCR gewonnen werden. Das Bearbeitungskorpus besteht aus deutschen, franz{\"o}sischen und lateinischen Inkunabeln. 
Dieser Artikel zeigt, wie man mit bereits heute existierenden Methoden und Programmen den Problemen bei der OCR von Inkunabeln entgegentreten kann. Hierzu wurde an der Universit{\"a}tsbibliothek W{\"u}rzburg ein Verfahren erprobt, mit dem auf ausgew{\"a}hlten Werken einer Druckerwerkstatt bereits Zeichengenauigkeiten von bis zu 95 Prozent und Wortgenauigkeiten von bis zu 73 Prozent erzielt werden.}, language = {de} } @phdthesis{Weinhard2019, author = {Weinhard, Alexander}, title = {Managing RFID Implementations - Implications for Managerial Decision Making}, doi = {10.25972/OPUS-17816}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178161}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The present dissertation investigates the management of RFID implementations in retail trade. Our work contributes to this by investigating important aspects that have so far received little attention in scientific literature. We therefore perform three studies about three important aspects of managing RFID implementations. We evaluate in our first study customer acceptance of pervasive retail systems using privacy calculus theory. The results of our study reveal the most important aspects a retailer has to consider when implementing pervasive retail systems. In our second study we analyze RFID-enabled robotic inventory taking with the help of a simulation model. The results show that retailers should implement robotic inventory taking if the accuracy rates of the robots are as high as the robots' manufacturers claim. In our third and last study we evaluate the potentials of RFID data for supporting managerial decision making. We propose three novel methods in order to extract useful information from RFID data and propose a generic information extraction process. 
Our work is geared towards practitioners who want to improve their RFID-enabled processes and towards scientists conducting RFID-based research.}, subject = {RFID}, language = {en} } @phdthesis{Hirth2016, author = {Hirth, Matthias Johannes Wilhem}, title = {Modeling Crowdsourcing Platforms - A Use-Case Driven Approach}, issn = {1432-8801}, doi = {10.25972/OPUS-14072}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-140726}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Computer systems have replaced human work-force in many parts of everyday life, but there still exists a large number of tasks that cannot be automated, yet. This also includes tasks, which we consider to be rather simple like the categorization of image content or subjective ratings. Traditionally, these tasks have been completed by designated employees or outsourced to specialized companies. However, recently the crowdsourcing paradigm is more and more applied to complete such human-labor intensive tasks. Crowdsourcing aims at leveraging the huge number of Internet users all around the globe, which form a potentially highly available, low-cost, and easy accessible work-force. To enable the distribution of work on a global scale, new web-based services emerged, so called crowdsourcing platforms, that act as mediator between employers posting tasks and workers completing tasks. However, the crowdsourcing approach, especially the large anonymous worker crowd, results in two types of challenges. On the one hand, there are technical challenges like the dimensioning of crowdsourcing platform infrastructure or the interconnection of crowdsourcing platforms and machine clouds to build hybrid services. On the other hand, there are conceptual challenges like identifying reliable workers or migrating traditional off-line work to the crowdsourcing environment. 
To tackle these challenges, this monograph analyzes and models current crowdsourcing systems to optimize crowdsourcing workflows and the underlying infrastructure. First, a categorization of crowdsourcing tasks and platforms is developed to derive generalizable properties. Based on this categorization and an exemplary analysis of a commercial crowdsourcing platform, models for different aspects of crowdsourcing platforms and crowdsourcing mechanisms are developed. A special focus is put on quality assurance mechanisms for crowdsourcing tasks, where the models are used to assess the suitability and costs of existing approaches for different types of tasks. Further, a novel quality assurance mechanism solely based on user-interactions is proposed and its feasibility is shown. The findings from the analysis of existing platforms, the derived models, and the developed quality assurance mechanisms are finally used to derive best practices for two crowdsourcing use-cases, crowdsourcing-based network measurements and crowdsourcing-based subjective user studies. These two exemplary use-cases cover aspects typical for a large range of crowdsourcing tasks and illustrate the potential benefits, but also resulting challenges when using crowdsourcing. With the ongoing digitalization and globalization of the labor markets, the crowdsourcing paradigm is expected to gain even more importance in the next years. This is already evident in the currently new emerging fields of crowdsourcing, like enterprise crowdsourcing or mobile crowdsourcing. The models developed in the monograph enable platform providers to optimize their current systems and employers to optimize their workflows to increase their commercial success. 
Moreover, the results help to improve the general understanding of crowdsourcing systems, a key for identifying necessary adaptions and future improvements.}, subject = {Open Innovation}, language = {en} } @phdthesis{Rygielski2017, author = {Rygielski, Piotr}, title = {Flexible Modeling of Data Center Networks for Capacity Management}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146235}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Nowadays, data centers are becoming increasingly dynamic due to the common adoption of virtualization technologies. Systems can scale their capacity on demand by growing and shrinking their resources dynamically based on the current load. However, the complexity and performance of modern data centers is influenced not only by the software architecture, middleware, and computing resources, but also by network virtualization, network protocols, network services, and configuration. The field of network virtualization is not as mature as server virtualization and there are multiple competing approaches and technologies. Performance modeling and prediction techniques provide a powerful tool to analyze the performance of modern data centers. However, given the wide variety of network virtualization approaches, no common approach exists for modeling and evaluating the performance of virtualized networks. The performance community has proposed multiple formalisms and models for evaluating the performance of infrastructures based on different network virtualization technologies. The existing performance models can be divided into two main categories: coarse-grained analytical models and highly-detailed simulation models. Analytical performance models are normally defined at a high level of abstraction and thus they abstract many details of the real network and therefore have limited predictive power. 
On the other hand, simulation models are normally focused on a selected networking technology and take into account many specific performance influencing factors, resulting in detailed models that are tightly bound to a given technology, infrastructure setup, or to a given protocol stack. Existing models are inflexible, that means, they provide a single solution method without providing means for the user to influence the solution accuracy and solution overhead. To allow for flexibility in the performance prediction, the user is required to build multiple different performance models obtaining multiple performance predictions. Each performance prediction may then have different focus, different performance metrics, prediction accuracy, and solving time. The goal of this thesis is to develop a modeling approach that does not require the user to have experience in any of the applied performance modeling formalisms. The approach offers the flexibility in the modeling and analysis by balancing between: (a) generic character and low overhead of coarse-grained analytical models, and (b) the more detailed simulation models with higher prediction accuracy. The contributions of this thesis intersect with technologies and research areas, such as: software engineering, model-driven software development, domain-specific modeling, performance modeling and prediction, networking and data center networks, network virtualization, Software-Defined Networking (SDN), Network Function Virtualization (NFV). The main contributions of this thesis compose the Descartes Network Infrastructure (DNI) approach and include: • Novel modeling abstractions for virtualized network infrastructures. This includes two meta-models that define modeling languages for modeling data center network performance. The DNI and miniDNI meta-models provide means for representing network infrastructures at two different abstraction levels. 
Regardless of which variant of the DNI meta-model is used, the modeling language provides generic modeling elements allowing to describe the majority of existing and future network technologies, while at the same time abstracting factors that have low influence on the overall performance. I focus on SDN and NFV as examples of modern virtualization technologies. • Network deployment meta-model—an interface between DNI and other meta- models that allows to define mapping between DNI and other descriptive models. The integration with other domain-specific models allows capturing behaviors that are not reflected in the DNI model, for example, software bottlenecks, server virtualization, and middleware overheads. • Flexible model solving with model transformations. The transformations enable solving a DNI model by transforming it into a predictive model. The model transformations vary in size and complexity depending on the amount of data abstracted in the transformation process and provided to the solver. In this thesis, I contribute six transformations that transform DNI models into various predictive models based on the following modeling formalisms: (a) OMNeT++ simulation, (b) Queueing Petri Nets (QPNs), (c) Layered Queueing Networks (LQNs). For each of these formalisms, multiple predictive models are generated (e.g., models with different level of detail): (a) two for OMNeT++, (b) two for QPNs, (c) two for LQNs. Some predictive models can be solved using multiple alternative solvers resulting in up to ten different automated solving methods for a single DNI model. • A model extraction method that supports the modeler in the modeling process by automatically prefilling the DNI model with the network traffic data. The contributed traffic profile abstraction and optimization method provides a trade-off by balancing between the size and the level of detail of the extracted profiles. • A method for selecting feasible solving methods for a DNI model. 
The method proposes a set of solvers based on trade-off analysis characterizing each transformation with respect to various parameters such as its specific limitations, expected prediction accuracy, expected run-time, required resources in terms of CPU and memory consumption, and scalability. • An evaluation of the approach in the context of two realistic systems. I evaluate the approach with focus on such factors like: prediction of network capacity and interface throughput, applicability, flexibility in trading-off between prediction accuracy and solving time. Despite not focusing on the maximization of the prediction accuracy, I demonstrate that in the majority of cases, the prediction error is low—up to 20\% for uncalibrated models and up to 10\% for calibrated models depending on the solving technique. In summary, this thesis presents the first approach to flexible run-time performance prediction in data center networks, including networks based on SDN. It provides ability to flexibly balance between performance prediction accuracy and solving overhead. The approach provides the following key benefits: • It is possible to predict the impact of changes in the data center network on the performance. The changes include: changes in network topology, hardware configuration, traffic load, and applications deployment. • DNI can successfully model and predict the performance of multiple different network infrastructures including proactive SDN scenarios. • The prediction process is flexible, that is, it provides balance between the granularity of the predictive models and the solving time. The decreased prediction accuracy is usually rewarded with savings of the solving time and consumption of resources required for solving. • The users are enabled to conduct performance analysis using multiple different prediction methods without requiring the expertise and experience in each of the modeling formalisms. 
The components of the DNI approach can be also applied to scenarios that are not considered in this thesis. The approach is generalizable and applicable for the following examples: (a) networks outside of data centers may be analyzed with DNI as long as the background traffic profile is known; (b) uncalibrated DNI models may serve as a basis for design-time performance analysis; (c) the method for extracting and compacting of traffic profiles may be used for other, non-network workloads as well.}, subject = {Modellierung}, language = {en} } @phdthesis{Ali2017, author = {Ali, Qasim}, title = {Distributed Control of Cooperating Mini UAVs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-140686}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Mini Unmanned Aerial Vehicles (MUAVs) werden immer beliebtere Forschungsplattformen. Vor allem in den letzten Jahren ziehen sie aufgrund ihrer Erschwinglichkeit und ihrer Flexibilit{\"a}t, die es erlaubt sie in fast allen Lebensbereichen einzusetzen, betr{\"a}chtliche Aufmerksamkeit auf sich. MUAVs haben offensichtliche Vorteile gegen{\"u}ber bemannten Plattformen einschließlich ihrer viel geringeren Herstellungs- und Betriebskosten, Risikovermeidung f{\"u}r den menschlichen Piloten, der M{\"o}glichkeit sicher niedrig und langsam fliegen zu k{\"o}nnen, und Realisierung von Operationen, die {\"u}ber die inh{\"a}renten Grenzen des menschlichen K{\"o}rpers hinausgehen. Der Fortschritt in der Micro Electro-Mechanical System (MEMS) Technologie, Avionik und Miniaturisierung von Sensoren spielte auch eine bedeutende Rolle bei der Entwicklung der MUAVs. Diese Flugger{\"a}te reichen von einfachem Spielzeug aus dem Elektrofachhandel bis zu hoch entwickelten, kommerziellen Plattformen, die die Durchf{\"u}hrung neuer Aufgaben wie Offshore-Windkraftwerk Inspektionen, 3D-Modellierung von Geb{\"a}uden usw. erlauben. MUAVs sind auch umweltfreundlich, da sie weniger Luftverschmutzung und L{\"a}rm verursachen. 
Unbemannt ist daher un{\"u}bertroffen. Aktuelle Forschung konzentriert sich auf die M{\"o}glichkeit mehrere kosteng{\"u}nstige Flugger{\"a}te zusammen fliegen zu lassen, w{\"a}hrend die erforderliche relative r{\"a}umliche Trennungen beibehalten wird. Dies erm{\"o}glicht es effizient Aufgaben zu erf{\"u}llen im Vergleich zu einem einzigen sehr teuren Flugger{\"a}t. Durch die Redundanz entf{\"a}llt auch das Risiko des Scheiterns der Mission durch den Verlust eines einzigen Flugger{\"a}ts. Wertvolle Aufgaben, die kooperative Flugger{\"a}te ausf{\"u}hren k{\"o}nnen, sind beispielsweise gemeinsame Lasttransporte, Such- und Rettungsmissionen, mobile Kommunikationsrelais, Spr{\"u}hen von Pestiziden und Wetterbeobachtung. Obwohl die Realisierung von Fl{\"u}gen mit mehreren, gekoppelten UAVs komplex ist, rechtfertigen dennoch offensichtliche Vorteile diese m{\"u}hsame und aufw{\"a}ndige Entwicklungsarbeit. Verteilte Steuerung von kooperierenden Einheiten ist ein multidisziplin{\"a}res Thema, das es erfordert in diversifizierten Bereichen zu arbeiten. Dazu geh{\"o}ren MUAV Hardware und Software, Kommunikationstechniken f{\"u}r den notwendigen Informationsaustausch, Flugdynamik, Regelungstechnik, insbesondere f{\"u}r verteilte / kooperative Steuerungstechniken, Graphentheorie f{\"u}r Kommunikationstopologie Modellierung und Sensoren-Technologie wie Differential GPS (DGPS). F{\"u}r eine Flotte von Agenten, die in unmittelbarer N{\"a}he fliegen, ist eine genaue Positionsbestimmung zwingend n{\"o}tig um Kollisionen zu vermeiden und die Anforderungen f{\"u}r die meisten Missionen wie Georeferenzierung zu erf{\"u}llen. F{\"u}r solche Szenarien ist DGPS ein potenzieller Kandidat. Ein Teil der Forschung konzentriert sich daher auf die Entwicklung von DGPS Code. Eines der Module dieser Forschung war Hardware-Implementierung. 
Ein einfacher Test-Aufbau zur Realisierung von Basisfunktionalit{\"a}ten f{\"u}r Formationsflug von Quadrocoptern wurde am Lehrstuhl f{\"u}r Informationstechnik in der Luft- und Raumfahrt der Universit{\"a}t W{\"u}rzburg entwickelt. Diese Testumgebung kann nicht nur zur Pr{\"u}fung und Validierung von Algorithmen f{\"u}r Formationsflug in realer Umgebung genutzt werden, sondern dient auch zur Ausbildung von Studenten. Ein bereits vorhandener Pr{\"u}fstand f{\"u}r einzelne Quadrocopter wurde mit den notwendigen Kommunikation und verteilten Steuerung erweitert, um Algorithmen f{\"u}r Formationsfl{\"u}ge in drei Freiheitsgraden (Roll / Nick / Gier) zu testen. Diese Studie umfasst die Bereiche der Kommunikation, Steuerungstechnik und Embedded-System-Programmierung. Das Bluetooth-Protokoll wurde f{\"u}r die gegenseitige Kommunikation zwischen zwei Quadrocoptern verwendet. Eine einfache Technik der Proportional-Integral-Differential (PID) Steuerung in Kombination mit Kalman-Filter wurde genutzt. Die MATLAB Instrument Control Toolbox wurde f{\"u}r die Datenanzeige, die Analyse und das Plotten verwendet. Plots k{\"o}nnen in Echtzeit gezeichnet werden und empfangene Daten k{\"o}nnen auch in Form von Dateien zur sp{\"a}teren Verwendung und Analyse gespeichert werden. Das System wurde preisg{\"u}nstig, unter Ber{\"u}cksichtigung eines einfachen Aufbaus, entwickelt. Der vorgeschlagene Aufbau ist sehr flexibel und kann einfach an ver{\"a}nderte Anforderungen angepasst werden. Als verteiltes Steuerungsschema wurde ein zentralisierter, heterogener Formationsflug Positionsregler formuliert, der einen „explicit model following Linear Quadratic Regulator Proportional Integral (LQR PI)" Regler verwendet. Der Anf{\"u}hrer Quadrocopter ist ein stabiles Referenzmodell mit der gew{\"u}nschten Dynamik, deren Ausgang vollkommen von den beiden Wingmen Quadrocopter verfolgt wird. 
Der Anf{\"u}hrer selbst wird durch Pole Placement Steuerverfahren mit den gew{\"u}nschten Stabilit{\"a}tseigenschaften gesteuert, w{\"a}hrend die beiden Anh{\"a}nger durch robuste und adaptive LQR PI Steuerverfahren geregelt werden. F{\"u}r diese Studie wird ein Vollzustandsvektor der Quadrocopter betrachtet w{\"a}hrend nur die resultierende Leistung verfolgt wird. Die ausgew{\"a}hlte 3D Formationsgeometrie und die statische Stabilit{\"a}t bleibt unter einer Vielzahl von m{\"o}glichen St{\"o}rungen erhalten. Bei Kommunikationsverlust zwischen Anf{\"u}hrer und einem der Anh{\"a}nger, leitet der andere Anh{\"a}nger die Daten, die er vom Anf{\"u}hrer erhalten hat, an den betroffenen Anh{\"a}nger weiter. Die Stabilit{\"a}t des Regelsystems wurde unter Verwendung von Singul{\"a}rwerten analysiert. Der vorgeschlagene Ansatz f{\"u}r eng gekoppelten Formationsflug von MUAVs wurde mit Hilfe von umfangreichen Simulationen unter MATLAB® / Simulink® validiert und ergab viel versprechende Ergebnisse. Auch die Tracking-Leistung wurde f{\"u}r zeitlich ver{\"a}nderliche Befehle gezeigt. Die vorgeschlagene Architektur ist skalierbar und kann problemlos erweitert werden. Dieser Ansatz ist f{\"u}r die Szenarien geeignet, die eng gekoppelte Formationsflug ben{\"o}tigen, wie kooperatives Greifen oder gemeinsame Lasttransporte. Ein innovatives Framework f{\"u}r die Teamarbeit von zwei Quadrocopter Flotten wurde entwickelt. Als Beispielmission wurde ein Szenario gew{\"a}hlt, bei dem ein Feuer auf einer gr{\"o}ßeren Fl{\"a}che gel{\"o}scht werden muss. Jede Formation hat ihre angegebene Formationsgeometrie und eine zugewiesene Aufgabe. Die Lageregelung f{\"u}r die Quadrocopter in einer der Formationen wurde durch ein LQR PI-Regelschema, das auf „explicit model following" basiert, umgesetzt. Die Quadrocopter in anderen Formation werden durch ein LQR PI Servomechanismus Regelsystem gesteuert. Die beiden Steuersysteme werden in Bezug auf ihre Leistung und ihren Steuerungsaufwand verglichen. 
Beide Formationen werden durch entsprechende Bodenstationen durch virtuelle Anf{\"u}hrer kommandiert. Die Bodenstationen tauschen die befohlene H{\"o}heninformation aus, um gegenseitig eine sichere Trennung zwischen den Formationen zu gew{\"a}hrleisten. Die Quadrocopter k{\"o}nnen kommandierten Solltrajektorien folgen und {\"u}ber erw{\"u}nschten Punkten f{\"u}r eine vorgegebene Zeit schweben. Bei Kommunikationsverlust zwischen Bodenstation und einem der Quadcopter leitet der benachbarte Quadrocopter die Befehlsdaten, die er von der Bodenstation erhalten hat, an die betroffene Einheit weiter. Das vorgeschlagene Framework wurde durch umfangreiche Simulationen mit Hilfe von MATLAB® / Simulink® validiert und liefert sehr brauchbare Ergebnisse. Cluster-Rekonfiguration von Agenten wird in unserer Arbeit ebenfalls gezeigt. Dies erlaubt es die Formationsgeometrie w{\"a}hrend des Fluges auf eine beliebige neue Form umzuschalten. F{\"u}r die genannten Anwendungen sind Konsens Algorithmen nicht erw{\"u}nscht, da wir von den Quadrocopter Flotten fordern, dass sie dem von uns gew{\"a}hlten Weg folgen, und nicht ihren Weg selbst w{\"a}hlen. Eine Reihe der praktischen Probleme von Kommunikationsnetzen kann in geeigneter Weise durch Graphen dargestellt werden. Dies erleichtert die Problemformulierung und den Analyseprozess. Kommunikationstopologien f{\"u}r Netzwerke mit einer großen Anzahl von Einheiten, wie zum Beispiel Schw{\"a}rme von Luftfahrzeugen, k{\"o}nnen durch einen graphentheoretischen Ansatz untersucht werden. Um die Bildung solcher Probleme zu erleichtern, wird der Graph mit Hilfe der Laplace-Matrix dargestellt. Eigenwerten der Laplace-Matrix wurde in unserer Studie angemessene Ber{\"u}cksichtigung gegeben, um einen Einblick in die Graphen / Subgraphen Eigenschaften zu verleihen. Dieselben wurden genutzt, um die bekannte Euler Formel zu verallgemeinern und somit auf Graphen und Subgraphen anwendbar zu machen. Eine modifizierte Euler-Formel wird ebenfalls vorgestellt. 
Die Verwendung der Graphentheorie in verteilten / kooperativen Regelsystemen wird auch durch Simulationen gezeigt. Kooperative Kontrollschemas, die auf Konsens-Algorithmen beruhen, wurden f{\"u}r die Lageregelung von Quadrocopter-Flotten, in denen kein expliziter Anf{\"u}hrer existiert, verwendet. Konsens-Algorithmen wurden in Kombination mit verschiedenen Steuersystemen verwendet, was zur Autonomie von Quadrocoptern beitr{\"a}gt. Die Steuersysteme, die f{\"u}r diesen Zweck verwendet werden, umfassen LQR PI-Regelung basierend auf „model following" und LQR PI Servo-Mechanismus. Die Regelungen wurden unter verschiedenen Kommunikationstopologien untersucht, darunter voll verbundene ungerichtete Graphen, gerichtete Graphen und Zyklus-Topologie. Der Informationsfluss unter den Agenten in einem Cluster wurde durch Laplace-Matrix modelliert. Die Auswirkungen von Eingangsverzerrungen auf Konsenswerte wurden ebenfalls untersucht. Quadrocopter k{\"o}nnen durch gegenseitigen Konsens Flugbahnen verfolgen und die Zielpunkte erreichen. Die vorgeschlagenen Regelungssysteme wurden unter verschiedenen Kommunikationstopologien in Matlab / Simulink-Umgebung durch umfangreiche Simulationen validiert. Die Ergebnisse bescheinigen die Wirksamkeit der pr{\"a}sentierten Schemata mit dem zus{\"a}tzlichen Vorteil der Einfachheit der Umsetzung. Das vorgeschlagene Regelungssystem ist skalierbar f{\"u}r große Gruppen von MUAVs. F{\"u}r Formationsflug sind die Anforderungen an die Positionsgenauigkeit sehr hoch. GPS-Signale allein bieten keine ausreichend hohe Positionsgenauigkeit um die Anforderung zu erf{\"u}llen; eine Technik f{\"u}r die genauere Positionsbestimmung ist daher erforderlich, beispielsweise DGPS. Es existiert eine Anzahl von {\"o}ffentlichen Codes f{\"u}r die GPS-Positionsbestimmung und Baseline-Bestimmung im Offline-Modus. 
Es existiert jedoch keine Software f{\"u}r DGPS, die Korrekturfaktoren der Basisstationen nutzt, ohne auf Doppel-Differenz-Informationen zu vertrauen. Um dies zu erreichen, wurde eine Methodik in MATLAB-Umgebung f{\"u}r DGPS mit C/A Pseudoranges nur auf der einzelnen Frequenz L1 eingef{\"u}hrt, die es machbar macht, kosteng{\"u}nstige GPS-Empf{\"a}nger zu nutzen. Unsere Basisstation wird an einem genau vermessenen Referenzpunkt aufgestellt. Pseudoranges und geometrische Abst{\"a}nde werden an der Basisstation verglichen, um die Korrekturfaktoren zu berechnen. Diese Korrekturfaktoren, f{\"u}r alle g{\"u}ltigen Satelliten w{\"a}hrend einer Epoche, werden dann an einen Rover {\"u}bergeben. Der Rover ber{\"u}cksichtigt innerhalb der entsprechenden Epoche diese f{\"u}r seine eigene wahre Positionsbestimmung. Zur Validierung der vorgeschlagenen Algorithmen wird unser Rover ebenfalls an einer vorbestimmten Stelle platziert. Der vorgeschlagene Code ist ein geeignetes und einfaches Werkzeug f{\"u}r die Nachbearbeitung von GPS-Rohdaten f{\"u}r eine genaue Positionsbestimmung eines Rovers, z.B. 
eines UAV w{\"a}hrend der Post-Missionsanalyse.}, subject = {Micro Air Vehicle}, language = {en} } @book{OPUS4-5759, title = {W{\"u}rzburger Hochschulschriften : 1581 - 1803 ; Bestandsverzeichnis}, organization = {Universit{\"a}tsbibliothek W{\"u}rzburg}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69739}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {1992}, abstract = {Die Universit{\"a}tsbibliothek W{\"u}rzburg hat f{\"u}r ihre umfangreiche Sammlung alter W{\"u}rzburger Hochschulschriften einen Katalog erarbeitet, der haupts{\"a}chlich Dissertationen und Thesen verzeichnet, aber auch andere Pr{\"u}fungsarbeiten, die f{\"u}r den Erwerb unterschiedlicher akademischer Grade und Titel ausgearbeitet und publiziert worden sind und die aus der f{\"u}rstbisch{\"o}flichen Zeit unserer Universit{\"a}t stammen (1582 - 1803).}, subject = {Universit{\"a}t}, language = {de} } @book{OPUS4-5760, title = {W{\"u}rzburger Hochschulschriften : 1804 - 1885 ; Bestandsverzeichnis}, editor = {M{\"a}lzer, Gottfried and Baumann, Brigitte}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69743}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {1994}, abstract = {Die Universit{\"a}tsbibliothek W{\"u}rzburg hat f{\"u}r ihre umfangreiche Sammlung alter W{\"u}rzburger Hochschulschriften einen Katalog erarbeitet, der haupts{\"a}chlich Dissertationen und Thesen verzeichnet, aber auch andere Pr{\"u}fungsarbeiten, die f{\"u}r den Erwerb unterschiedlicher akademischer Grade und Titel ausgearbeitet und publiziert worden sind. Dies ist der 2. 
Band der Nachweise f{\"u}r die Jahre 1804 bis 1885 mit 2510 Titeln.}, subject = {Universit{\"a}t}, language = {de} } @misc{OPUS4-6452, title = {R{\"u}ckBLICK - Der Jahresbericht 2012 der Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, volume = {2012}, organization = {Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76762}, year = {2013}, abstract = {Die Entwicklung der Universit{\"a}t W{\"u}rzburg im Jahr 2012.}, subject = {W{\"u}rzburg}, language = {de} } @phdthesis{Roth2020, author = {Roth, Daniel}, title = {Intrapersonal, Interpersonal, and Hybrid Interactions in Virtual Reality}, doi = {10.25972/OPUS-18862}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188627}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Virtual reality and related media and communication technologies have a growing impact on professional application fields and our daily life. Virtual environments have the potential to change the way we perceive ourselves and how we interact with others. In comparison to other technologies, virtual reality allows for the convincing display of a virtual self-representation, an avatar, to oneself and also to others. This is referred to as user embodiment. Avatars can be of varying realism and abstraction in their appearance and in the behaviors they convey. Such userembodying interfaces, in turn, can impact the perception of the self as well as the perception of interactions. For researchers, designers, and developers it is of particular interest to understand these perceptual impacts, to apply them to therapy, assistive applications, social platforms, or games, for example. The present thesis investigates and relates these impacts with regard to three areas: intrapersonal effects, interpersonal effects, and effects of social augmentations provided by the simulation. 
With regard to intrapersonal effects, we specifically explore which simulation properties impact the illusion of owning and controlling a virtual body, as well as a perceived change in body schema. Our studies lead to the construction of an instrument to measure these dimensions and our results indicate that these dimensions are especially affected by the level of immersion, the simulation latency, as well as the level of personalization of the avatar. With regard to interpersonal effects we compare physical and user-embodied social interactions, as well as different degrees of freedom in the replication of nonverbal behavior. Our results suggest that functional levels of interaction are maintained, whereas aspects of presence can be affected by avatar-mediated interactions, and collaborative motor coordination can be disturbed by immersive simulations. Social interaction is composed of many unknown symbols and harmonic patterns that define our understanding and interpersonal rapport. For successful virtual social interactions, a mere replication of physical world behaviors to virtual environments may seem feasible. However, the potential of mediated social interactions goes beyond this mere replication. In a third vein of research, we propose and evaluate alternative concepts on how computers can be used to actively engage in mediating social interactions, namely hybrid avatar-agent technologies. Specifically, we investigated the possibilities to augment social behaviors by modifying and transforming user input according to social phenomena and behavior, such as nonverbal mimicry, directed gaze, joint attention, and grouping. Based on our results we argue that such technologies could be beneficial for computer-mediated social interactions such as to compensate for lacking sensory input and disturbances in data transmission or to increase aspects of social presence by visual substitution or amplification of social behaviors. 
Based on related work and presented findings, the present thesis proposes the perspective of considering computers as social mediators. Concluding from prototypes and empirical studies, the potential of technology to be an active mediator of social perception with regard to the perception of the self, as well as the perception of social interactions may benefit our society by enabling further methods for diagnosis, treatment, and training, as well as the inclusion of individuals with social disorders. To this regard, we discuss implications for our society and ethical aspects. This thesis extends previous empirical work and further presents novel instruments, concepts, and implications to open up new perspectives for the development of virtual reality, mixed reality, and augmented reality applications.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @phdthesis{Wick2020, author = {Wick, Christoph}, title = {Optical Medieval Music Recognition}, doi = {10.25972/OPUS-21434}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-214348}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {In recent years, great progress has been made in the area of Artificial Intelligence (AI) due to the possibilities of Deep Learning which steadily yielded new state-of-the-art results especially in many image recognition tasks. Currently, in some areas, human performance is achieved or already exceeded. This great development already had an impact on the area of Optical Music Recognition (OMR) as several novel methods relying on Deep Learning succeeded in specific tasks. Musicologists are interested in large-scale musical analysis and in publishing digital transcriptions in a collection enabling to develop tools for searching and data retrieving. The application of OMR promises to simplify and thus speed-up the transcription process by either providing fully-automatic or semi-automatic approaches. 
This thesis focuses on the automatic transcription of Medieval music with a focus on square notation which poses a challenging task due to complex layouts, highly varying handwritten notations, and degradation. However, since handwritten music notations are quite complex to read, even for an experienced musicologist, it is to be expected that even with new techniques of OMR manual corrections are required to obtain the transcriptions. This thesis presents several new approaches and open source software solutions for layout analysis and Automatic Text Recognition (ATR) for early documents and for OMR of Medieval manuscripts providing state-of-the-art technology. Fully Convolutional Networks (FCN) are applied for the segmentation of historical manuscripts and early printed books, to detect staff lines, and to recognize neume notations. The ATR engine Calamari is presented which allows for ATR of early prints and also the recognition of lyrics. Configurable CNN/LSTM-network architectures which are trained with the segmentation-free CTC-loss are applied to the sequential recognition of text but also monophonic music. Finally, a syllable-to-neume assignment algorithm is presented which represents the final step to obtain a complete transcription of the music. The evaluations show that the performances of any algorithm is highly depending on the material at hand and the number of training instances. The presented staff line detection correctly identifies staff lines and staves with an \$F_1\$-score of above \$99.5\\%\$. The symbol recognition yields a diplomatic Symbol Accuracy Rate (dSAR) of above \$90\\%\$ by counting the number of correct predictions in the symbols sequence normalized by its length. 
The ATR of lyrics achieved a Character Accuracy Rate (CAR) (equivalently the number of correct predictions normalized by the sentence length) of above \$93\\%\$ trained on 771 lyric lines of Medieval manuscripts and of 99.89\\% when training on around 3.5 million lines of contemporary printed fonts. The assignment of syllables and their corresponding neumes reached \$F_1\$-scores of up to \$99.2\\%\$. A direct comparison to previously published performances is difficult due to different materials and metrics. However, estimations show that the reported values of this thesis exceed the state-of-the-art in the area of square notation. A further goal of this thesis is to enable musicologists without technical background to apply the developed algorithms in a complete workflow by providing a user-friendly and comfortable Graphical User Interface (GUI) encapsulating the technical details. For this purpose, this thesis presents the web-application OMMR4all. Its fully-functional workflow includes the proposed state-of-the-art machine-learning algorithms and optionally allows for a manual intervention at any stage to correct the output preventing error propagation. To simplify the manual (post-) correction, OMMR4all provides an overlay-editor that superimposes the annotations with a scan of the original manuscripts so that errors can easily be spotted. 
The workflow is designed to be iteratively improvable by training better models as soon as new Ground Truth (GT) is available.}, subject = {Neumenschrift}, language = {en} } @phdthesis{SchauerMarinRodrigues2020, author = {Schauer Marin Rodrigues, Johannes}, title = {Detecting Changes and Finding Collisions in 3D Point Clouds : Data Structures and Algorithms for Post-Processing Large Datasets}, isbn = {978-3-945459-32-4}, doi = {10.25972/OPUS-21428}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-214285}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Affordable prices for 3D laser range finders and mature software solutions for registering multiple point clouds in a common coordinate system paved the way for new areas of application for 3D point clouds. Nowadays we see 3D laser scanners being used not only by digital surveying experts but also by law enforcement officials, construction workers or archaeologists. Whether the purpose is digitizing factory production lines, preserving historic sites as digital heritage or recording environments for gaming or virtual reality applications -- it is hard to imagine a scenario in which the final point cloud must also contain the points of "moving" objects like factory workers, pedestrians, cars or flocks of birds. For most post-processing tasks, moving objects are undesirable not least because moving objects will appear in scans multiple times or are distorted due to their motion relative to the scanner rotation. The main contributions of this work are two postprocessing steps for already registered 3D point clouds. The first method is a new change detection approach based on a voxel grid which allows partitioning the input points into static and dynamic points using explicit change detection and subsequently remove the latter for a "cleaned" point cloud. 
The second method uses this cleaned point cloud as input for detecting collisions between points of the environment point cloud and a point cloud of a model that is moved through the scene. Our approach on explicit change detection is compared to the state of the art using multiple datasets including the popular KITTI dataset. We show how our solution achieves similar or better F1-scores than an existing solution while at the same time being faster. To detect collisions we do not produce a mesh but approximate the raw point cloud data by spheres or cylindrical volumes. We show how our data structures allow efficient nearest neighbor queries that make our CPU-only approach comparable to a massively-parallel algorithm running on a GPU. The utilized algorithms and data structures are discussed in detail. All our software is freely available for download under the terms of the GNU General Public license. Most of the datasets used in this thesis are freely available as well. We provide shell scripts that allow one to directly reproduce the quantitative results shown in this thesis for easy verification of our findings.}, subject = {Punktwolke}, language = {en} } @phdthesis{Albert2019, author = {Albert, Michael}, title = {Intelligent analysis of medical data in a generic telemedicine infrastructure}, isbn = {978-3-945459-26-3 (Online)}, doi = {10.25972/OPUS-17421}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174213}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Telemedicine uses telecommunication and information technology to provide health care services over spatial distances. In the upcoming demographic changes towards an older average population age, especially rural areas suffer from a decreasing doctor to patient ratio as well as a limited amount of available medical specialists in acceptable distance. 
These areas could benefit the most from telemedicine applications as they are known to improve access to medical services, medical expertise and can also help to mitigate critical or emergency situations. Although the possibilities of telemedicine applications exist in the entire range of healthcare, current systems focus on one specific disease while using dedicated hardware to connect the patient with the supervising telemedicine center. This thesis describes the development of a telemedical system which follows a new generic design approach. This bridges the gap of existing approaches that only tackle one specific application. The proposed system on the contrary aims at supporting as many diseases and use cases as possible by taking all the stakeholders into account at the same time. To address the usability and acceptance of the system it is designed to use standardized hardware like commercial medical sensors and smartphones for collecting medical data of the patients and transmitting them to the telemedical center. The smartphone can also act as interface to the patient for health questionnaires or feedback. The system can handle the collection and transport of medical data, analysis and visualization of the data as well as providing a real time communication with video and audio between the users. On top of the generic telemedical framework the issue of scalability is addressed by integrating a rule-based analysis tool for the medical data. Rules can be easily created by medical personnel via a visual editor and can be personalized for each patient. The rule-based analysis tool is extended by multiple options for visualization of the data, mechanisms to handle complex rules and options for performing actions like raising alarms or sending automated messages. It is sometimes hard for the medical experts to formulate their knowledge into rules and there may be information in the medical data that is not yet known. 
This is why a machine learning module was integrated into the system. It uses the incoming medical data of the patients to learn new rules that are then presented to the medical personnel for inspection. This is in line with European legislation where the human still needs to be in charge of such decisions. Overall, we were able to show the benefit of the generic approach by evaluating it in three completely different medical use cases derived from specific application needs: monitoring of COPD (chronic obstructive pulmonary disease) patients, support of patients performing dialysis at home and councils of intensive-care experts. In addition the system was used for a non-medical use case: monitoring and optimization of industrial machines and robots. In all of the mentioned cases, we were able to prove the robustness of the generic approach with real users of the corresponding domain. This is why we can propose this approach for future development of telemedical systems.}, subject = {Telemedizin}, language = {en} } @inproceedings{KleinehagenbrockPetersen2011, author = {Kleinehagenbrock, Frank and Petersen, Stefan}, title = {Geschichte studiert - und dann? Berufsfelder f{\"u}r Historikerinnen und Historiker sowie Studierende anderer Geisteswissenschaften. Ein Leitfaden}, isbn = {978-3-923959-80-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66703}, year = {2011}, abstract = {Viele Studierende der Geschichte und anderer Geisteswissenschaften streben das Lehramt an. Darin Fuß zu fassen, wird in den kommenden Jahren immer schwieriger. Andere Studierende haben sogar {\"u}berhaupt keine Vorstellungen von ihrer beruflichen Zukunft. 
Dieser Leitfaden m{\"o}chte Orientierung bei der Berufswahl vermitteln und mit Hilfe von Experten Perspektiven er{\"o}ffnen.}, subject = {Geschichtsstudium}, language = {de} } @misc{OPUS4-5621, title = {R{\"u}ckBLICK - Der Jahresbericht 2011 der Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, volume = {2011}, organization = {Julius-Maximilians-Universit{\"a}t W{\"u}rzburg}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69544}, year = {2012}, abstract = {Die Entwicklung der Universit{\"a}t W{\"u}rzburg im Jahr 2011.}, subject = {W{\"u}rzburg}, language = {de} } @phdthesis{Xu2014, author = {Xu, Zhihao}, title = {Cooperative Formation Controller Design for Time-Delay and Optimality Problems}, isbn = {978-3-923959-96-9}, doi = {10.25972/OPUS-10555}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-105555}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {This dissertation presents controller design methodologies for a formation of cooperative mobile robots to perform trajectory tracking and convoy protection tasks. Two major problems related to multi-agent formation control are addressed, namely the time-delay and optimality problems. For the task of trajectory tracking, a leader-follower based system structure is adopted for the controller design, where the selection criteria for controller parameters are derived through analyses of characteristic polynomials. The resulting parameters ensure the stability of the system and overcome the steady-state error as well as the oscillation behavior under time-delay effect. In the convoy protection scenario, a decentralized coordination strategy for balanced deployment of mobile robots is first proposed. Based on this coordination scheme, optimal controller parameters are generated in both centralized and decentralized fashion to achieve dynamic convoy protection in a unified framework, where distributed optimization technique is applied in the decentralized strategy. 
This unified framework takes into account the motion of the target to be protected, and the desired system performance, for instance, minimal energy to spend, equal inter-vehicle distance to keep, etc. Both trajectory tracking and convoy protection tasks are demonstrated through simulations and real-world hardware experiments based on the robotic equipment at Department of Computer Science VII, University of W{\"u}rzburg.}, subject = {Optimalwertregelung}, language = {en} }