@phdthesis{Rehfeld2016, author = {Rehfeld, Stephan}, title = {Untersuchung der Nebenl{\"a}ufigkeit, Latenz und Konsistenz asynchroner Interaktiver Echtzeitsysteme mittels Profiling und Model Checking}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147431}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {This thesis investigates the concurrency, consistency, and latency of asynchronous interactive real-time systems using the techniques of profiling and model checking. It first explains why the asynchronous model is the most promising one for concurrency in an interactive real-time system, drawing a comparison to other models. In addition, a detailed comparison of synchronization technologies, which provide the foundation for consistency, is carried out. Based on these two comparisons and an examination of other systems, a synchronization concept is developed. On this basis, concurrency, consistency, and latency are investigated using two techniques. The first technique is profiling, for which several new forms of presenting measured data are developed; these newly developed forms of presentation are used in the implementation of a profiler. The second technique is model checking, which has not previously been used in the context of interactive real-time systems. Model checking serves to predict the behavior of an interactive real-time system, and these predictions are compared with the measurements from the profiler.}, subject = {Model Checking}, language = {de} } @phdthesis{Milenkoski2016, author = {Milenkoski, Aleksandar}, title = {Evaluation of Intrusion Detection Systems in Virtualized Environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-141846}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Virtualization allows the creation of virtual instances of physical devices, such as network and processing units. In a virtualized system, governed by a hypervisor, resources are shared among virtual machines (VMs). Virtualization has been receiving increasing interest as a way to reduce costs through server consolidation and to enhance the flexibility of physical infrastructures. Although virtualization provides many benefits, it introduces new security challenges: a hypervisor introduces new threats since it exposes new attack surfaces. Intrusion detection is a common cyber security mechanism whose task is to detect malicious activities in host and/or network environments. This enables timely reaction in order to stop an ongoing attack, or to mitigate the impact of a security breach. The wide adoption of virtualization has resulted in the increasingly common practice of deploying conventional intrusion detection systems (IDSs), for example, hardware IDS appliances or common software-based IDSs, in designated VMs as virtual network functions (VNFs). In addition, the research and industrial communities have developed IDSs specifically designed to operate in virtualized environments (i.e., hypervisor-based IDSs), with components both inside the hypervisor and in a designated VM. The latter are becoming increasingly common with the growing proliferation of virtualized data centers and the adoption of the cloud computing paradigm, for which virtualization is a key enabling technology.
To minimize the risk of security breaches, methods and techniques for evaluating IDSs in an accurate manner are essential. For instance, one may compare different IDSs in terms of their attack detection accuracy in order to identify and deploy the IDS that operates optimally in a given environment, thereby reducing the risk of a security breach. However, methods and techniques for realistic and accurate evaluation of the attack detection accuracy of IDSs in virtualized environments (i.e., IDSs deployed as VNFs or hypervisor-based IDSs) are lacking. That is, workloads that exercise the sensors of an evaluated IDS and contain attacks targeting hypervisors are needed. Attacks targeting hypervisors are of high severity since they may result in, for example, altering the hypervisor's memory and thus enabling the execution of malicious code with hypervisor privileges. In addition, there are no metrics and measurement methodologies for accurately quantifying the attack detection accuracy of IDSs in virtualized environments with elastic resource provisioning (i.e., on-demand allocation or deallocation of virtualized hardware resources to VMs). Modern hypervisors allow for hotplugging virtual CPUs and memory on the designated VM where the intrusion detection engine of hypervisor-based IDSs, as well as of IDSs deployed as VNFs, typically operates. Resource hotplugging may have a significant impact on the attack detection accuracy of an evaluated IDS, which is not taken into account by existing metrics for quantifying IDS attack detection accuracy. This may lead to inaccurate measurements, which, in turn, may result in the deployment of misconfigured or ill-performing IDSs, increasing the risk of security breaches. This thesis presents contributions that span the standard components of any system evaluation scenario: workloads, metrics, and measurement methodologies. The scientific contributions of this thesis are: A comprehensive systematization of the common practices and the state-of-the-art on IDS evaluation. This includes: (i) a definition of an IDS evaluation design space that allows existing practical and theoretical work to be put into a common context in a systematic manner; (ii) an overview of common practices in IDS evaluation, reviewing evaluation approaches and methods related to each part of the design space; and (iii) a set of case studies demonstrating how different IDS evaluation approaches are applied in practice. Given the significant amount of existing practical and theoretical work related to IDS evaluation, the presented systematization is beneficial for improving the general understanding of the topic by providing an overview of the current state of the field. In addition, it is beneficial for identifying and contrasting the advantages and disadvantages of different IDS evaluation methods and practices, while also helping to identify specific requirements and best practices for evaluating current and future IDSs. An in-depth analysis of common vulnerabilities of modern hypervisors as well as a set of attack models capturing the activities of attackers triggering these vulnerabilities. The analysis includes 35 representative vulnerabilities of hypercall handlers (i.e., hypercall vulnerabilities). Hypercalls are software traps from a kernel of a VM to the hypervisor. The hypercall interface of hypervisors, alongside device drivers and VM exit events, is one of the attack surfaces that hypervisors expose.
Triggering a hypercall vulnerability may lead to a crash of the hypervisor or to altering the hypervisor's memory. We analyze the origins of the considered hypercall vulnerabilities, demonstrate and analyze possible attacks that trigger them (i.e., hypercall attacks), develop hypercall attack models (i.e., systematized activities of attackers targeting the hypercall interface), and discuss future research directions focusing on approaches for securing hypercall interfaces. A novel approach for evaluating IDSs enabling the generation of workloads that contain attacks targeting hypervisors, that is, hypercall attacks. We propose an approach for evaluating IDSs using attack injection (i.e., controlled execution of attacks during regular operation of the environment where an IDS under test is deployed). The injection of attacks is performed based on attack models that capture realistic attack scenarios. We use the hypercall attack models developed as part of this thesis for injecting hypercall attacks. A novel metric and measurement methodology for quantifying the attack detection accuracy of IDSs in virtualized environments that feature elastic resource provisioning. We demonstrate how the elasticity of resource allocations in such environments may impact the IDS attack detection accuracy and show that using existing metrics in such environments may lead to practically challenging and inaccurate measurements. We also demonstrate the practical use of the metric we propose through a set of case studies, where we evaluate common conventional IDSs deployed as VNFs. In summary, this thesis presents the first systematization of the state-of-the-art on IDS evaluation, considering workloads, metrics, and measurement methodologies as integral parts of every IDS evaluation approach. In addition, we are the first to examine the hypercall attack surface of hypervisors in detail and to propose an approach using attack injection for evaluating IDSs in virtualized environments. Finally, this thesis presents the first metric and measurement methodology for quantifying the attack detection accuracy of IDSs in virtualized environments that feature elastic resource provisioning. From a technical perspective, as part of the proposed approach for evaluating IDSs, this thesis presents hInjector, a tool for injecting hypercall attacks. We designed hInjector to enable the rigorous, representative, and practically feasible evaluation of IDSs using attack injection. We demonstrate the application and practical usefulness of hInjector, as well as of the proposed approach, by evaluating a representative hypervisor-based IDS designed to detect hypercall attacks. While we focus on evaluating the capabilities of IDSs to detect hypercall attacks, the proposed IDS evaluation approach can be generalized and applied in a broader context. For example, it may be directly used to also evaluate security mechanisms of hypervisors, such as hypercall access control (AC) mechanisms. It may also be applied to evaluate the capabilities of IDSs to detect attacks involving operations that are functionally similar to hypercalls, for example, the input/output control (ioctl) calls that the Kernel-based Virtual Machine (KVM) hypervisor supports. For IDSs in virtualized environments featuring elastic resource provisioning, our approach for injecting hypercall attacks can be applied in combination with the attack detection accuracy metric and measurement methodology we propose.
Our approach for injecting hypercall attacks, and our metric and measurement methodology, can also be applied independently beyond the scenarios considered in this thesis. The wide spectrum of security mechanisms in virtualized environments whose evaluation can directly benefit from the contributions of this thesis (e.g., hypervisor-based IDSs, IDSs deployed as VNFs, and AC mechanisms) reflects the practical relevance of the thesis.}, subject = {Eindringerkennung}, language = {en} } @phdthesis{Hirth2016, author = {Hirth, Matthias Johannes Wilhem}, title = {Modeling Crowdsourcing Platforms - A Use-Case Driven Approach}, issn = {1432-8801}, doi = {10.25972/OPUS-14072}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-140726}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Computer systems have replaced human labor in many parts of everyday life, but a large number of tasks still cannot be automated. This also includes tasks that we consider rather simple, such as categorizing image content or providing subjective ratings. Traditionally, these tasks have been completed by designated employees or outsourced to specialized companies. Recently, however, the crowdsourcing paradigm has been applied more and more often to complete such human-labor-intensive tasks. Crowdsourcing aims at leveraging the huge number of Internet users all around the globe, who form a potentially highly available, low-cost, and easily accessible workforce. To enable the distribution of work on a global scale, new web-based services have emerged, so-called crowdsourcing platforms, which act as mediators between employers posting tasks and workers completing them. However, the crowdsourcing approach, and especially the large anonymous worker crowd, results in two types of challenges. On the one hand, there are technical challenges, such as dimensioning the crowdsourcing platform infrastructure or interconnecting crowdsourcing platforms and machine clouds to build hybrid services. On the other hand, there are conceptual challenges, such as identifying reliable workers or migrating traditional offline work to the crowdsourcing environment. To tackle these challenges, this monograph analyzes and models current crowdsourcing systems in order to optimize crowdsourcing workflows and the underlying infrastructure. First, a categorization of crowdsourcing tasks and platforms is developed to derive generalizable properties. Based on this categorization and an exemplary analysis of a commercial crowdsourcing platform, models for different aspects of crowdsourcing platforms and crowdsourcing mechanisms are developed. A special focus is put on quality assurance mechanisms for crowdsourcing tasks, where the models are used to assess the suitability and costs of existing approaches for different types of tasks. Further, a novel quality assurance mechanism based solely on user interactions is proposed and its feasibility is shown. The findings from the analysis of existing platforms, the derived models, and the developed quality assurance mechanisms are finally used to derive best practices for two crowdsourcing use cases: crowdsourcing-based network measurements and crowdsourcing-based subjective user studies. These two exemplary use cases cover aspects typical of a large range of crowdsourcing tasks and illustrate the potential benefits, but also the resulting challenges, of using crowdsourcing.
With the ongoing digitalization and globalization of labor markets, the crowdsourcing paradigm is expected to gain even more importance in the coming years. This is already evident in newly emerging fields of crowdsourcing, such as enterprise crowdsourcing or mobile crowdsourcing. The models developed in this monograph enable platform providers to optimize their current systems and employers to optimize their workflows in order to increase their commercial success. Moreover, the results help to improve the general understanding of crowdsourcing systems, a key to identifying necessary adaptations and future improvements.}, subject = {Open Innovation}, language = {en} } @phdthesis{Kindermann2016, author = {Kindermann, Philipp}, title = {Angular Schematization in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-020-7 (print)}, doi = {10.25972/WUP-978-3-95826-021-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112549}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {184}, year = {2016}, abstract = {Graphs are a frequently used tool to model relationships among entities. A graph is a binary relation between objects, that is, it consists of a set of objects (vertices) and a set of pairs of objects (edges). Networks are common examples of data modeled as a graph. For example, relationships between persons in a social network, or network links between computers in a telecommunication network, can be represented by a graph. The clearest way to illustrate the modeled data is to visualize the graphs. The field of Graph Drawing deals with the problem of finding algorithms to automatically generate graph visualizations. The task is to find a "good" drawing, whose quality can be measured by different criteria such as the number of crossings between edges or the area used. In this thesis, we study Angular Schematization in Graph Drawing. By this, we mean drawings with large angles (for example, between the edges at common vertices or at crossing points). The thesis consists of three parts. First, we deal with the placement of boxes. Boxes are axis-parallel rectangles that can, for example, contain text. They can be placed on a map to label important sites, or can be used to describe semantic relationships between words in a word network. In the second part of the thesis, we consider graph drawings that visually guide the viewer. These drawings generally induce large angles between edges that meet at a vertex. Furthermore, the edges are drawn crossing-free and in a way that makes them easy to follow for the human eye. The third and final part is devoted to crossings with large angles. In drawings with crossings, it is important to have large angles between edges at their crossing point, preferably right angles.}, language = {en} } @phdthesis{Wiebusch2016, author = {Wiebusch, Dennis}, title = {Reusability for Intelligent Realtime Interactive Systems}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-040-5 (print)}, doi = {10.25972/WUP-978-3-95826-041-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-121869}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {260}, year = {2016}, abstract = {Software frameworks for Realtime Interactive Systems (RIS), e.g., in the areas of Virtual, Augmented, and Mixed Reality (VR, AR, and MR) or computer games, facilitate a multitude of functionalities by coupling diverse software modules.
In this context, no uniform methodology for coupling these modules exists; instead, various purpose-built solutions have been proposed. As a consequence, important software qualities, such as maintainability, reusability, and adaptability, are impeded. Many modern systems provide additional support for the integration of Artificial Intelligence (AI) methods to create so-called intelligent virtual environments. These methods further exacerbate the above-mentioned problem of coupling software modules in the resulting Intelligent Realtime Interactive Systems (IRIS). This is due, on the one hand, to the commonly applied specialized data structures and asynchronous execution schemes, and, on the other, to the requirement for high consistency between content-wise coupled but functionally decoupled forms of data representation. This work proposes an approach to decoupling software modules in IRIS that is based on the abstraction of architecture elements using a semantic Knowledge Representation Layer (KRL). The layer facilitates decoupling the required modules, provides a means for ensuring interface compatibility and consistency, and ultimately constitutes an interface for symbolic AI methods.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @phdthesis{Busch2016, author = {Busch, Stephan}, title = {Robust, Flexible and Efficient Design for Miniature Satellite Systems}, isbn = {978-3-945459-10-2}, doi = {10.25972/OPUS-13652}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-136523}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Small satellites contribute significantly to the rapidly evolving innovation in space engineering, in particular in distributed space systems for global Earth observation and communication services. Significant mass reduction through miniaturization, increased utilization of commercial high-tech components, and, in particular, standardization are the key drivers for modern miniature space technology. This thesis addresses key fields in research and development on miniature satellite technology regarding efficiency, flexibility, and robustness. These challenges are addressed by the University of Wuerzburg's advanced pico-satellite bus, which realizes a generic modular satellite architecture and standardized interfaces for all subsystems. The modular platform ensures reusability, scalability, and increased testability due to its flexible subsystem interface, which allows efficient and compact integration of the entire satellite in a plug-and-play manner. Besides systematic design for testability, a high degree of operational robustness is achieved by the consistent implementation of redundancy for crucial subsystems. This is combined with efficient fault detection, isolation, and recovery mechanisms. Thus, the UWE-3 platform, and in particular its on-board data handling system and electrical power system, offers one of the most efficient pico-satellite architectures launched in recent years and provides a solid basis for future extensions. The in-orbit performance results of the pico-satellite UWE-3 are presented, summarizing successful operations since its launch in 2013. Several software extensions and adaptations have been uploaded to UWE-3, increasing its capabilities.
Thus, a very flexible platform for in-orbit software experiments and for the evaluation of innovative concepts has been provided and tested.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Schwartz2016, author = {Schwartz, Christian}, title = {Modeling and Evaluation of Multi-Stakeholder Scenarios in Communication Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-13388}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-133887}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Today's Internet is no longer controlled by a single stakeholder, e.g., a standards body or a telecommunications company. Rather, the interests of a multitude of stakeholders, e.g., application developers, hardware vendors, cloud operators, and network operators, collide during the development and operation of applications in the Internet. Each of these stakeholders considers different KPIs to be important and attempts to optimise scenarios in its favour. This results in different, often opposing views and can cause problems for the complete network ecosystem. One example of such a scenario is a signalling storm in the mobile Internet, one of the largest of which occurred in Japan in 2012 due to the release and high popularity of a free instant messaging application. The network traffic generated by the application caused a high number of connections to the Internet to be established and terminated. This resulted in a similarly high number of signalling messages in the mobile network, causing overload and a loss of service for 2.5 million users over 4 hours. While the network operator suffers the largest impact of this signalling overload, it does not control the application. Thus, the network operator cannot change the application traffic characteristics to generate less network signalling traffic. The stakeholders who could prevent, or at least reduce, such behaviour, i.e., application developers or hardware vendors, have no direct benefit from modifying their products in such a way. This results in a clash of interests which negatively impacts the network performance for all participants. The goal of this monograph is to provide an overview of the complex structures of stakeholder relationships in today's Internet applications in mobile networks. To this end, we study different scenarios where such interests clash and suggest methods by which tradeoffs can be optimised for all participants. If such an optimisation is not possible, or attempts at it might lead to adverse effects, we discuss the reasons.}, subject = {Leistungsbewertung}, language = {en} }