@phdthesis{Krug2020, author = {Krug, Markus}, title = {Techniques for the Automatic Extraction of Character Networks in German Historic Novels}, doi = {10.25972/OPUS-20918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-209186}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Recent advances in Natural Language Preprocessing (NLP) allow for a fully automatic extraction of character networks for an incoming text. These networks serve as a compact and easy to grasp representation of literary fiction. They offer an aggregated view of the text, which can be used during distant reading approaches for the analysis of literary hypotheses. In their core, the networks consist of nodes, which represent literary characters, and edges, which represent relations between characters. For an automatic extraction of such a network, the first step is the detection of the references of all fictional entities that are of importance for a text. References to the fictional entities appear in the form of names, noun phrases and pronouns and prior to this work, no components capable of automatic detection of character references were available. Existing tools are only capable of detecting proper nouns, a subset of all character references. When evaluated on the task of detecting proper nouns in the domain of literary fiction, they still underperform at an F1-score of just about 50\%. This thesis uses techniques from the field of semi-supervised learning, such as Distant supervision and Generalized Expectations, and improves the results of an existing tool to about 82\%, when evaluated on all three categories in literary fiction, but without the need for annotated data in the target domain. However, since this quality is still not sufficient, the decision to annotate DROC, a corpus comprising 90 fragments of German novels was made. 
This resulted in a new general purpose annotation environment titled as ATHEN, as well as annotated data that spans about 500.000 tokens in total. Using this data, the combination of supervised algorithms and a tailored rule based algorithm, which in combination are able to exploit both - local consistencies as well as global consistencies - yield an algorithm with an F1-score of about 93\%. This component is referred to as the Kallimachos tagger. A character network can not directly display references however, instead they need to be clustered so that all references that belong to a real world or fictional entity are grouped together. This process widely known as coreference resolution is a hard problem in the focus of research for more than half a century. This work experimented with adaptations of classical feature based machine learning, with a dedicated rule based algorithm and with modern techniques of Deep Learning, but no approach can surpass 55\% B-Cubed F1, when evaluated on DROC. Due to this barrier, many researchers do not use a fully-fledged coreference resolution when they extract character networks, but only focus on a more forgiving subset - the names. For novels such as Alice's Adventures in Wonderland by Lewis Carroll, this would however only result in a network in which many important characters are missing. In order to integrate important characters into the network that are not named by the author, this work makes use of automatic detection of speaker and addressees for direct speech utterances (all entities involved in a dialog are considered to be of importance). This problem is by itself not an easy task, however the most successful system analysed in this thesis is able to correctly determine the speaker to about 85\% of the utterances as well as about 65\% of the addressees. This speaker information can not only help to identify the most dominant characters, but also serves as a way to model the relations between entities. 
During the span of this work, components have been developed to model relations between characters using speaker attribution, using co-occurrences as well as by the usage of true interactions, for which yet again a dataset was annotated using ATHEN. Furthermore, since relations between characters are usually typed, a component for the extraction of a typed relation was developed. Similar to the experiments for the character reference detection, a combination of a rule based and a Maximum Entropy classifier yielded the best overall results, with the extraction of family relations showing a score of about 80\% and the quality of love relations with a score of about 50\%. For family relations, a kernel for a Support Vector Machine was developed that even exceeded the scores of the combined approach but is behind on the other labels. In addition, this work presents new ways to evaluate automatically extracted networks without the need of domain experts, instead it relies on the usage of expert summaries. It also refrains from the uses of social network analysis for the evaluation, but instead presents ranked evaluations using Precision@k and the Spearman Rank correlation coefficient for the evaluation of the nodes and edges of the network. An analysis using these metrics showed, that the central characters of a novel are contained with high probability but the quality drops rather fast if more than five entities are analyzed. The quality of the edges is mainly dominated by the quality of the coreference resolution and the correlation coefficient between gold edges and system edges therefore varies between 30 and 60\%. 
All developed components are aggregated alongside a large set of other preprocessing modules in the Kallimachos pipeline and can be reused without any restrictions.}, subject = {Textanalyse}, language = {en} } @phdthesis{Metter2019, author = {Metter, Christopher Valentin}, title = {Resilience, Availability, and Serviceability Evaluation in Software-defined Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-17678}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176788}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {With the introduction of Software-defined Networking (SDN) in the late 2000s, not only a new research field has been created, but a paradigm shift was initiated in the broad field of networking. The programmable network control by SDN is a big step, but also a stumbling block for many of the established network operators and vendors. As with any new technology the question about the maturity and the production-readiness of it arises. Therefore, this thesis picks specific features of SDN and analyzes its performance, reliability, and availability in scenarios that can be expected in production deployments. The first SDN topic is the performance impact of application traffic in the data plane on the control plane. Second, reliability and availability concerns of SDN deployments are exemplary analyzed by evaluating the detection performance of a common SDN controller. 
Thirdly, the performance of P4, a technology that enhances SDN, or better its impact of certain control operations on the processing performance is evaluated.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{NguyenNgoc2018, author = {Nguyen-Ngoc, Anh}, title = {On Performance Assessment of Control Mechanisms and Virtual Components in SDN-based Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-16932}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-169328}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {This dissertation focuses on the performance evaluation of all components of Software Defined Networking (SDN) networks and covers their whole architecture. First, the isolation between virtual networks sharing the same physical resources is investigated with SDN switches of several vendors. Then, influence factors on the isolation are identified and evaluated. Second, the impact of control mechanisms on the performance of the data plane is examined through the flow rule installation time of SDN switches with different controllers. It is shown that both hardware-specific and controller instance have a specific influence on the installation time. Finally, several traffic flow monitoring methods of an SDN controller are investigated and a new monitoring approach is developed and evaluated. It is confirmed that the proposed method allows monitoring of particular flows as well as consumes fewer resources than the standard approach. Based on findings in this thesis, on the one hand, controller developers can refer to the work related to the control plane, such as flow monitoring or flow rule installation, to improve the performance of their applications. 
On the other hand, network administrators can apply the presented methods to select a suitable combination of controller and switches in their SDN networks, based on their performance requirements}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{DinhXuan2018, author = {Dinh-Xuan, Lam}, title = {Quality of Experience Assessment of Cloud Applications and Performance Evaluation of VNF-Based QoE Monitoring}, issn = {1432-8801}, doi = {10.25972/OPUS-16918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-169182}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {In this thesis various aspects of Quality of Experience (QoE) research are examined. The work is divided into three major blocks: QoE Assessment, QoE Monitoring, and VNF Performance Evaluation. First, prominent cloud applications such as Google Docs and a cloud-based photo album are explored. The QoE is characterized and the influence of packet loss and delay is studied. Afterwards, objective QoE monitoring for HTTP Adaptive Video Streaming (HAS) in the cloud is investigated. Additionally, by using a Virtual Network Function (VNF) for QoE monitoring in the cloud, the feasibility of an interworking of Network Function Virtualization (NFV) and cloud paradigm is evaluated. To this end, a VNF that exploits deep packet inspection technique was used to parse the video traffic. An algorithm is then designed accordingly to estimate video quality and QoE based on network and application layer parameters. To assess the accuracy of the estimation, the VNF is measured in different scenarios under different network QoS and the virtual environment of the cloud architecture. The insights show that the different geographical deployments of the VNF influence the accuracy of the video quality and QoE estimation. Various Service Function Chain (SFC) placement algorithms have been proposed and compared in the context of edge cloud networks. 
On the one hand, this research is aimed at cloud service providers by providing methods for evaluating QoE for cloud applications. On the other hand, network operators can learn the pitfalls and disadvantages of using the NFV paradigm for such a QoE monitoring mechanism.}, subject = {Quality of Experience}, language = {en} } @phdthesis{Gebert2017, author = {Gebert, Steffen Christian}, title = {Architectures for Softwarized Networks and Their Performance Evaluation}, issn = {1432-8801}, doi = {10.25972/OPUS-15063}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150634}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {This thesis contributes to several issues in the context of SDN and NFV, with an emphasis on performance and management. The main contributions are guide lines for operators migrating to software-based networks, as well as an analytical model for the packet processing in a Linux system using the Kernel NAPI.}, subject = {Telekommunikationsnetz}, language = {en} } @phdthesis{Rygielski2017, author = {Rygielski, Piotr}, title = {Flexible Modeling of Data Center Networks for Capacity Management}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146235}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Nowadays, data centers are becoming increasingly dynamic due to the common adoption of virtualization technologies. Systems can scale their capacity on demand by growing and shrinking their resources dynamically based on the current load. However, the complexity and performance of modern data centers is influenced not only by the software architecture, middleware, and computing resources, but also by network virtualization, network protocols, network services, and configuration. The field of network virtualization is not as mature as server virtualization and there are multiple competing approaches and technologies. 
Performance modeling and prediction techniques provide a powerful tool to analyze the performance of modern data centers. However, given the wide variety of network virtualization approaches, no common approach exists for modeling and evaluating the performance of virtualized networks. The performance community has proposed multiple formalisms and models for evaluating the performance of infrastructures based on different network virtualization technologies. The existing performance models can be divided into two main categories: coarse-grained analytical models and highly-detailed simulation models. Analytical performance models are normally defined at a high level of abstraction and thus they abstract many details of the real network and therefore have limited predictive power. On the other hand, simulation models are normally focused on a selected networking technology and take into account many specific performance influencing factors, resulting in detailed models that are tightly bound to a given technology, infrastructure setup, or to a given protocol stack. Existing models are inflexible, that means, they provide a single solution method without providing means for the user to influence the solution accuracy and solution overhead. To allow for flexibility in the performance prediction, the user is required to build multiple different performance models obtaining multiple performance predictions. Each performance prediction may then have different focus, different performance metrics, prediction accuracy, and solving time. The goal of this thesis is to develop a modeling approach that does not require the user to have experience in any of the applied performance modeling formalisms. The approach offers the flexibility in the modeling and analysis by balancing between: (a) generic character and low overhead of coarse-grained analytical models, and (b) the more detailed simulation models with higher prediction accuracy. 
The contributions of this thesis intersect with technologies and research areas, such as: software engineering, model-driven software development, domain-specific modeling, performance modeling and prediction, networking and data center networks, network virtualization, Software-Defined Networking (SDN), Network Function Virtualization (NFV). The main contributions of this thesis compose the Descartes Network Infrastructure (DNI) approach and include: • Novel modeling abstractions for virtualized network infrastructures. This includes two meta-models that define modeling languages for modeling data center network performance. The DNI and miniDNI meta-models provide means for representing network infrastructures at two different abstraction levels. Regardless of which variant of the DNI meta-model is used, the modeling language provides generic modeling elements allowing to describe the majority of existing and future network technologies, while at the same time abstracting factors that have low influence on the overall performance. I focus on SDN and NFV as examples of modern virtualization technologies. • Network deployment meta-model—an interface between DNI and other meta- models that allows to define mapping between DNI and other descriptive models. The integration with other domain-specific models allows capturing behaviors that are not reflected in the DNI model, for example, software bottlenecks, server virtualization, and middleware overheads. • Flexible model solving with model transformations. The transformations enable solving a DNI model by transforming it into a predictive model. The model transformations vary in size and complexity depending on the amount of data abstracted in the transformation process and provided to the solver. 
In this thesis, I contribute six transformations that transform DNI models into various predictive models based on the following modeling formalisms: (a) OMNeT++ simulation, (b) Queueing Petri Nets (QPNs), (c) Layered Queueing Networks (LQNs). For each of these formalisms, multiple predictive models are generated (e.g., models with different level of detail): (a) two for OMNeT++, (b) two for QPNs, (c) two for LQNs. Some predictive models can be solved using multiple alternative solvers resulting in up to ten different automated solving methods for a single DNI model. • A model extraction method that supports the modeler in the modeling process by automatically prefilling the DNI model with the network traffic data. The contributed traffic profile abstraction and optimization method provides a trade-off by balancing between the size and the level of detail of the extracted profiles. • A method for selecting feasible solving methods for a DNI model. The method proposes a set of solvers based on trade-off analysis characterizing each transformation with respect to various parameters such as its specific limitations, expected prediction accuracy, expected run-time, required resources in terms of CPU and memory consumption, and scalability. • An evaluation of the approach in the context of two realistic systems. I evaluate the approach with focus on such factors like: prediction of network capacity and interface throughput, applicability, flexibility in trading-off between prediction accuracy and solving time. Despite not focusing on the maximization of the prediction accuracy, I demonstrate that in the majority of cases, the prediction error is low—up to 20\% for uncalibrated models and up to 10\% for calibrated models depending on the solving technique. In summary, this thesis presents the first approach to flexible run-time performance prediction in data center networks, including network based on SDN. 
It provides the ability to flexibly balance between performance prediction accuracy and solving overhead. The approach provides the following key benefits: • It is possible to predict the impact of changes in the data center network on the performance. The changes include: changes in network topology, hardware configuration, traffic load, and applications deployment. • DNI can successfully model and predict the performance of multiple different network infrastructures including proactive SDN scenarios. • The prediction process is flexible, that is, it provides balance between the granularity of the predictive models and the solving time. The decreased prediction accuracy is usually rewarded with savings of the solving time and consumption of resources required for solving. • The users are enabled to conduct performance analysis using multiple different prediction methods without requiring the expertise and experience in each of the modeling formalisms. The components of the DNI approach can also be applied to scenarios that are not considered in this thesis. 
The approach is generalizable and applicable for the following examples: (a) networks outside of data centers may be analyzed with DNI as long as the background traffic profile is known; (b) uncalibrated DNI models may serve as a basis for design-time performance analysis; (c) the method for extracting and compacting of traffic profiles may be used for other, non-network workloads as well.}, subject = {Modellierung}, language = {en} } @phdthesis{Winkler2015, author = {Winkler, Marco}, title = {On the Role of Triadic Substructures in Complex Networks}, publisher = {epubli GmbH}, address = {Berlin}, isbn = {978-3-7375-5654-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-116022}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {In the course of the growth of the Internet and due to increasing availability of data, over the last two decades, the field of network science has established itself as an own area of research. With quantitative scientists from computer science, mathematics, and physics working on datasets from biology, economics, sociology, political sciences, and many others, network science serves as a paradigm for interdisciplinary research. One of the major goals in network science is to unravel the relationship between topological graph structure and a network's function. As evidence suggests, systems from the same fields, i.e. with similar function, tend to exhibit similar structure. However, it is still vague whether a similar graph structure automatically implies likewise function. This dissertation aims at helping to bridge this gap, while particularly focusing on the role of triadic structures. After a general introduction to the main concepts of network science, existing work devoted to the relevance of triadic substructures is reviewed. 
A major challenge in modeling triadic structure is the fact that not all three-node subgraphs can be specified independently of each other, as pairs of nodes may participate in multiple of those triadic subgraphs. In order to overcome this obstacle, we suggest a novel class of generative network models based on so called Steiner triple systems. The latter are partitions of a graph's vertices into pair-disjoint triples (Steiner triples). Thus, the configurations on Steiner triples can be specified independently of each other without overdetermining the network's link structure. Subsequently, we investigate the most basic realization of this new class of models. We call it the triadic random graph model (TRGM). The TRGM is parametrized by a probability distribution over all possible triadic subgraph patterns. In order to generate a network instantiation of the model, for all Steiner triples in the system, a pattern is drawn from the distribution and adjusted randomly on the Steiner triple. We calculate the degree distribution of the TRGM analytically and find it to be similar to a Poissonian distribution. Furthermore, it is shown that TRGMs possess non-trivial triadic structure. We discover inevitable correlations in the abundance of certain triadic subgraph patterns which should be taken into account when attributing functional relevance to particular motifs - patterns which occur significantly more frequently than expected at random. Beyond, the strong impact of the probability distributions on the Steiner triples on the occurrence of triadic subgraphs over the whole network is demonstrated. This interdependence allows us to design ensembles of networks with predefined triadic substructure. Hence, TRGMs help to overcome the lack of generative models needed for assessing the relevance of triadic structure. We further investigate whether motifs occur homogeneously or heterogeneously distributed over a graph. 
Therefore, we study triadic subgraph structures in each node's neighborhood individually. In order to quantitatively measure structure from an individual node's perspective, we introduce an algorithm for node-specific pattern mining for both directed unsigned, and undirected signed networks. Analyzing real-world datasets, we find that there are networks in which motifs are distributed highly heterogeneously, bound to the proximity of only very few nodes. Moreover, we observe indication for the potential sensitivity of biological systems to a targeted removal of these critical vertices. In addition, we study whole graphs with respect to the homogeneity and homophily of their node-specific triadic structure. The former describes the similarity of subgraph distributions in the neighborhoods of individual vertices. The latter quantifies whether connected vertices are structurally more similar than non-connected ones. We discover these features to be characteristic for the networks' origins. Moreover, clustering the vertices of graphs regarding their triadic structure, we investigate structural groups in the neural network of C. elegans, the international airport-connection network, and the global network of diplomatic sentiments between countries. For the latter we find evidence for the instability of triangles considered socially unbalanced according to sociological theories. Finally, we utilize our TRGM to explore ensembles of networks with similar triadic substructure in terms of the evolution of dynamical processes acting on their nodes. 
Focusing on oscillators, coupled along the graphs' edges, we observe that certain triad motifs impose a clear signature on the systems' dynamics, even when embedded in a larger network structure.}, subject = {Netzwerk}, language = {en} } @phdthesis{Hartmann2015, author = {Hartmann, Matthias}, title = {Optimization and Design of Network Architectures for Future Internet Routing}, issn = {1432-8801}, doi = {10.25972/OPUS-11416}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-114165}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {At the center of the Internet's protocol stack stands the Internet Protocol (IP) as a common denominator that enables all communication. To make routing efficient, resilient, and scalable, several aspects must be considered. Care must be taken that traffic is well balanced to make efficient use of the existing network resources, both in failure free operation and in failure scenarios. Finding the optimal routing in a network is an NP-complete problem. Therefore, routing optimization is usually performed using heuristics. This dissertation shows that a routing optimized with one objective function is often not good when looking at other objective functions. It can even be worse than unoptimized routing with respect to that objective function. After looking at failure-free routing and traffic distribution in different failure scenarios, the analysis is extended to include the loop-free alternate (LFA) IP fast reroute mechanism. Different application scenarios of LFAs are examined and a special focus is set on the fact that LFAs usually cannot protect all traffic in a network even against single link failures. Thus, the routing optimization for LFAs is targeted on both link utilization and failure coverage. Finally, the pre-congestion notification mechanism PCN for network admission control and overload protection is analyzed and optimized. 
Different design options for implementing the protocol are compared, before algorithms are developed for the calculation and optimization of protocol parameters and PCN-based routing. The second part of the thesis tackles a routing problem that can only be resolved on a global scale. The scalability of the Internet is at risk since a major and intensifying growth of the interdomain routing tables has been observed. Several protocols and architectures are analyzed that can be used to make interdomain routing more scalable. The most promising approach is the locator/identifier (Loc/ID) split architecture which separates routing from host identification. This way, changes in connectivity, mobility of end hosts, or traffic-engineering activities are hidden from the routing in the core of the Internet and the routing tables can be kept much smaller. All of the currently proposed Loc/ID split approaches have their downsides. In particular, the fact that most architectures use the ID for routing outside the Internet's core is a poor design, which inhibits many of the possible features of a new routing architecture. To better understand the problems and to provide a solution for a scalable routing design that implements a true Loc/ID split, the new GLI-Split protocol is developed in this thesis, which provides separation of global and local routing and uses an ID that is independent from any routing decisions. Besides GLI-Split, several other new routing architectures implementing Loc/ID split have been proposed for the Internet. Most of them assume that a mapping system is queried for EID-to-RLOC mappings by an intermediate node at the border of an edge network. When the mapping system is queried by an intermediate node, packets are already on their way towards their destination, and therefore, the mapping system must be fast, scalable, secure, resilient, and should be able to relay packets without locators to nodes that can forward them to the correct destination. 
The dissertation develops a classification for all proposed mapping system architectures and shows their similarities and differences. Finally, the fast two-level mapping system FIRMS is developed. It includes security and resilience features as well as a relay service for initial packets of a flow when intermediate nodes encounter a cache miss for the EID-to-RLOC mapping.}, subject = {Netzwerk}, language = {en} } @phdthesis{Hock2014, author = {Hock, David Rog{\´e}r}, title = {Analysis and Optimization of Resilient Routing in Core Communication Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-10168}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-101681}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {175}, year = {2014}, abstract = {Routing is one of the most important issues in any communication network. It defines on which path packets are transmitted from the source of a connection to the destination. It allows to control the distribution of flows between different locations in the network and thereby is a means to influence the load distribution or to reach certain constraints imposed by particular applications. As failures in communication networks appear regularly and cannot be completely avoided, routing is required to be resilient against such outages, i.e., routing still has to be able to forward packets on backup paths even if primary paths are not working any more. Throughout the years, various routing technologies have been introduced that are very different in their control structure, in their way of working, and in their ability to handle certain failure cases. Each of the different routing approaches opens up their own specific questions regarding configuration, optimization, and inclusion of resilience issues. This monograph investigates, with the example of three particular routing technologies, some concrete issues regarding the analysis and optimization of resilience. 
It thereby contributes to a better general, technology-independent understanding of these approaches and of their diverse potential for the use in future network architectures. The first considered routing type, is decentralized intra-domain routing based on administrative IP link costs and the shortest path principle. Typical examples are common today's intra-domain routing protocols OSPF and IS-IS. This type of routing includes automatic restoration abilities in case of failures what makes it in general very robust even in the case of severe network outages including several failed components. Furthermore, special IP-Fast Reroute mechanisms allow for a faster reaction on outages. For routing based on link costs, traffic engineering, e.g. the optimization of the maximum relative link load in the network, can be done indirectly by changing the administrative link costs to adequate values. The second considered routing type, MPLS-based routing, is based on the a priori configuration of primary and backup paths, so-called Label Switched Paths. The routing layout of MPLS paths offers more freedom compared to IP-based routing as it is not restricted by any shortest path constraints but any paths can be setup. However, this in general involves a higher configuration effort. Finally, in the third considered routing type, typically centralized routing using a Software Defined Networking (SDN) architecture, simple switches only forward packets according to routing decisions made by centralized controller units. SDN-based routing layouts offer the same freedom as for explicit paths configured using MPLS. In case of a failure, new rules can be setup by the controllers to continue the routing in the reduced topology. However, new resilience issues arise caused by the centralized architecture. If controllers are not reachable anymore, the forwarding rules in the single nodes cannot be adapted anymore. 
This might render a rerouting in case of connection problems in severe failure scenarios infeasible.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Jarschel2014, author = {Jarschel, Michael}, title = {An Assessment of Applications and Performance Analysis of Software Defined Networking}, issn = {1432-8801}, doi = {10.25972/OPUS-10079}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-100795}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {With the introduction of OpenFlow by the Stanford University in 2008, a process began in the area of network research, which questions the predominant approach of fully distributed network control. OpenFlow is a communication protocol that allows the externalization of the network control plane from the network devices, such as a router, and to realize it as a logically-centralized entity in software. For this concept, the term "Software Defined Networking" (SDN) was coined during scientific discourse. For the network operators, this concept has several advantages. The two most important can be summarized under the points cost savings and flexibility. Firstly, it is possible through the uniform interface for network hardware ("Southbound API"), as implemented by OpenFlow, to combine devices and software from different manufacturers, which increases the innovation and price pressure on them. Secondly, the realization of the network control plane as a freely programmable software with open interfaces ("Northbound API") provides the opportunity to adapt it to the individual circumstances of the operator's network and to exchange information with the applications it serves. This allows the network to be more flexible and to react more quickly to changing circumstances as well as transport the traffic more effectively and tailored to the user's "Quality of Experience" (QoE). 
The approach of a separate network control layer for packet-based networks is not new and has already been proposed several times in the past. Therefore, the SDN approach has raised many questions about its feasibility in terms of efficiency and applicability. These questions are caused to some extent by the fact that there is no generally accepted definition of the SDN concept to date. It is therefore a part of this thesis to derive such a definition. In addition, several of the open issues are investigated. These investigations follow three aspects: Performance Evaluation of Software Defined Networking, applications on the SDN control layer, and the usability of SDN Northbound-API for creating application-awareness in network operation. Performance Evaluation of Software Defined Networking: The question of the efficiency of an SDN-based system was from the beginning one of the most important. In this thesis, experimental measurements of the performance of OpenFlow-enabled switch hardware and control software were conducted for the purpose of answering this question. The results of these measurements were used as input parameters for establishing an analytical model of the reactive SDN approach. Through the model it could be determined that the performance of the software control layer, often called "Controller", is crucial for the overall performance of the system, but that the approach is generally viable. Based on this finding a software for analyzing the performance of SDN controllers was developed. This software allows the emulation of the forwarding layer of an SDN network towards the control software and can thus determine its performance in different situations and configurations. The measurements with this software showed that there are quite significant differences in the behavior of different control software implementations. 
Among other things it has been shown that some show different characteristics for various switches, in particular in terms of message processing speed. Under certain circumstances this can lead to network failures. Applications on the SDN control layer: The core piece of software defined networking are the intelligent network applications that operate on the control layer. However, their development is still in its infancy and little is known about the technical possibilities and their limitations. Therefore, the relationship between an SDN-based and classical implementation of a network function is investigated in this thesis. This function is the monitoring of network links and the traffic they carry. A typical approach for this task has been built based on Wiretapping and specialized measurement hardware and compared with an implementation based on OpenFlow switches and a special SDN control application. The results of the comparison show that the SDN version can compete in terms of measurement accuracy for bandwidth and delay estimation with the traditional measurement set-up. However, a compromise has to be found for measurements below the millisecond range. Another question regarding the SDN control applications is whether and how well they can solve existing problems in networks. Two programs have been developed based on SDN in this thesis to solve two typical network issues. Firstly, the tool "IPOM", which enables considerably more flexibility in the study of effects of network structures for a researcher, who is confined to a fixed physical test network topology. The second software provides an interface between the Cloud Orchestration Software "OpenNebula" and an OpenFlow controller. The purpose of this software was to investigate experimentally whether a pre-notification of the network of an impending relocation of a virtual service in a data center is sufficient to ensure the continuous operation of that service. 
This was demonstrated on the example of a video service. Usability of the SDN Northbound API for creating application-awareness in network operation: Currently, the fact that the network and the applications that run on it are developed and operated separately leads to problems in network operation. SDN offers with the Northbound-API an open interface that enables the exchange of information between both worlds during operation. One aim of this thesis was to investigate whether this interface can be exploited so that the QoE experienced by the user can be maintained on a high level. For this purpose, the QoE influence factors were determined on a challenging application by means of a subjective survey study. The application is cloud gaming, in which the calculation of video game environments takes place in the cloud and is transported via video over the network to the user. It was shown that apart from the most important factor influencing QoS, i.e., packet loss on the downlink, also the type of game and its speed play a role. This demonstrates that in addition to QoS the application state is important and should be communicated to the network. Since an implementation of such a state conscious SDN for the example of Cloud Gaming was not possible due to its proprietary implementation, in this thesis the application "YouTube video streaming" was chosen as an alternative. For this application, status information is retrievable via the "Yomo" tool and can be used for network control. 
It was shown that an SDN-based implementation of an application-aware network has distinct advantages over traditional network management methods and the user quality can be obtained in spite of disturbances.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Rueppel2014, author = {R{\"u}ppel, Frederike}, title = {Accessibility of Bilinear Interconnected Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-99250}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {The subject of this thesis is the controllability of interconnected linear systems, where the interconnection parameters are the control variables. The study of accessibility and controllability of bilinear systems is closely related to their system Lie algebra. In 1976, Brockett classified all possible system Lie algebras of linear single-input, single-output (SISO) systems under time-varying output feedback. Here, Brockett's results are generalized to networks of linear systems, where time-varying output feedback is applied according to the interconnection structure of the network. First, networks of linear SISO systems are studied and it is assumed that all interconnections are independently controllable. By calculating the system Lie algebra it is shown that accessibility of the controlled network is equivalent to the strong connectedness of the underlying interconnection graph in case the network has at least three subsystems. Networks with two subsystems are not captured by these proofs. Thus, we give results for this particular case under additional assumptions either on the graph structure or on the dynamics of the node systems, which are both not necessary. Additionally, the system Lie algebra is studied in case the interconnection graph is not strongly connected. Then, we show how to adapt the ideas of proof to networks of multi-input, multi-output (MIMO) systems. 
We generalize results for the system Lie algebra on networks of MIMO systems both under output feedback and under restricted output feedback. Moreover, the case with generalized interconnections is studied, i.e. parallel edges and linear dependencies in the interconnection controls are allowed. The new setting demands to distinguish between homogeneous and heterogeneous networks. With this new setting only sufficient conditions can be found to guarantee accessibility of the controlled network. As an example, networks with Toeplitz interconnection structure are studied.}, subject = {Steuerbarkeit}, language = {en} } @phdthesis{Zeeb2013, author = {Zeeb, Steffen}, title = {Chaos Synchronization in Time-Delayed Coupled Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78966}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {Die vorliegende Arbeit befasst sich mit der Untersuchung verschiedener Aspekte der Chaos Synchronisation von Netzwerken mit zeitverz{\"o}gerten Kopplungen. Ein Netzwerk aus identischen chaotischen Einheiten kann vollst{\"a}ndig und isochron synchronisieren, auch wenn der Signalaustausch einer starken Zeitverz{\"o}gerung unterliegt. Im ersten Teil der Arbeit werden Systeme mit mehreren Zeitverz{\"o}gerungen betrachtet. Dabei erstrecken sich die verschiedenen Zeitverz{\"o}gerungen jeweils {\"u}ber einen weiten Bereich an Gr{\"o}ßenordnungen. Es wird gezeigt, dass diese Zeitverz{\"o}gerungen im Lyapunov Spektrum des Systems auftreten; verschiedene Teile des Spektrums skalieren jeweils mit einer der Zeitverz{\"o}gerungen. Anhand des Skalierungsverhaltens des maximalen Lyapunov Exponenten k{\"o}nnen verschiedene Arten von Chaos definiert werden. Diese bestimmen die Synchronisationseigenschaften eines Netzwerkes und werden insbesondere wichtig bei hierarchischen Netzwerken, d.h. 
bei Netzwerken bestehend aus Unternetzwerken, bei welchen Signale innerhalb des Unternetzwerkes auf einer anderen Zeitskala ausgetauscht werden als zwischen verschiedenen Unternetzwerken. F{\"u}r ein solches System kann sowohl vollst{\"a}ndige als auch Unternetzwerksynchronisation auftreten. Skaliert der maximale Lyapunov Exponent mit der k{\"u}rzeren Zeitverz{\"o}gerung des Unternetzwerkes dann k{\"o}nnen nur die Elemente des Unternetzwerkes synchronisieren. Skaliert der maximale Lyapunov Exponent allerdings mit der l{\"a}ngeren Zeitverz{\"o}gerung kann das komplette Netzwerk vollst{\"a}ndig synchronisieren. Dies wird analytisch f{\"u}r die Bernoulli Abbildung und numerisch f{\"u}r die Zelt Abbildung gezeigt. Der zweite Teil befasst sich mit der Attraktordimension und ihrer {\"A}nderung am {\"U}bergang zur vollst{\"a}ndiger Chaos Synchronisation. Aus dem Lyapunov Spektrum des Systems wird die Kaplan-Yorke Dimension berechnet und es wird gezeigt, dass diese am Synchronisations{\"u}bergang aus physikalischen Gr{\"u}nden einen Sprung haben muss. Aus der Zeitreihe der Dynamik des Systems wird die Korrelationsdimension bestimmt und anschließend mit der Kaplan-Yorke Dimension verglichen. F{\"u}r Bernoulli Systeme finden wir in der Tat eine Diskontinuit{\"a}t in der Korrelationsdimension. Die St{\"a}rke des Sprungs der Kaplan-Yorke Dimension wird f{\"u}r ein Netzwerk aus Bernoulli Einheiten als Funktion der Netzwerkgr{\"o}ße berechnet. Desweiteren wird das Skalierungsverhalten der Kaplan-Yorke Dimension sowie der Kolmogoroventropie in Abh{\"a}ngigkeit der Systemgr{\"o}ße und der Zeitverz{\"o}gerung untersucht. Zu guter Letzt wird eine Verstimmung der Einheiten, d.h., ein "parameter mismatch", eingef{\"u}hrt und analysiert wie diese das Verhalten der Attraktordimension {\"a}ndert. Im dritten und letzten Teil wird die lineare Antwort eines synchronisierten chaotischen Systems auf eine kleine externe St{\"o}rung untersucht. 
Diese St{\"o}rung bewirkt eine Abweichung der Einheiten vom perfekt synchronisierten Zustand. Die Verteilung der Abst{\"a}nde zwischen zwei Einheiten dient als Maß f{\"u}r die lineare Antwort des Systems. Diese Verteilung sowie ihre Momente werden numerisch und f{\"u}r Spezialf{\"a}lle auch analytisch berechnet. Wir finden, dass im synchronisierten Zustand, in Abh{\"a}ngigkeit der Parameter des Systems, Verteilungen auftreten k{\"o}nnen die einem Potenzgesetz gehorchen und dessen Momente divergieren. Als weiteres Maß f{\"u}r die lineare Antwort wird die Bit Error Rate einer {\"u}bermittelten bin{\"a}ren Nachricht verwendet. Die Bit Error Rate ist durch ein Integral {\"u}ber die Verteilung der Abst{\"a}nde gegeben. In dieser Arbeit wird sie vorwiegend numerisch untersucht und wir finden ein komplexes, nicht monotones Verhalten als Funktion der Kopplungsst{\"a}rke. F{\"u}r Spezialf{\"a}lle weist die Bit Error Rate eine "devil's staircase" auf, welche mit einer fraktalen Struktur in der Verteilung der Abst{\"a}nde verkn{\"u}pft ist. Die lineare Antwort des Systems auf eine harmonische St{\"o}rung wird ebenfalls untersucht. Es treten Resonanzen auf, welche in Abh{\"a}ngigkeit von der Zeitverz{\"o}gerung unterdr{\"u}ckt oder verst{\"a}rkt werden. Eine bi-direktional gekoppelte Kette aus drei Einheiten kann eine St{\"o}rung vollst{\"a}ndig heraus filtern, so dass die Bit Error Rate und auch das zweite Moment verschwinden.}, subject = {Chaostheorie}, language = {en} } @phdthesis{Heiligenthal2012, author = {Heiligenthal, Sven}, title = {Strong and Weak Chaos in Networks of Semiconductor Lasers with Time-Delayed Couplings}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-77958}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {This thesis deals with the chaotic dynamics of nonlinear networks consisting of semiconductor lasers which have time-delayed self-feedbacks or mutual couplings. 
These semiconductor lasers are simulated numerically by the Lang-Kobayashi equations. The central issue is how the chaoticity of the lasers, measured by the maximal Lyapunov exponent, changes when the delay time is changed. It is analysed how this change of chaoticity with increasing delay time depends on the reflectivity of the mirror for the self-feedback or the strength of the mutual coupling, respectively. The consequences of the different types of chaos for the effect of chaos synchronization of mutually coupled semiconductor lasers are deduced and discussed. At the beginning of this thesis, the master stability formalism for the stability analysis of nonlinear networks with delay is explained. After the description of the Lang-Kobayashi equations and their linearizations as a model for the numerical simulation of semiconductor lasers with time-delayed couplings, the artificial sub-Lyapunov exponent \$\lambda_{0}\$ is introduced. It is explained how the sign of the sub-Lyapunov exponent can be determined by experiments. The notions of "strong chaos" and "weak chaos" are introduced and distinguished by their different scaling properties of the maximal Lyapunov exponent with the delay time. The sign of the sub-Lyapunov exponent \$\lambda_{0}\$ is shown to determine the occurrence of strong or weak chaos. The transition sequence "weak to strong chaos and back to weak chaos" upon monotonically increasing the coupling strength \$\sigma\$ of a single laser's self-feedback is shown for numerical calculations of the Lang-Kobayashi equations. At the transition between strong and weak chaos, the sub-Lyapunov exponent vanishes, \$\lambda_{0}=0\$, resulting in a special scaling behaviour of the maximal Lyapunov exponent with the delay time. Transitions between strong and weak chaos by changing \$\sigma\$ can also be found for the R{\"o}ssler and Lorenz dynamics. 
The connection between the sub-Lyapunov exponent and the time-dependent eigenvalues of the Jacobian for the internal laser dynamics is analysed. Counterintuitively, the difference between strong and weak chaos is not directly visible from the trajectory although the difference of the trajectories induces the transitions between the two types of chaos. In addition, it is shown that a linear measure like the auto-correlation function cannot unambiguously reveal the difference between strong and weak chaos either. Although the auto-correlations after one delay time are significantly higher for weak chaos than for strong chaos, it is not possible to detect a qualitative difference. If two time-scale separated self-feedbacks are present, the shorter feedback has to be taken into account for the definition of a new sub-Lyapunov exponent \$\lambda_{0,s}\$, which in this case determines the occurrence of strong or weak chaos. If the two self-feedbacks have comparable delay times, the sub-Lyapunov exponent \$\lambda_{0}\$ remains the criterion for strong or weak chaos. It is shown that the sub-Lyapunov exponent scales with the square root of the effective pump current \$\sqrt{p-1}\$, both in its magnitude and in the position of the critical coupling strengths. For networks with several distinct sub-Lyapunov exponents, it is shown that the maximal sub-Lyapunov exponent of the network determines whether the network's maximal Lyapunov exponent scales strongly or weakly with increasing delay time. As a consequence, complete synchronization of a network is excluded for arbitrary networks which contain at least one strongly chaotic laser. Furthermore, it is demonstrated that the sub-Lyapunov exponent of a driven laser depends on the number of the incoherently superimposed inputs from unsynchronized input lasers. 
For networks of delay-coupled lasers operating in weak chaos, the condition \$|\gamma_{2}|<\mathrm{e}^{-\lambda_{\mathrm{m}}\,\tau}\$ for stable chaos synchronization is deduced using the master stability formalism. Hence, synchronization of any network depends only on the properties of a single laser with self-feedback and the eigenvalue gap of the coupling matrix. The characteristics of the master stability function for the Lang-Kobayashi dynamics is described, and consequently, the master stability function is refined to allow for precise practical prediction of synchronization. The prediction of synchronization with the master stability function is demonstrated for bidirectional and unidirectional networks. Furthermore, the master stability function is extended for two distinct delay times. Finally, symmetries and resonances for certain values of the ratio of the delay times are shown for the master stability function of the Lang-Kobayashi equations.}, subject = {Halbleiterlaser}, language = {en} } @phdthesis{Menth2004, author = {Menth, Michael}, title = {Efficient admission control and routing for resilient communication networks}, doi = {10.25972/OPUS-846}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9949}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {This work is subdivided into two main areas: resilient admission control and resilient routing. The work gives an overview of the state of the art of quality of service mechanisms in communication networks and proposes a categorization of admission control (AC) methods. These approaches are investigated regarding performance, more precisely, regarding the potential resource utilization by dimensioning the capacity for a network with a given topology, traffic matrix, and a required flow blocking probability. In case of a failure, the affected traffic is rerouted over backup paths which increases the traffic rate on the respective links. 
To guarantee the effectiveness of admission control also in failure scenarios, the increased traffic rate must be taken into account for capacity dimensioning and leads to resilient AC. Capacity dimensioning is not feasible for existing networks with already given link capacities. For the application of resilient NAC in this case, the size of distributed AC budgets must be adapted according to the traffic matrix in such a way that the maximum blocking probability for all flows is minimized and that the capacity of all links is not exceeded by the admissible traffic rate in any failure scenario. Several algorithms for the solution of that problem are presented and compared regarding their efficiency and fairness. A prototype for resilient AC was implemented in the laboratories of Siemens AG in Munich within the scope of the project KING. Resilience requires additional capacity on the backup paths for failure scenarios. The amount of this backup capacity depends on the routing and can be minimized by routing optimization. New protection switching mechanisms are presented that deviate the traffic quickly around outage locations. They are simple and can be implemented, e.g., by MPLS technology. The Self-Protecting Multi-Path (SPM) is a multi-path consisting of disjoint partial paths. The traffic is distributed over all faultless partial paths according to an optimized load balancing function both in the working case and in failure scenarios. Performance studies show that the network topology and the traffic matrix also influence the amount of required backup capacity significantly. The example of the COST-239 network illustrates that conventional shortest path routing may need 50\% more capacity than the optimized SPM if all single link and node failures are protected.}, subject = {Kommunikation}, language = {en} }