@phdthesis{DinhXuan2018, author = {Dinh-Xuan, Lam}, title = {Quality of Experience Assessment of Cloud Applications and Performance Evaluation of VNF-Based QoE Monitoring}, issn = {1432-8801}, doi = {10.25972/OPUS-16918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-169182}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {In this thesis various aspects of Quality of Experience (QoE) research are examined. The work is divided into three major blocks: QoE Assessment, QoE Monitoring, and VNF Performance Evaluation. First, prominent cloud applications such as Google Docs and a cloud-based photo album are explored. The QoE is characterized and the influence of packet loss and delay is studied. Afterwards, objective QoE monitoring for HTTP Adaptive Video Streaming (HAS) in the cloud is investigated. Additionally, by using a Virtual Network Function (VNF) for QoE monitoring in the cloud, the feasibility of an interworking of Network Function Virtualization (NFV) and cloud paradigm is evaluated. To this end, a VNF that exploits deep packet inspection technique was used to parse the video traffic. An algorithm is then designed accordingly to estimate video quality and QoE based on network and application layer parameters. To assess the accuracy of the estimation, the VNF is measured in different scenarios under different network QoS and the virtual environment of the cloud architecture. The insights show that the different geographical deployments of the VNF influence the accuracy of the video quality and QoE estimation. Various Service Function Chain (SFC) placement algorithms have been proposed and compared in the context of edge cloud networks. On the one hand, this research is aimed at cloud service providers by providing methods for evaluating QoE for cloud applications. 
On the other hand, network operators can learn the pitfalls and disadvantages of using the NFV paradigm for such a QoE monitoring mechanism.}, subject = {Quality of Experience}, language = {en} } @phdthesis{Englert2011, author = {Englert, Anja}, title = {Chaossynchronisation in Netzwerken mit zeitverz{\"o}gerten Kopplungen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65454}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Die vorliegende Arbeit besch{\"a}ftigt sich mit der Chaossynchronisation in Netzwerken mit zeitverz{\"o}gerten Kopplungen. Ein Netzwerk chaotischer Einheiten kann isochron und vollst{\"a}ndig synchronisieren, auch wenn der Austausch der Signale einer oder mehreren Verz{\"o}gerungszeiten unterliegt. In einem Netzwerk identischer Einheiten hat sich als Stabilit{\"a}tsanalyse die Methode der Master Stability Funktion von Pecora und Carroll etabliert. Diese entspricht f{\"u}r ein Netzwerk gekoppelter iterativer Bernoulli-Abbildungen Polynomen vom Grade der gr{\"o}ßten Verz{\"o}gerungszeit. Das Stabilit{\"a}tsproblem reduziert sich somit auf die Untersuchung der Nullstellen dieser Polynome hinsichtlich ihrer Lage bez{\"u}glich des Einheitskreises. Eine solche Untersuchung kann beispielsweise numerisch mit dem Schur-Cohn-Theorem erfolgen, doch auch analytische Ergebnisse lassen sich erzielen. In der vorliegenden Arbeit werden Bernoulli-Netzwerke mit einer oder mehreren zeitverz{\"o}gerten Kopplungen und/oder R{\"u}ckkopplungen untersucht. Hierbei werden Aussagen {\"u}ber Teile des Stabilit{\"a}tsgebietes getroffen, welche unabh{\"a}ngig von den Verz{\"o}gerungszeiten sind. Des Weiteren werden Aussagen zu Systemen gemacht, welche sehr große Verz{\"o}gerungszeiten aufweisen. Insbesondere wird gezeigt, dass in einem Bernoulli-Netzwerk keine stabile Chaossynchronisation m{\"o}glich ist, wenn die vorhandene Verz{\"o}gerungszeit sehr viel gr{\"o}ßer ist als die Zeitskala der lokalen Dynamik, bzw. der Lyapunovzeit. 
Außerdem wird in bestimmten Systemen mit mehreren Verz{\"o}gerungszeiten anhand von Symmetriebetrachtungen stabile Chaossynchronisation ausgeschlossen, wenn die Verz{\"o}gerungszeiten in bestimmten Verh{\"a}ltnissen zueinander stehen. So ist in einem doppelt bidirektional gekoppeltem Paar ohne R{\"u}ckkopplung und mit zwei verschiedenen Verz{\"o}gerungszeiten stabile Chaossynchronisation nicht m{\"o}glich, wenn die Verz{\"o}gerungszeiten in einem Verh{\"a}ltnis von teilerfremden ungeraden ganzen Zahlen zueinander stehen. Es kann zudem Chaossynchronisation ausgeschlossen werden, wenn in einem bipartiten Netzwerk mit zwei großen Verz{\"o}gerungszeiten zwischen diesen eine kleine Differenz herrscht. Schließlich wird ein selbstkonsistentes Argument vorgestellt, das das Auftreten von Chaossynchronisation durch die Mischung der Signale der einzelnen Einheiten interpretiert und sich unter anderem auf die Teilerfremdheit der Zyklen eines Netzes st{\"u}tzt. Abschließend wird untersucht, ob einige der durch die Bernoulli-Netzwerke gefundenen Ergebnisse sich auf andere chaotische Netzwerke {\"u}bertragen lassen. Hervorzuheben ist die sehr gute {\"U}bereinstimmung der Ergebnisse eines Bernoulli-Netzwerkes mit den Ergebnissen eines gleichartigen Netzwerkes gekoppelter Halbleiterlasergleichungen, sowie die {\"U}bereinstimmungen mit experimentellen Ergebnissen eines Systems von Halbleiterlasern.}, subject = {Chaos}, language = {de} } @phdthesis{Gebert2017, author = {Gebert, Steffen Christian}, title = {Architectures for Softwarized Networks and Their Performance Evaluation}, issn = {1432-8801}, doi = {10.25972/OPUS-15063}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150634}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {This thesis contributes to several issues in the context of SDN and NFV, with an emphasis on performance and management. 
The main contributions are guidelines for operators migrating to software-based networks, as well as an analytical model for the packet processing in a Linux system using the Kernel NAPI.}, subject = {Telekommunikationsnetz}, language = {en} } @phdthesis{Haas2012, author = {Haas, Katrin}, title = {Charakterisierung der Versorgungsqualit{\"a}t beim akuten ST-Hebungsmyokardinfarkt im Herzinfarktnetz Mainfranken zu Beginn der Netzwerkbildung}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-80388}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Ziele: Evaluierung der Versorgungslage von Patienten mit akutem ST-Hebungsinfarkt im 2007 neu gegr{\"u}ndeten Herzinfarktnetz Mainfranken und Vergleich von Ist-Zustand und Leitlinienempfehlungen. Analyse der Behandlungszeiten und Identifizierung von Verbesserungsm{\"o}glichkeiten im Netzwerk. Dar{\"u}ber hinaus sollte untersucht werden, ob Feedbackveranstaltungen als Qualit{\"a}tsmanagement-Intervention die Behandlungszeiten im Laufe des Untersuchungszeitraumes verbessern. Methoden: Von Oktober 2007 bis Dezember 2008 wurden verschiedene Basisdaten sowie die Daten der Rettungs- und Therapiekette von Patienten mit akutem ST-Hebungsinfarkt (Symptomdauer <12h), die in der Medizinischen Klinik und Poliklinik I des Universit{\"a}tsklinikums W{\"u}rzburg mit dem Ziel einer PCI akut-koronarangiographiert wurden, im Rahmen der multizentrischen FiTT-STEMI-Studie prospektiv erfasst. Im Untersuchungszeitraum wurden die analysierten Daten alle drei Monate im Rahmen einer Feedbackveranstaltung allen Beteiligten der Rettungs- und Therapiekette demonstriert. Ergebnisse: Im genannten Zeitraum konnten 188 Patienten in die Studie eingeschlossen werden (19\% weiblich, 81\% m{\"a}nnlich), wovon 85\% eine PCI im Anschluss an die Koronarangiographie erhielten. Das mittlere Alter betrug 62±12 Jahre, 15\% der Patienten waren {\"u}ber 75 Jahre. Der mittlere TIMI-Risk-Score lag bei 3,7 Punkten. 
Die intrahospitale Letalit{\"a}t lag bei 6,9\%. Die Pr{\"a}hospitalzeit betrug im Median 120min; es ergab sich keine signifikante Ver{\"a}nderung {\"u}ber die Quartale. Ein Sekund{\"a}rtransport bzw. ein pr{\"a}hospitaler Kontakt zum Hausarzt verl{\"a}ngerten die Pr{\"a}hospitalzeit im Median um 173 bzw. 57min. Die Door-to-balloon(D2B)-Zeit betrug im Gesamtuntersuchungszeitraum im Median 76min, nur 33\% der Patienten erreichten eine leitliniengerechte D2B-Zeit von <60min. Die meiste Zeit innerhalb der D2B-Zeit entfiel auf die Zeit vom Erreichen der PCI-Klinik bis zum Herzkatheterlabor (Door-to-cath-Zeit). Die Verk{\"u}rzung der D2B-Zeit von 80min im ersten auf 70min im f{\"u}nften Quartal war statistisch nicht signifikant. Die Contact-to-balloon(C2B)-Zeit betrug im Gesamtuntersuchungszeitraum im Median 139min und konnte innerhalb des Untersuchungszeitraums statistisch signifikant von 164min im ersten auf 112min im f{\"u}nften Quartal gesenkt werden. Dadurch konnte die Anzahl der leitliniengerecht behandelten Patienten (C2B-Zeit<120min) von 15 auf 58\% im Gesamtkollektiv bzw. 24 auf 63\% bei Patienten mit Prim{\"a}rtransport erh{\"o}ht werden. Schlussfolgerung: Das Patientenkollektiv des Herzinfarktnetzes Mainfranken entsprach bez{\"u}glich der Basischarakteristika dem anderer nationaler und internationaler Register. Da eine PCI innerhalb von 120min nach medizinischem Erstkontakt als bestm{\"o}gliche Therapie beim ST-Hebungsinfarkt angesehen wird und trotz der Verbesserung im Untersuchungszeitraum im f{\"u}nften Quartal nur 58\% der Patienten eine PCI in diesem Zeitintervall erhielten, sollten alle Anstrengungen unternommen werden die D2B- und C2B-Zeiten im Herzinfarktnetz weiter zu verk{\"u}rzen. Hierf{\"u}r sollte eine Direkt{\"u}bergabe im Herzkatheterlabor erm{\"o}glicht werden, da die Door-to-cath-Zeit in W{\"u}rzburg im Median 36 bis 48min in Anspruch nahm. 
Dar{\"u}ber hinaus sollte durch Aufkl{\"a}rungs- und Informationsarbeit sowie Schulungen f{\"u}r Rettungspersonal und Patienten versucht werden einen Sekund{\"a}rtransport oder Hausarztkontakt sowie ein verz{\"o}gertes Alarmieren des Rettungsdienstes zu vermeiden, da sich hierdurch die Pr{\"a}hospitalzeit massiv verl{\"a}ngerte. Inwieweit die im Untersuchungszeitraum gezeigte Verk{\"u}rzung der Zeiten mit den durchgef{\"u}hrten Feedbackveranstaltungen zusammenh{\"a}ngt bleibt ungewiss, da die Ver{\"a}nderung auch durch die Etablierung des neu gegr{\"u}ndeten Netzwerks an sich bedingt sein kann.}, subject = {Herzinfarkt}, language = {de} } @phdthesis{Hartmann2015, author = {Hartmann, Matthias}, title = {Optimization and Design of Network Architectures for Future Internet Routing}, issn = {1432-8801}, doi = {10.25972/OPUS-11416}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-114165}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {At the center of the Internet's protocol stack stands the Internet Protocol (IP) as a common denominator that enables all communication. To make routing efficient, resilient, and scalable, several aspects must be considered. Care must be taken that traffic is well balanced to make efficient use of the existing network resources, both in failure free operation and in failure scenarios. Finding the optimal routing in a network is an NP-complete problem. Therefore, routing optimization is usually performed using heuristics. This dissertation shows that a routing optimized with one objective function is often not good when looking at other objective functions. It can even be worse than unoptimized routing with respect to that objective function. After looking at failure-free routing and traffic distribution in different failure scenarios, the analysis is extended to include the loop-free alternate (LFA) IP fast reroute mechanism. 
Different application scenarios of LFAs are examined and a special focus is set on the fact that LFAs usually cannot protect all traffic in a network even against single link failures. Thus, the routing optimization for LFAs is targeted on both link utilization and failure coverage. Finally, the pre-congestion notification mechanism PCN for network admission control and overload protection is analyzed and optimized. Different design options for implementing the protocol are compared, before algorithms are developed for the calculation and optimization of protocol parameters and PCN-based routing. The second part of the thesis tackles a routing problem that can only be resolved on a global scale. The scalability of the Internet is at risk since a major and intensifying growth of the interdomain routing tables has been observed. Several protocols and architectures are analyzed that can be used to make interdomain routing more scalable. The most promising approach is the locator/identifier (Loc/ID) split architecture which separates routing from host identification. This way, changes in connectivity, mobility of end hosts, or traffic-engineering activities are hidden from the routing in the core of the Internet and the routing tables can be kept much smaller. All of the currently proposed Loc/ID split approaches have their downsides. In particular, the fact that most architectures use the ID for routing outside the Internet's core is a poor design, which inhibits many of the possible features of a new routing architecture. To better understand the problems and to provide a solution for a scalable routing design that implements a true Loc/ID split, the new GLI-Split protocol is developed in this thesis, which provides separation of global and local routing and uses an ID that is independent from any routing decisions. Besides GLI-Split, several other new routing architectures implementing Loc/ID split have been proposed for the Internet. 
Most of them assume that a mapping system is queried for EID-to-RLOC mappings by an intermediate node at the border of an edge network. When the mapping system is queried by an intermediate node, packets are already on their way towards their destination, and therefore, the mapping system must be fast, scalable, secure, resilient, and should be able to relay packets without locators to nodes that can forward them to the correct destination. The dissertation develops a classification for all proposed mapping system architectures and shows their similarities and differences. Finally, the fast two-level mapping system FIRMS is developed. It includes security and resilience features as well as a relay service for initial packets of a flow when intermediate nodes encounter a cache miss for the EID-to-RLOC mapping.}, subject = {Netzwerk}, language = {en} } @phdthesis{Heiligenthal2012, author = {Heiligenthal, Sven}, title = {Strong and Weak Chaos in Networks of Semiconductor Lasers with Time-Delayed Couplings}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-77958}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {This thesis deals with the chaotic dynamics of nonlinear networks consisting of semiconductor lasers which have time-delayed self-feedbacks or mutual couplings. These semiconductor lasers are simulated numerically by the Lang-Kobayashi equations. The central issue is how the chaoticity of the lasers, measured by the maximal Lyapunov exponent, changes when the delay time is changed. It is analysed how this change of chaoticity with increasing delay time depends on the reflectivity of the mirror for the self-feedback or the strength of the mutal coupling, respectively. The consequences of the different types of chaos for the effect of chaos synchronization of mutually coupled semiconductor lasers are deduced and discussed. 
At the beginning of this thesis, the master stability formalism for the stability analysis of nonlinear networks with delay is explained. After the description of the Lang-Kobayashi equations and their linearizations as a model for the numerical simulation of semiconductor lasers with time-delayed couplings, the artificial sub-Lyapunov exponent \$\lambda_{0}\$ is introduced. It is explained how the sign of the sub-Lyapunov exponent can be determined by experiments. The notions of "strong chaos" and "weak chaos" are introduced and distinguished by their different scaling properties of the maximal Lyapunov exponent with the delay time. The sign of the sub-Lyapunov exponent \$\lambda_{0}\$ is shown to determine the occurrence of strong or weak chaos. The transition sequence "weak to strong chaos and back to weak chaos" upon monotonically increasing the coupling strength \$\sigma\$ of a single laser's self-feedback is shown for numerical calculations of the Lang-Kobayashi equations. At the transition between strong and weak chaos, the sub-Lyapunov exponent vanishes, \$\lambda_{0}=0\$, resulting in a special scaling behaviour of the maximal Lyapunov exponent with the delay time. Transitions between strong and weak chaos by changing \$\sigma\$ can also be found for the R{\"o}ssler and Lorenz dynamics. The connection between the sub-Lyapunov exponent and the time-dependent eigenvalues of the Jacobian for the internal laser dynamics is analysed. Counterintuitively, the difference between strong and weak chaos is not directly visible from the trajectory although the difference of the trajectories induces the transitions between the two types of chaos. In addition, it is shown that a linear measure like the auto-correlation function cannot unambiguously reveal the difference between strong and weak chaos either. 
Although the auto-correlations after one delay time are significantly higher for weak chaos than for strong chaos, it is not possible to detect a qualitative difference. If two time-scale separated self-feedbacks are present, the shorter feedback has to be taken into account for the definition of a new sub-Lyapunov exponent \$\lambda_{0,s}\$, which in this case determines the occurrence of strong or weak chaos. If the two self-feedbacks have comparable delay times, the sub-Lyapunov exponent \$\lambda_{0}\$ remains the criterion for strong or weak chaos. It is shown that the sub-Lyapunov exponent scales with the square root of the effective pump current \$\sqrt{p-1}\$, both in its magnitude and in the position of the critical coupling strengths. For networks with several distinct sub-Lyapunov exponents, it is shown that the maximal sub-Lyapunov exponent of the network determines whether the network's maximal Lyapunov exponent scales strongly or weakly with increasing delay time. As a consequence, complete synchronization of a network is excluded for arbitrary networks which contain at least one strongly chaotic laser. Furthermore, it is demonstrated that the sub-Lyapunov exponent of a driven laser depends on the number of the incoherently superimposed inputs from unsynchronized input lasers. For networks of delay-coupled lasers operating in weak chaos, the condition \$|\gamma_{2}|<\mathrm{e}^{-\lambda_{\mathrm{m}}\,\tau}\$ for stable chaos synchronization is deduced using the master stability formalism. Hence, synchronization of any network depends only on the properties of a single laser with self-feedback and the eigenvalue gap of the coupling matrix. The characteristics of the master stability function for the Lang-Kobayashi dynamics is described, and consequently, the master stability function is refined to allow for precise practical prediction of synchronization. 
The prediction of synchronization with the master stability function is demonstrated for bidirectional and unidirectional networks. Furthermore, the master stability function is extended for two distinct delay times. Finally, symmetries and resonances for certain values of the ratio of the delay times are shown for the master stability function of the Lang-Kobayashi equations.}, subject = {Halbleiterlaser}, language = {en} } @phdthesis{Hock2014, author = {Hock, David Rog{\'e}r}, title = {Analysis and Optimization of Resilient Routing in Core Communication Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-10168}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-101681}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {175}, year = {2014}, abstract = {Routing is one of the most important issues in any communication network. It defines on which path packets are transmitted from the source of a connection to the destination. It allows to control the distribution of flows between different locations in the network and thereby is a means to influence the load distribution or to reach certain constraints imposed by particular applications. As failures in communication networks appear regularly and cannot be completely avoided, routing is required to be resilient against such outages, i.e., routing still has to be able to forward packets on backup paths even if primary paths are not working any more. Throughout the years, various routing technologies have been introduced that are very different in their control structure, in their way of working, and in their ability to handle certain failure cases. Each of the different routing approaches opens up their own specific questions regarding configuration, optimization, and inclusion of resilience issues. This monograph investigates, with the example of three particular routing technologies, some concrete issues regarding the analysis and optimization of resilience. 
It thereby contributes to a better general, technology-independent understanding of these approaches and of their diverse potential for the use in future network architectures. The first considered routing type, is decentralized intra-domain routing based on administrative IP link costs and the shortest path principle. Typical examples are common today's intra-domain routing protocols OSPF and IS-IS. This type of routing includes automatic restoration abilities in case of failures what makes it in general very robust even in the case of severe network outages including several failed components. Furthermore, special IP-Fast Reroute mechanisms allow for a faster reaction on outages. For routing based on link costs, traffic engineering, e.g. the optimization of the maximum relative link load in the network, can be done indirectly by changing the administrative link costs to adequate values. The second considered routing type, MPLS-based routing, is based on the a priori configuration of primary and backup paths, so-called Label Switched Paths. The routing layout of MPLS paths offers more freedom compared to IP-based routing as it is not restricted by any shortest path constraints but any paths can be setup. However, this in general involves a higher configuration effort. Finally, in the third considered routing type, typically centralized routing using a Software Defined Networking (SDN) architecture, simple switches only forward packets according to routing decisions made by centralized controller units. SDN-based routing layouts offer the same freedom as for explicit paths configured using MPLS. In case of a failure, new rules can be setup by the controllers to continue the routing in the reduced topology. However, new resilience issues arise caused by the centralized architecture. If controllers are not reachable anymore, the forwarding rules in the single nodes cannot be adapted anymore. 
This might render a rerouting in case of connection problems in severe failure scenarios infeasible.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Jarschel2014, author = {Jarschel, Michael}, title = {An Assessment of Applications and Performance Analysis of Software Defined Networking}, issn = {1432-8801}, doi = {10.25972/OPUS-10079}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-100795}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {With the introduction of OpenFlow by the Stanford University in 2008, a process began in the area of network research, which questions the predominant approach of fully distributed network control. OpenFlow is a communication protocol that allows the externalization of the network control plane from the network devices, such as a router, and to realize it as a logically-centralized entity in software. For this concept, the term "Software Defined Networking" (SDN) was coined during scientific discourse. For the network operators, this concept has several advantages. The two most important can be summarized under the points cost savings and flexibility. Firstly, it is possible through the uniform interface for network hardware ("Southbound API"), as implemented by OpenFlow, to combine devices and software from different manufacturers, which increases the innovation and price pressure on them. Secondly, the realization of the network control plane as a freely programmable software with open interfaces ("Northbound API") provides the opportunity to adapt it to the individual circumstances of the operator's network and to exchange information with the applications it serves. This allows the network to be more flexible and to react more quickly to changing circumstances as well as transport the traffic more effectively and tailored to the user's "Quality of Experience" (QoE). 
The approach of a separate network control layer for packet-based networks is not new and has already been proposed several times in the past. Therefore, the SDN approach has raised many questions about its feasibility in terms of efficiency and applicability. These questions are caused to some extent by the fact that there is no generally accepted definition of the SDN concept to date. It is therefore a part of this thesis to derive such a definition. In addition, several of the open issues are investigated. These investigations follow three aspects: Performance Evaluation of Software Defined Networking, applications on the SDN control layer, and the usability of the SDN Northbound-API for creating application-awareness in network operation. Performance Evaluation of Software Defined Networking: The question of the efficiency of an SDN-based system was from the beginning one of the most important. In this thesis, experimental measurements of the performance of OpenFlow-enabled switch hardware and control software were conducted for the purpose of answering this question. The results of these measurements were used as input parameters for establishing an analytical model of the reactive SDN approach. Through the model it could be determined that the performance of the software control layer, often called "Controller", is crucial for the overall performance of the system, but that the approach is generally viable. Based on this finding a software for analyzing the performance of SDN controllers was developed. This software allows the emulation of the forwarding layer of an SDN network towards the control software and can thus determine its performance in different situations and configurations. The measurements with this software showed that there are quite significant differences in the behavior of different control software implementations. 
Among other things it has been shown that some show different characteristics for various switches, in particular in terms of message processing speed. Under certain circumstances this can lead to network failures. Applications on the SDN control layer: The core piece of software defined networking are the intelligent network applications that operate on the control layer. However, their development is still in its infancy and little is known about the technical possibilities and their limitations. Therefore, the relationship between an SDN-based and classical implementation of a network function is investigated in this thesis. This function is the monitoring of network links and the traffic they carry. A typical approach for this task has been built based on Wiretapping and specialized measurement hardware and compared with an implementation based on OpenFlow switches and a special SDN control application. The results of the comparison show that the SDN version can compete in terms of measurement accuracy for bandwidth and delay estimation with the traditional measurement set-up. However, a compromise has to be found for measurements below the millisecond range. Another question regarding the SDN control applications is whether and how well they can solve existing problems in networks. Two programs have been developed based on SDN in this thesis to solve two typical network issues. Firstly, the tool "IPOM", which enables considerably more flexibility in the study of effects of network structures for a researcher, who is confined to a fixed physical test network topology. The second software provides an interface between the Cloud Orchestration Software "OpenNebula" and an OpenFlow controller. The purpose of this software was to investigate experimentally whether a pre-notification of the network of an impending relocation of a virtual service in a data center is sufficient to ensure the continuous operation of that service. 
This was demonstrated on the example of a video service. Usability of the SDN Northbound API for creating application-awareness in network operation: Currently, the fact that the network and the applications that run on it are developed and operated separately leads to problems in network operation. SDN offers with the Northbound-API an open interface that enables the exchange between information of both worlds during operation. One aim of this thesis was to investigate whether this interface can be exploited so that the QoE experienced by the user can be maintained on high level. For this purpose, the QoE influence factors were determined on a challenging application by means of a subjective survey study. The application is cloud gaming, in which the calculation of video game environments takes place in the cloud and is transported via video over the network to the user. It was shown that apart from the most important factor influencing QoS, i.e., packet loss on the downlink, also the type of game type and its speed play a role. This demonstrates that in addition to QoS the application state is important and should be communicated to the network. Since an implementation of such a state conscious SDN for the example of Cloud Gaming was not possible due to its proprietary implementation, in this thesis the application "YouTube video streaming" was chosen as an alternative. For this application, status information is retrievable via the "Yomo" tool and can be used for network control. 
It was shown that an SDN-based implementation of an application-aware network has distinct advantages over traditional network management methods and the user quality can be obtained in spite of disturbances.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Krug2020, author = {Krug, Markus}, title = {Techniques for the Automatic Extraction of Character Networks in German Historic Novels}, doi = {10.25972/OPUS-20918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-209186}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Recent advances in Natural Language Preprocessing (NLP) allow for a fully automatic extraction of character networks for an incoming text. These networks serve as a compact and easy to grasp representation of literary fiction. They offer an aggregated view of the text, which can be used during distant reading approaches for the analysis of literary hypotheses. In their core, the networks consist of nodes, which represent literary characters, and edges, which represent relations between characters. For an automatic extraction of such a network, the first step is the detection of the references of all fictional entities that are of importance for a text. References to the fictional entities appear in the form of names, noun phrases and pronouns and prior to this work, no components capable of automatic detection of character references were available. Existing tools are only capable of detecting proper nouns, a subset of all character references. When evaluated on the task of detecting proper nouns in the domain of literary fiction, they still underperform at an F1-score of just about 50\%. This thesis uses techniques from the field of semi-supervised learning, such as Distant supervision and Generalized Expectations, and improves the results of an existing tool to about 82\%, when evaluated on all three categories in literary fiction, but without the need for annotated data in the target domain. 
However, since this quality is still not sufficient, the decision was made to annotate DROC, a corpus comprising 90 fragments of German novels. This resulted in a new general-purpose annotation environment titled ATHEN, as well as annotated data that spans about 500,000 tokens in total. Using this data, the combination of supervised algorithms and a tailored rule-based algorithm, which together are able to exploit both local consistencies as well as global consistencies, yields an algorithm with an F1-score of about 93\%. This component is referred to as the Kallimachos tagger. A character network cannot directly display references, however; instead, they need to be clustered so that all references that belong to a real-world or fictional entity are grouped together. This process, widely known as coreference resolution, is a hard problem that has been in the focus of research for more than half a century. This work experimented with adaptations of classical feature-based machine learning, with a dedicated rule-based algorithm and with modern techniques of Deep Learning, but no approach can surpass 55\% B-Cubed F1 when evaluated on DROC. Due to this barrier, many researchers do not use a fully-fledged coreference resolution when they extract character networks, but only focus on a more forgiving subset -- the names. For novels such as Alice's Adventures in Wonderland by Lewis Carroll, this would however only result in a network in which many important characters are missing. In order to integrate important characters into the network that are not named by the author, this work makes use of automatic detection of speakers and addressees for direct speech utterances (all entities involved in a dialog are considered to be of importance). This problem is by itself not an easy task; however, the most successful system analysed in this thesis is able to correctly determine the speaker for about 85\% of the utterances as well as about 65\% of the addressees. 
This speaker information can not only help to identify the most dominant characters, but also serves as a way to model the relations between entities. During the span of this work, components have been developed to model relations between characters using speaker attribution, using co-occurrences, as well as by the usage of true interactions, for which yet again a dataset was annotated using ATHEN. Furthermore, since relations between characters are usually typed, a component for the extraction of a typed relation was developed. Similar to the experiments for the character reference detection, a combination of a rule-based and a Maximum Entropy classifier yielded the best overall results, with the extraction of family relations showing a score of about 80\% and the quality of love relations with a score of about 50\%. For family relations, a kernel for a Support Vector Machine was developed that even exceeded the scores of the combined approach but is behind on the other labels. In addition, this work presents new ways to evaluate automatically extracted networks without the need for domain experts; instead, it relies on the usage of expert summaries. It also refrains from the use of social network analysis for the evaluation, but instead presents ranked evaluations using Precision@k and the Spearman rank correlation coefficient for the evaluation of the nodes and edges of the network. An analysis using these metrics showed that the central characters of a novel are contained with high probability, but the quality drops rather fast if more than five entities are analyzed. The quality of the edges is mainly dominated by the quality of the coreference resolution, and the correlation coefficient between gold edges and system edges therefore varies between 30 and 60\%. 
All developed components are aggregated alongside a large set of other preprocessing modules in the Kallimachos pipeline and can be reused without any restrictions.}, subject = {Textanalyse}, language = {en} } @phdthesis{Menth2004, author = {Menth, Michael}, title = {Efficient admission control and routing for resilient communication networks}, doi = {10.25972/OPUS-846}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9949}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {This work is subdivided into two main areas: resilient admission control and resilient routing. The work gives an overview of the state of the art of quality of service mechanisms in communication networks and proposes a categorization of admission control (AC) methods. These approaches are investigated regarding performance, more precisely, regarding the potential resource utilization by dimensioning the capacity for a network with a given topology, traffic matrix, and a required flow blocking probability. In case of a failure, the affected traffic is rerouted over backup paths, which increases the traffic rate on the respective links. To guarantee the effectiveness of admission control also in failure scenarios, the increased traffic rate must be taken into account for capacity dimensioning and leads to resilient AC. Capacity dimensioning is not feasible for existing networks with already given link capacities. For the application of resilient AC in this case, the size of distributed AC budgets must be adapted according to the traffic matrix in such a way that the maximum blocking probability for all flows is minimized and that the capacity of all links is not exceeded by the admissible traffic rate in any failure scenario. Several algorithms for the solution of that problem are presented and compared regarding their efficiency and fairness. A prototype for resilient AC was implemented in the laboratories of Siemens AG in Munich within the scope of the project KING. 
Resilience requires additional capacity on the backup paths for failure scenarios. The amount of this backup capacity depends on the routing and can be minimized by routing optimization. New protection switching mechanisms are presented that deviate the traffic quickly around outage locations. They are simple and can be implemented, e.g., by MPLS technology. The Self-Protecting Multi-Path (SPM) is a multi-path consisting of disjoint partial paths. The traffic is distributed over all faultless partial paths according to an optimized load balancing function both in the working case and in failure scenarios. Performance studies show that the network topology and the traffic matrix also influence the amount of required backup capacity significantly. The example of the COST-239 network illustrates that conventional shortest path routing may need 50\% more capacity than the optimized SPM if all single link and node failures are protected.}, subject = {Kommunikation}, language = {en} }