@phdthesis{Hartmann2015,
  author   = {Hartmann, Matthias},
  title    = {Optimization and Design of Network Architectures for Future Internet Routing},
  issn     = {1432-8801},
  doi      = {10.25972/OPUS-11416},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-114165},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2015},
  abstract = {At the center of the Internet's protocol stack stands the Internet Protocol (IP) as a common denominator that enables all communication. To make routing efficient, resilient, and scalable, several aspects must be considered. Care must be taken that traffic is well balanced to make efficient use of the existing network resources, both in failure-free operation and in failure scenarios. Finding the optimal routing in a network is an NP-complete problem; therefore, routing optimization is usually performed using heuristics. This dissertation shows that a routing optimized for one objective function often performs poorly with respect to other objective functions and can even be worse than unoptimized routing in that respect. After examining failure-free routing and traffic distribution in different failure scenarios, the analysis is extended to the loop-free alternate (LFA) IP fast-reroute mechanism. Different application scenarios of LFAs are examined, with a special focus on the fact that LFAs usually cannot protect all traffic in a network, even against single link failures. Thus, the routing optimization for LFAs targets both link utilization and failure coverage. Finally, the pre-congestion notification (PCN) mechanism for network admission control and overload protection is analyzed and optimized. Different design options for implementing the protocol are compared before algorithms are developed for the calculation and optimization of protocol parameters and of PCN-based routing. The second part of the thesis tackles a routing problem that can only be resolved on a global scale. The scalability of the Internet is at risk, since a major and intensifying growth of the interdomain routing tables has been observed. Several protocols and architectures that can make interdomain routing more scalable are analyzed. The most promising approach is the locator/identifier (Loc/ID) split architecture, which separates routing from host identification. This way, changes in connectivity, mobility of end hosts, and traffic-engineering activities are hidden from the routing in the core of the Internet, and the routing tables can be kept much smaller. All of the currently proposed Loc/ID split approaches have downsides. In particular, the fact that most architectures use the ID for routing outside the Internet's core is a poor design that inhibits many of the possible features of a new routing architecture. To better understand the problems and to provide a scalable routing design that implements a true Loc/ID split, this thesis develops the new GLI-Split protocol, which separates global from local routing and uses an ID that is independent of any routing decisions. Besides GLI-Split, several other new routing architectures implementing a Loc/ID split have been proposed for the Internet. Most of them assume that a mapping system is queried for EID-to-RLOC mappings by an intermediate node at the border of an edge network.
When the mapping system is queried by an intermediate node, packets are already on their way towards their destination; therefore, the mapping system must be fast, scalable, secure, and resilient, and it should be able to relay packets that lack locators to nodes that can forward them to the correct destination. The dissertation develops a classification for all proposed mapping system architectures and shows their similarities and differences. Finally, the fast two-level mapping system FIRMS is developed. It includes security and resilience features as well as a relay service for the initial packets of a flow when intermediate nodes encounter a cache miss for the EID-to-RLOC mapping.},
  subject  = {Netzwerk},
  language = {en}
}

@phdthesis{Hossfeld2009,
  author   = {Hoßfeld, Tobias},
  title    = {Performance Evaluation of Future Internet Applications and Emerging User Behavior},
  doi      = {10.25972/OPUS-3067},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-37570},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2009},
  abstract = {In future telecommunication systems, we observe an increasing diversity of access networks. The separation of transport services from applications and services leads to multi-network services, i.e., a future service has to work transparently on top of the underlying network infrastructure. Multi-network services with edge-based intelligence, like P2P file sharing or the Skype VoIP service, impose new traffic control paradigms on the future Internet. Such services adapt the amount of consumed bandwidth to reach different goals. Selfish behavior tries to keep the QoE of a single user above a certain level. Skype, for instance, repeats voice samples depending on the perceived end-to-end loss. From the viewpoint of a single user, the replication of voice data overcomes the degradation caused by packet loss and makes it possible to maintain a certain QoE. The cost of this achievement is a higher amount of consumed bandwidth. However, if the packet loss is caused by congestion in the network, this additionally required bandwidth worsens the network situation even further. Altruistic behavior, on the other hand, would reduce the bandwidth consumption so that the pressure on the network is relieved and the overall network performance is improved. In this monograph, we analyzed the impact of the overlay, P2P, and QoE paradigms on future Internet applications and their interactions with the emerging user behavior. The shift of intelligence toward the edge is accompanied by a change in the emerging user behavior and traffic profile, as well as a change from multi-service networks to multi-network services. In addition, edge-based intelligence may lead to higher dynamics in the network topology, since the applications are often controlled by an overlay network, which can rapidly change in size and structure as nodes join or leave the overlay in an entirely distributed manner. As a result, we found that the performance evaluation of such services poses new challenges: novel key performance factors, like the pollution of P2P systems, first have to be identified, and appropriate models of the emerging user behavior are required, e.g., models that take user impatience into account. As the common denominator of the studies presented in this work, we focus on a user-centric view when evaluating the performance of future Internet applications.
For a subscriber of a certain application or service, the perceived quality, expressed as QoE, will be the major criterion for the user's satisfaction with the network and service providers. We selected three different case studies and characterized the applications' performance from the end user's point of view: (1) cooperation in mobile P2P file-sharing networks, (2) modeling of online TV recording services, and (3) QoE of edge-based VoIP applications. The user-centric approach facilitates the development of new mechanisms to overcome problems arising from the changing user behavior. An example is the proposed CycPriM cooperation strategy, which copes with selfish user behavior in mobile P2P file-sharing systems. Such a mechanism has also been shown to be efficient in a heterogeneous B3G network with mobile users conducting vertical handovers between different wireless access technologies. The consideration of user behavior and user-perceived quality leads to an appropriate modeling of future Internet applications. In the case of the online TV recording service, this enables a comparison between different technical realizations of the system, e.g., using server clusters or P2P technology, in order to properly dimension the installed network elements and to assess the costs for service providers. Technologies like P2P help to overcome phenomena like flash crowds and improve scalability compared to server clusters, which may get overloaded in such situations. Nevertheless, P2P technology brings additional challenges and user behavior that differs from that seen in traditional client/server systems. Besides the willingness to share files and the churn of users, peers may be malicious and offer fake content to disturb the data dissemination. Finally, understanding and quantifying QoE with respect to QoS degradations permits the design of sophisticated edge-based applications. To this end, we identified and formulated the IQX hypothesis as an exponential interdependency between QoE and QoS parameters, which we validated for different examples. The appropriate modeling of the emerging user behavior, taking into account the user's perceived quality and its interactions with the overlay and P2P paradigms, will ultimately help to design future Internet applications.},
  subject  = {Leistungsbewertung},
  language = {en}
}
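Two short worked equations may clarify the formula-shaped claims in the abstracts above; both are sketches drawn from the standard literature, not text quoted from the theses. The IQX hypothesis named in the Hossfeld2009 abstract is commonly written as an exponential mapping from a QoS degradation measure x (e.g., packet loss ratio; the choice of x is an illustrative assumption here) to QoE, with \alpha, \beta, \gamma as per-application fitting parameters:

\[
  \mathrm{QoE}(x) = \alpha \cdot e^{-\beta x} + \gamma, \qquad \alpha, \beta > 0,
\]

so QoE decays exponentially as the degradation x grows and flattens out at the floor value \gamma. Likewise, the LFA mechanism in the Hartmann2015 abstract rests on the loop-free condition of RFC 5286: a neighbor N of router S is a loop-free alternate for destination D if

\[
  \mathrm{dist}(N, D) < \mathrm{dist}(N, S) + \mathrm{dist}(S, D),
\]

which guarantees that N does not forward packets for D back through S. Because some destinations may have no neighbor satisfying this inequality, LFAs usually cannot protect all traffic, as the abstract notes.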