@phdthesis{Gebert2017, author = {Gebert, Steffen Christian}, title = {Architectures for Softwarized Networks and Their Performance Evaluation}, issn = {1432-8801}, doi = {10.25972/OPUS-15063}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150634}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {This thesis contributes to several issues in the context of SDN and NFV, with an emphasis on performance and management. The main contributions are guidelines for operators migrating to software-based networks, as well as an analytical model for the packet processing in a Linux system using the Kernel NAPI.}, subject = {Telekommunikationsnetz}, language = {en} } @phdthesis{Rygielski2017, author = {Rygielski, Piotr}, title = {Flexible Modeling of Data Center Networks for Capacity Management}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146235}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Nowadays, data centers are becoming increasingly dynamic due to the common adoption of virtualization technologies. Systems can scale their capacity on demand by growing and shrinking their resources dynamically based on the current load. However, the complexity and performance of modern data centers are influenced not only by the software architecture, middleware, and computing resources, but also by network virtualization, network protocols, network services, and configuration. The field of network virtualization is not as mature as server virtualization, and there are multiple competing approaches and technologies. Performance modeling and prediction techniques provide a powerful tool to analyze the performance of modern data centers. However, given the wide variety of network virtualization approaches, no common approach exists for modeling and evaluating the performance of virtualized networks. The performance community has proposed multiple formalisms and models for evaluating the performance of infrastructures based on different network virtualization technologies. The existing performance models can be divided into two main categories: coarse-grained analytical models and highly detailed simulation models. Analytical performance models are normally defined at a high level of abstraction; they abstract away many details of the real network and therefore have limited predictive power. Simulation models, on the other hand, are normally focused on a selected networking technology and take into account many specific performance-influencing factors, resulting in detailed models that are tightly bound to a given technology, infrastructure setup, or protocol stack. Existing models are inflexible, that is, they provide a single solution method and no means for the user to influence the solution accuracy and overhead. To achieve flexibility in performance prediction, the user is required to build multiple different performance models, obtaining multiple performance predictions. Each prediction may then have a different focus, different performance metrics, prediction accuracy, and solving time. The goal of this thesis is to develop a modeling approach that does not require the user to have experience in any of the applied performance modeling formalisms. The approach offers flexibility in modeling and analysis by balancing between (a) the generic character and low overhead of coarse-grained analytical models and (b) the higher prediction accuracy of more detailed simulation models.
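To make the analytical-versus-simulation trade-off named above concrete, the following minimal sketch (my illustration, not part of the thesis) predicts the mean response time of a single queue both ways: a coarse-grained M/M/1 formula that is essentially free to evaluate, and a small discrete-event simulation that costs far more compute but is the natural place to add technology-specific detail. All rates and job counts are invented.

```python
import random

# Coarse-grained analytical model: M/M/1 mean response time R = 1 / (mu - lambda).
# Instant to evaluate, but it abstracts away burstiness, packet sizes, protocols, ...
def analytical_response_time(arrival_rate: float, service_rate: float) -> float:
    assert arrival_rate < service_rate, "queue must be stable"
    return 1.0 / (service_rate - arrival_rate)

# Fine-grained alternative: simulate the same FIFO queue job by job (Lindley
# recursion). Much slower to solve, but the loop is where technology-specific
# behavior could be modeled in detail.
def simulated_response_time(arrival_rate: float, service_rate: float,
                            num_jobs: int = 100_000, seed: int = 42) -> float:
    rng = random.Random(seed)
    clock = departure = total = 0.0
    for _ in range(num_jobs):
        clock += rng.expovariate(arrival_rate)    # arrival time of the next job
        start = max(clock, departure)             # wait if the server is busy
        departure = start + rng.expovariate(service_rate)
        total += departure - clock                # sojourn (response) time
    return total / num_jobs

print(analytical_response_time(80.0, 100.0))      # 0.05 s, constant-time answer
print(simulated_response_time(80.0, 100.0))       # ~0.05 s, orders of magnitude slower
```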
The contributions of this thesis intersect with several technologies and research areas, including software engineering, model-driven software development, domain-specific modeling, performance modeling and prediction, networking and data center networks, network virtualization, Software-Defined Networking (SDN), and Network Function Virtualization (NFV). The main contributions of this thesis constitute the Descartes Network Infrastructure (DNI) approach and include: • Novel modeling abstractions for virtualized network infrastructures. This includes two meta-models that define modeling languages for modeling data center network performance. The DNI and miniDNI meta-models provide means for representing network infrastructures at two different abstraction levels. Regardless of which variant of the DNI meta-model is used, the modeling language provides generic modeling elements that allow describing the majority of existing and future network technologies, while at the same time abstracting factors that have low influence on the overall performance. I focus on SDN and NFV as examples of modern virtualization technologies. • A network deployment meta-model: an interface between DNI and other meta-models that allows defining mappings between DNI and other descriptive models. The integration with other domain-specific models allows capturing behaviors that are not reflected in the DNI model, for example, software bottlenecks, server virtualization, and middleware overheads. • Flexible model solving with model transformations (a sketch of this pattern follows the list). The transformations enable solving a DNI model by transforming it into a predictive model. The model transformations vary in size and complexity depending on the amount of data abstracted in the transformation process and provided to the solver. In this thesis, I contribute six transformations that transform DNI models into various predictive models based on the following modeling formalisms: (a) OMNeT++ simulation, (b) Queueing Petri Nets (QPNs), (c) Layered Queueing Networks (LQNs). For each of these formalisms, multiple predictive models are generated (e.g., models with different levels of detail): (a) two for OMNeT++, (b) two for QPNs, (c) two for LQNs. Some predictive models can be solved using multiple alternative solvers, resulting in up to ten different automated solving methods for a single DNI model. • A model extraction method that supports the modeler in the modeling process by automatically prefilling the DNI model with network traffic data. The contributed traffic profile abstraction and optimization method provides a trade-off by balancing between the size and the level of detail of the extracted profiles. • A method for selecting feasible solving methods for a DNI model. The method proposes a set of solvers based on a trade-off analysis characterizing each transformation with respect to various parameters, such as its specific limitations, expected prediction accuracy, expected run-time, required resources in terms of CPU and memory consumption, and scalability. • An evaluation of the approach in the context of two realistic systems. I evaluate the approach with a focus on factors such as prediction of network capacity and interface throughput, applicability, and flexibility in trading off prediction accuracy against solving time.
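The DNI tooling itself is not shown in this abstract, so the following sketch is hypothetical: all class names, solver functions, and cost/accuracy figures are invented. It only illustrates the pattern described above: one abstract model, several transformations into predictive models of different granularity, and a selection step driven by the user's time/accuracy budget.

```python
# Hypothetical sketch of the "transform, then choose a solver" pattern described
# above. DNIModel, the solvers, and their cost/accuracy figures are invented for
# illustration; this is not the thesis's actual implementation.
from dataclasses import dataclass
from typing import Callable

@dataclass
class DNIModel:                  # stand-in for a parsed DNI model instance
    nodes: int
    links: int

@dataclass
class SolvingMethod:
    name: str
    expected_error: float        # fraction, e.g. 0.20 = up to 20 % error
    expected_runtime_s: float
    solve: Callable[[DNIModel], dict]

def bottleneck_analysis(m: DNIModel) -> dict:
    # Coarse analytical solution: ignore topology, report a utilization bound.
    return {"max_link_utilization": 0.80}

def detailed_simulation(m: DNIModel) -> dict:
    # Placeholder for a generated simulation model (e.g., an OMNeT++ run).
    return {"max_link_utilization": 0.83, "per_link": [0.1] * m.links}

METHODS = [
    SolvingMethod("analytical", expected_error=0.20, expected_runtime_s=1.0,
                  solve=bottleneck_analysis),
    SolvingMethod("simulation", expected_error=0.05, expected_runtime_s=600.0,
                  solve=detailed_simulation),
]

def select_method(time_budget_s: float) -> SolvingMethod:
    # Pick the most accurate method that still fits the user's time budget.
    feasible = [m for m in METHODS if m.expected_runtime_s <= time_budget_s]
    return min(feasible, key=lambda m: m.expected_error)

model = DNIModel(nodes=40, links=120)
method = select_method(time_budget_s=60)
print(method.name, method.solve(model))   # -> "analytical" under a 60 s budget
```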
Despite not focusing on the maximization of prediction accuracy, I demonstrate that in the majority of cases the prediction error is low: up to 20\% for uncalibrated models and up to 10\% for calibrated models, depending on the solving technique. In summary, this thesis presents the first approach to flexible run-time performance prediction in data center networks, including networks based on SDN. It provides the ability to flexibly balance between performance prediction accuracy and solving overhead. The approach provides the following key benefits: • It is possible to predict the impact of changes in the data center network on performance. These changes include changes in network topology, hardware configuration, traffic load, and application deployment. • DNI can successfully model and predict the performance of multiple different network infrastructures, including proactive SDN scenarios. • The prediction process is flexible, that is, it balances between the granularity of the predictive models and the solving time. Decreased prediction accuracy is usually rewarded with savings in solving time and in the resources required for solving. • Users are enabled to conduct performance analysis using multiple different prediction methods without requiring expertise and experience in each of the modeling formalisms. The components of the DNI approach can also be applied to scenarios that are not considered in this thesis. The approach is generalizable and applicable, for example, in the following settings: (a) networks outside of data centers may be analyzed with DNI as long as the background traffic profile is known; (b) uncalibrated DNI models may serve as a basis for design-time performance analysis; (c) the method for extracting and compacting traffic profiles may be used for other, non-network workloads as well.}, subject = {Modellierung}, language = {en} } @phdthesis{Burger2017, author = {Burger, Valentin}, title = {Performance Evaluation and Optimization of Content Delivery Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-15276}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-152769}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Content Delivery Networks (CDNs) are networks that distribute content in the Internet. CDNs are increasingly responsible for the largest share of traffic in the Internet. CDNs distribute popular content to caches in many geographical areas to save bandwidth by avoiding unnecessary multihop retransmission. By bringing the content geographically closer to the user, CDNs also reduce the latency of the services. Besides end users and content providers, who require high availability of high-quality content, CDN providers and Internet Service Providers (ISPs) are interested in an efficient operation of CDNs. In order to ensure an efficient replication of the content, CDN providers have a network of (globally) distributed, interconnected data centers at different points of presence (PoPs). ISPs aim to provide reliable and high-speed Internet access. They try to keep the load on the network low and to reduce costs for connectivity with other ISPs. The increasing number of mobile devices such as smartphones and tablets, high-definition video content, and high-resolution displays result in continuous growth in mobile traffic. This growth in mobile traffic is further accelerated by newly emerging services, such as mobile live streaming and broadcasting services.
Mobile traffic is expected to reach roughly 60\% of total network traffic by 2018, the majority of which will be video. To handle this growth, the next generation of 5G mobile networks is designed to have higher access rates and an increased densification of the network infrastructure. With the explosion of access rates and the number of base stations, the backhaul of wireless networks will become congested. To reduce the load on the backhaul, the research community suggests installing local caches in gateway routers between the wireless network and the Internet, in base stations of different sizes, and in end-user devices. The local deployment of caches allows keeping the traffic within the ISP's network. The caches are organized in a hierarchy, where caches in the lowest tier are requested first. The request is forwarded to the next tier if the requested object is not found. Appropriate evaluation methods are required to optimally dimension the caches depending on the traffic characteristics and the available resources. Additionally, methods are necessary that allow the performance evaluation of backhaul bandwidth aggregation systems, which further reduce the load on the backhaul. This thesis analyzes CDNs utilizing locally available resources and develops the following evaluation and optimization approaches: characterization of CDNs and the distribution of resources in the Internet, analysis and optimization of hierarchical caching systems with bandwidth constraints, and performance evaluation of bandwidth aggregation systems.}, subject = {CDN-Netzwerk}, language = {en} } @phdthesis{Spinner2017, author = {Spinner, Simon}, title = {Self-Aware Resource Management in Virtualized Data Centers}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153754}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Enterprise applications in virtualized data centers are often subject to time-varying workloads, i.e., the load intensity and request mix change over time due to seasonal patterns and trends, or unpredictable bursts in user requests. Varying workloads result in frequently changing resource demands to the underlying hardware infrastructure. Virtualization technologies enable sharing and on-demand allocation of hardware resources between multiple applications. In this context, the resource allocations to virtualized applications should be continuously adapted in an elastic fashion, so that "at each point in time the available resources match the current demand as closely as possible" (Herbst et al., 2013). Autonomic approaches to resource management promise significant increases in resource efficiency while avoiding violations of performance and availability requirements during peak workloads. Traditional approaches to autonomic resource management use threshold-based rules (e.g., Amazon EC2) that execute pre-defined reconfiguration actions when a metric reaches a certain threshold (e.g., high resource utilization or load imbalance). However, many business-critical applications are subject to Service Level Objectives (SLOs) defined on an application performance metric (e.g., response time or throughput). Determining thresholds such that the end-to-end application SLO is fulfilled poses a major challenge due to the complex relationship between the resource allocation to an application and the application performance. Furthermore, threshold-based approaches are inherently prone to oscillating behavior, resulting in unnecessary reconfigurations.
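The oscillation problem mentioned above is easy to reproduce. The following minimal sketch (my illustration, not from the thesis; the thresholds and the demand figure are invented) applies a threshold rule repeatedly and flaps between two allocations, because each scale-up pushes utilization below the scale-down threshold.

```python
# Minimal sketch (not from the thesis) of a threshold-based scaling rule and the
# oscillation it can cause. Thresholds and the demand figure are invented.
UP, DOWN = 0.80, 0.60   # scale up above 80 % utilization, down below 60 %

def step(vcpus: int, demand: float) -> int:
    """Apply the rule once; demand is CPU-seconds of work per second."""
    utilization = demand / vcpus
    if utilization > UP:
        return vcpus + 1
    if utilization < DOWN and vcpus > 1:
        return vcpus - 1
    return vcpus

vcpus, demand = 2, 1.7              # 85 % utilization on 2 vCPUs
for _ in range(6):
    new = step(vcpus, demand)
    print(f"{vcpus} vCPUs @ {demand / vcpus:.0%} -> {new} vCPUs")
    vcpus = new
# Output flaps 2 -> 3 -> 2 -> 3 ...: every reconfiguration immediately crosses
# the opposite threshold. This is exactly the behavior the model-based
# controllers described next are designed to avoid.
```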
In order to overcome the deficiencies of threshold-based approaches and enable a fully automated approach to dynamically controlling the resource allocations of virtualized applications, model-based approaches are required that can predict the impact of a reconfiguration on the application performance in advance. However, existing model-based approaches are severely limited in their learning capabilities. They either require complete performance models of the application as input, or use a pre-identified model structure and only learn certain model parameters from empirical data at run-time. The former requires high manual effort and deep system knowledge to create the performance models. The latter does not provide the flexibility to capture the specifics of complex and heterogeneous system architectures. This thesis presents a self-aware approach to resource management in virtualized data centers. In this context, self-aware means that the approach automatically learns performance models of the application and the virtualized infrastructure and reasons based on these models to autonomically adapt the resource allocations in accordance with given application SLOs. Learning a performance model requires the extraction of the model structure representing the system architecture as well as the estimation of model parameters, such as resource demands. The estimation of resource demands is a key challenge, as they cannot be observed directly in most systems. The major scientific contributions of this thesis are: - A reference architecture for online model learning in virtualized systems. Our reference architecture is based on a set of model extraction agents. Each agent focuses on specific tasks to automatically create and update model skeletons capturing its local knowledge of the system and collaborates with other agents to extract the structural parts of a global performance model of the system. We define different agent roles in the reference architecture and propose a model-based collaboration mechanism for the agents. The agents may be bundled within virtual appliances and may be tailored to include knowledge about the software stack deployed in a specific virtual appliance. - An online method for the statistical estimation of resource demands (a minimal illustration of one classical estimation approach follows this entry). For a given request processed by an application, the resource time consumed for a specified resource within the system (e.g., CPU or I/O device), referred to as resource demand, is the total average time the resource is busy processing the request. A request could be any unit of work (e.g., web page request, database transaction, batch job) processed by the system. We provide a systematization of existing statistical approaches to resource demand estimation and conduct an extensive experimental comparison to evaluate the accuracy of these approaches. We propose a novel method to automatically select estimation approaches and demonstrate that it significantly increases the robustness and accuracy of the estimated resource demands. - Model-based controllers for autonomic vertical scaling of virtualized applications. We design two controllers based on online model-based reasoning techniques in order to vertically scale applications at run-time in accordance with application SLOs. The controllers exploit the knowledge from the automatically extracted performance models when determining necessary reconfigurations. The first controller adds virtual CPUs to, and removes them from, an application depending on the current demand.
It uses a layered performance model to also consider physical resource contention when determining the required resources. The second controller adapts the resource allocations proactively to ensure the availability of the application during workload peaks and to avoid reconfigurations during phases of high workload. We demonstrate the applicability of our approach in current virtualized environments and show its effectiveness, leading to significant increases in resource efficiency and improvements in application performance and availability under time-varying workloads. The evaluation of our approach is based on two case studies representative of widely used enterprise applications in virtualized data centers. In our case studies, we were able to reduce the amount of required CPU resources by up to 23\% and the number of reconfigurations by up to 95\% compared to a rule-based approach, while ensuring full compliance with the application SLOs. Furthermore, using workload forecasting techniques we were able to schedule expensive reconfigurations (e.g., changes to the memory size) during phases of low load and thus reduce their impact on application availability by over 80\%, while significantly improving application performance compared to a reactive controller. The methods and techniques for resource demand estimation and vertical application scaling were developed and evaluated in close collaboration with VMware and Google.}, subject = {Cloud Computing}, language = {en} }
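As a pointer to what the statistical estimation of resource demands involves, here is a minimal sketch of one classical approach from the literature that such surveys cover: linear regression on the utilization law U = sum_c X_c * D_c, where X_c is the observable throughput of request class c and D_c its unobservable demand (in the spirit of Rolia and Vetland). The data is synthetic and the code is my illustration, not the thesis's implementation.

```python
# Minimal sketch (not the thesis's code) of resource demand estimation via
# regression on the utilization law U = sum_c X_c * D_c. The demands D_c cannot
# be measured directly; they are recovered from observable throughputs and
# utilization samples. All figures below are synthetic.
import numpy as np

rng = np.random.default_rng(0)
true_demands = np.array([0.020, 0.005])   # CPU-seconds per request (hidden)

# Monitoring samples: per-class throughputs (req/s) and noisy CPU utilization.
X = rng.uniform(5, 40, size=(200, 2))               # observable throughputs
U = X @ true_demands + rng.normal(0, 0.01, 200)     # observable, noisy utilization

estimated, *_ = np.linalg.lstsq(X, U, rcond=None)
print(estimated)   # ~[0.020, 0.005]: demands recovered without direct measurement
```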