@phdthesis{Bregenzer2015,
  author    = {Bregenzer, J{\"u}rgen},
  title     = {Effizienter Einsatz von Multicore-Architekturen in der Steuerungstechnik},
  publisher = {W{\"u}rzburg University Press},
  address   = {W{\"u}rzburg},
  isbn      = {978-3-95826-010-8 (Print)},
  doi       = {10.25972/WUP-978-3-95826-011-5},
  url       = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-106239},
  school    = {Universit{\"a}t W{\"u}rzburg},
  pages     = {185},
  year      = {2015},
  abstract  = {The use of multicore processors in industrial control technology entails both opportunities and risks. This dissertation therefore develops and evaluates generic strategies for employing this processor architecture, taking into account the specific constraints and requirements of the domain. Multicore processors offer the opportunity to consolidate heterogeneous control subsystems, currently executed on dedicated hardware, under a degree of temporal isolation that was previously unattainable. In this context, the dissertation defines the specific requirements that such an integrated execution must fulfill in the domain of industrial automation. A precondition for this scenario, however, is a suitable consolidation solution. With a virtualized and a hybrid consolidation approach, two representative solutions for the embedded-systems domain are therefore presented and evaluated against the previously defined criteria. Since processor clock rates have reached physical limits, significant performance gains in control technology will in the future only be achievable through multicore processors. This presupposes firmware that can exploit the parallelism of this processor architecture in a suitable manner. Unfortunately, parallelizing a complex system such as an automation firmware generally incurs significant effort. Decisions in this regard should therefore only be taken on the basis of an objective assessment of the potential alternatives. The system's complexity, however, makes estimating the performance to be expected from a specific parallel firmware architecture a demanding task, especially since a parallelization is required that is suitable for a multitude of load scenarios in the form of controlled machines. For this reason, this dissertation specifies an application-oriented method that supports the design decisions to be taken when migrating an existing single-core firmware to a homogeneous multicore architecture. To this end, suitable firmware models are created automatically, based on dynamic profiling of the firmware under several representative load scenarios. These models are then extended with the expert knowledge of firmware developers, before the design space of parallelization alternatives is explored by means of multi-objective genetic algorithms. Finally, a specific solution on the Pareto front derived in this way can be selected for implementation by a developer on the basis of its evaluation metrics. The thesis concludes with a case study that applies the described method to a numerical control firmware, demonstrating the method's potential for comprehensively supporting a firmware parallelization.},
  subject   = {Mehrkernprozessor},
  language  = {de}
}
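The kind of design-space exploration Bregenzer describes, searching task-to-core assignments with multi-objective genetic algorithms, can be sketched in a few lines. The task runtimes, communication volumes, and GA operators below are invented for illustration; the thesis' firmware models and algorithms are far richer, so treat this as a minimal sketch of the technique, not the thesis' implementation.

# Sketch: multi-objective GA that assigns firmware tasks to cores.
# Hypothetical inputs: per-task runtimes from profiling and communication
# volumes between tasks. Objectives: minimize the most loaded core
# (makespan) and minimize cross-core communication.
import random

RUNTIME = [5.0, 3.0, 8.0, 2.0, 6.0, 4.0]              # per-task cost (made up)
COMM = {(0, 1): 2.0, (1, 2): 4.0, (2, 3): 1.0,
        (3, 4): 3.0, (1, 4): 2.5, (4, 5): 1.5}        # data volume (made up)
CORES = 2

def objectives(assign):
    load = [0.0] * CORES
    for task, core in enumerate(assign):
        load[core] += RUNTIME[task]
    comm = sum(v for (a, b), v in COMM.items() if assign[a] != assign[b])
    return max(load), comm                             # both to be minimized

def dominates(p, q):
    return all(a <= b for a, b in zip(p, q)) and p != q

def pareto_front(pop):
    scored = [(ind, objectives(ind)) for ind in pop]
    return [(i, s) for i, s in scored
            if not any(dominates(t, s) for _, t in scored)]

def evolve(generations=200, size=40):
    pop = [[random.randrange(CORES) for _ in RUNTIME] for _ in range(size)]
    for _ in range(generations):
        front = [ind for ind, _ in pareto_front(pop)]
        children = []
        while len(children) < size:
            a, b = random.sample(front, 2) if len(front) > 1 else (front[0], front[0])
            cut = random.randrange(1, len(RUNTIME))
            child = a[:cut] + b[cut:]                  # one-point crossover
            if random.random() < 0.3:                  # mutation: move one task
                child[random.randrange(len(child))] = random.randrange(CORES)
            children.append(child)
        pop = front + children
    return pareto_front(pop)

for assignment, (makespan, comm) in evolve():
    print(assignment, "makespan=%.1f comm=%.1f" % (makespan, comm))

A developer would then pick one assignment from the printed Pareto front, trading makespan against communication, which mirrors the selection step the abstract describes.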
nach ben{\"o}tigten Zusatzdienstleistungen {\"u}ber standardisierte Schnittstellen, versucht Transparenz und Nachvollziehbarkeit bei den Abrechnungsmodellen herzustellen, um eine Vergleichbarkeit {\"u}berhaupt erst zu erm{\"o}glichen. Der gr{\"o}ßte Vorteil liegt in der Zeitersparnis: Die Recherche nach passenden Cloud-Services wird durch formalisierte und somit vergleichbare Kriterien verk{\"u}rzt. Bei mehreren passenden Anbietern l{\"a}sst sich {\"u}ber weitere Abfragen oder Kostenvergleiche der jeweils f{\"u}r den Nutzer beste Anbieter gezielt finden. Ebenso k{\"o}nnen Services mit signifikanten Ausschlusskriterien fr{\"u}hzeitig aus der Auswahl entfernt werden. Durch das Verbot bestimmter Zuweisungen oder durch die Forderung von Mindestbedingungen innerhalb der Ontologie wird die Einpflege falscher Sachverhalte verhindert und sie erweist sich damit wesentlich unempfindlicher als viele Programme. Die Aufgabenstellung bei der Modellerstellung lag darin, zu einer allgemeinen Aussagekraft der modellierten Abh{\"a}ngigkeiten zu kommen. Außerdem erf{\"u}llt die Cloud-Ontologie die vier typischen Anforderungen an eine Ontologie: Sie ist ausschließlich durch die standardisierte Sprache OWL beschrieben, kann durch einen Inferenzalgorithmus (z. B. Pellet) ausgewertet werden, unterscheidet eindeutig zwischen 80 Klassen und 342 Individuals und bildet zahlreiche Informationen {\"u}ber 2657 Verkn{\"u}pfungen ab. Die Ontologie kann mit geringem Aufwand auch in ein Programm mit einer ansprechenden Oberfl{\"a}che {\"u}berf{\"u}hrt werden, wie der programmierte Prototyp beweist. In der Praxis m{\"u}ssen f{\"u}r Unternehmen verst{\"a}rkt Hilfsmittel angeboten werden oder in den Vordergrund r{\"u}cken, wie Cloud-Ontologien, die die Auswahl von Services erleichtern, Vergleiche erst erm{\"o}glichen, die Suche verk{\"u}rzen und zum Schluss zu Ergebnissen f{\"u}hren, die den Vorstellungen des k{\"u}nftigen Nutzers entsprechen.}, subject = {Cloud Computing}, language = {de} } @phdthesis{Spinner2017, author = {Spinner, Simon}, title = {Self-Aware Resource Management in Virtualized Data Centers}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153754}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Enterprise applications in virtualized data centers are often subject to time-varying workloads, i.e., the load intensity and request mix change over time, due to seasonal patterns and trends, or unpredictable bursts in user requests. Varying workloads result in frequently changing resource demands to the underlying hardware infrastructure. Virtualization technologies enable sharing and on-demand allocation of hardware resources between multiple applications. In this context, the resource allocations to virtualized applications should be continuously adapted in an elastic fashion, so that "at each point in time the available resources match the current demand as closely as possible" (Herbst el al., 2013). Autonomic approaches to resource management promise significant increases in resource efficiency while avoiding violations of performance and availability requirements during peak workloads. Traditional approaches for autonomic resource management use threshold-based rules (e.g., Amazon EC2) that execute pre-defined reconfiguration actions when a metric reaches a certain threshold (e.g., high resource utilization or load imbalance). 
@phdthesis{Spinner2017,
  author   = {Spinner, Simon},
  title    = {Self-Aware Resource Management in Virtualized Data Centers},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153754},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2017},
  abstract = {Enterprise applications in virtualized data centers are often subject to time-varying workloads, i.e., the load intensity and request mix change over time, due to seasonal patterns and trends, or unpredictable bursts in user requests. Varying workloads result in frequently changing resource demands to the underlying hardware infrastructure. Virtualization technologies enable sharing and on-demand allocation of hardware resources between multiple applications. In this context, the resource allocations to virtualized applications should be continuously adapted in an elastic fashion, so that "at each point in time the available resources match the current demand as closely as possible" (Herbst et al., 2013). Autonomic approaches to resource management promise significant increases in resource efficiency while avoiding violations of performance and availability requirements during peak workloads. Traditional approaches for autonomic resource management use threshold-based rules (e.g., Amazon EC2) that execute pre-defined reconfiguration actions when a metric reaches a certain threshold (e.g., high resource utilization or load imbalance). However, many business-critical applications are subject to Service Level Objectives (SLOs) defined on an application performance metric (e.g., response time or throughput). Determining thresholds such that the end-to-end application SLO is fulfilled poses a major challenge due to the complex relationship between the resource allocation to an application and the application performance. Furthermore, threshold-based approaches are inherently prone to oscillating behavior resulting in unnecessary reconfigurations. In order to overcome the deficiencies of threshold-based approaches and enable a fully automated approach to dynamically control the resource allocations of virtualized applications, model-based approaches are required that can predict the impact of a reconfiguration on the application performance in advance. However, existing model-based approaches are severely limited in their learning capabilities. They either require complete performance models of the application as input, or use a pre-identified model structure and only learn certain model parameters from empirical data at run-time. The former requires high manual effort and deep system knowledge to create the performance models. The latter does not provide the flexibility to capture the specifics of complex and heterogeneous system architectures. This thesis presents a self-aware approach to resource management in virtualized data centers. In this context, self-aware means that the approach automatically learns performance models of the application and the virtualized infrastructure and reasons based on these models to autonomically adapt the resource allocations in accordance with given application SLOs. Learning a performance model requires the extraction of the model structure representing the system architecture as well as the estimation of model parameters, such as resource demands. The estimation of resource demands is a key challenge, as they cannot be observed directly in most systems. The major scientific contributions of this thesis are:
- A reference architecture for online model learning in virtualized systems. Our reference architecture is based on a set of model extraction agents. Each agent focuses on specific tasks to automatically create and update model skeletons capturing its local knowledge of the system and collaborates with other agents to extract the structural parts of a global performance model of the system. We define different agent roles in the reference architecture and propose a model-based collaboration mechanism for the agents. The agents may be bundled within virtual appliances and may be tailored to include knowledge about the software stack deployed in a specific virtual appliance.
- An online method for the statistical estimation of resource demands. For a given request processed by an application, the resource demand, i.e., the resource time consumed for a specified resource within the system (e.g., a CPU or I/O device), is the total average time the resource is busy processing the request. A request could be any unit of work (e.g., web page request, database transaction, batch job) processed by the system. We provide a systematization of existing statistical approaches to resource demand estimation and conduct an extensive experimental comparison to evaluate the accuracy of these approaches. We propose a novel method to automatically select estimation approaches and demonstrate that it significantly increases the robustness and accuracy of the estimated resource demands.
- Model-based controllers for autonomic vertical scaling of virtualized applications. We design two controllers based on online model-based reasoning techniques in order to vertically scale applications at run-time in accordance with application SLOs. The controllers exploit the knowledge from the automatically extracted performance models when determining necessary reconfigurations. The first controller adds virtual CPUs to, or removes them from, an application depending on the current demand. It uses a layered performance model to also consider the physical resource contention when determining the required resources. The second controller adapts the resource allocations proactively to ensure the availability of the application during workload peaks and to avoid reconfiguration during phases of high workload.
We demonstrate the applicability of our approach in current virtualized environments and show its effectiveness, leading to significant increases in resource efficiency and improvements in application performance and availability under time-varying workloads. The evaluation of our approach is based on two case studies representative of widely used enterprise applications in virtualized data centers. In our case studies, we were able to reduce the amount of required CPU resources by up to 23\% and the number of reconfigurations by up to 95\% compared to a rule-based approach while ensuring full compliance with the application SLOs. Furthermore, using workload forecasting techniques, we were able to schedule expensive reconfigurations (e.g., changes to the memory size) during phases of low load and thus reduce their impact on application availability by over 80\% while significantly improving application performance compared to a reactive controller. The methods and techniques for resource demand estimation and vertical application scaling were developed and evaluated in close collaboration with VMware and Google.},
  subject  = {Cloud Computing},
  language = {en}
}
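Resource demand estimation, the key challenge Spinner names, is commonly approached by regressing measured utilization on observed per-class throughput via the utilization law. The sketch below uses synthetic measurements and non-negative least squares; it illustrates one family of statistical approaches the thesis systematizes, not the thesis' own selection method, and all numbers are invented.

# Sketch: estimating per-request-class resource demands from
# utilization and throughput samples via the utilization law
#   U_i = sum_c D_c * X_{i,c}  (+ measurement noise),
# solved with non-negative least squares. Data is synthetic.
import numpy as np
from scipy.optimize import nnls

rng = np.random.default_rng(1)
true_demands = np.array([0.005, 0.020, 0.012])   # seconds of CPU per request

# 50 monitoring intervals: throughput per class (req/s) and measured CPU util.
X = rng.uniform(1, 40, size=(50, 3))
U = X @ true_demands + rng.normal(0, 0.01, size=50)

estimated, residual = nnls(X, U)
print("estimated demands:", np.round(estimated, 4))
print("true demands:     ", true_demands)

The appeal of this class of estimators is that throughput and utilization are observable from standard monitoring, while the demands themselves, as the abstract notes, cannot be measured directly.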
@phdthesis{DinhXuan2018,
  author   = {Dinh-Xuan, Lam},
  title    = {Quality of Experience Assessment of Cloud Applications and Performance Evaluation of VNF-Based QoE Monitoring},
  issn     = {1432-8801},
  doi      = {10.25972/OPUS-16918},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-169182},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2018},
  abstract = {In this thesis, various aspects of Quality of Experience (QoE) research are examined. The work is divided into three major blocks: QoE assessment, QoE monitoring, and VNF performance evaluation. First, prominent cloud applications such as Google Docs and a cloud-based photo album are explored. The QoE is characterized and the influence of packet loss and delay is studied. Afterwards, objective QoE monitoring for HTTP Adaptive Streaming (HAS) video in the cloud is investigated. Additionally, by using a Virtual Network Function (VNF) for QoE monitoring in the cloud, the feasibility of interworking between Network Function Virtualization (NFV) and the cloud paradigm is evaluated. To this end, a VNF that exploits deep packet inspection was used to parse the video traffic. An algorithm was then designed to estimate video quality and QoE based on network- and application-layer parameters. To assess the accuracy of the estimation, the VNF was measured in different scenarios, under varying network QoS and within the virtual environment of the cloud architecture. The results show that the geographical deployment of the VNF influences the accuracy of the video quality and QoE estimation. Various Service Function Chain (SFC) placement algorithms were furthermore proposed and compared in the context of edge cloud networks. On the one hand, this research offers cloud service providers methods for evaluating the QoE of cloud applications. On the other hand, network operators can learn the pitfalls and disadvantages of using the NFV paradigm for such a QoE monitoring mechanism.},
  subject  = {Quality of Experience},
  language = {en}
}
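A network-level QoE estimator of the kind run inside such a monitoring VNF can be approximated by mapping measured impairments to a mean opinion score. The toy model below penalizes stalling and initial delay, the dominant HAS impairments; all coefficients are invented for illustration and are not taken from the thesis or from any standardized model.

# Sketch: toy QoE estimator for HTTP adaptive streaming, mapping
# network-derived impairments (stalls, initial delay, mean bitrate)
# to a 1..5 MOS. Coefficients are invented for illustration only.
import math

def estimate_mos(stall_count, stall_total_s, initial_delay_s, bitrate_kbps):
    base = 1.0 + 4.0 * min(bitrate_kbps / 4000.0, 1.0)      # quality level
    stall_penalty = math.exp(-(0.15 * stall_total_s + 0.19 * stall_count))
    delay_penalty = math.exp(-0.02 * initial_delay_s)
    return max(1.0, 1.0 + (base - 1.0) * stall_penalty * delay_penalty)

print(estimate_mos(stall_count=0, stall_total_s=0, initial_delay_s=2, bitrate_kbps=3500))
print(estimate_mos(stall_count=3, stall_total_s=12, initial_delay_s=5, bitrate_kbps=1200))

The inputs are exactly the quantities a deep-packet-inspection VNF can derive from the traffic, which is why estimators of this shape fit the monitoring setup the abstract describes.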
@phdthesis{Ifflaender2021,
  author   = {Iffl{\"a}nder, Lukas},
  title    = {Attack-aware Security Function Management},
  doi      = {10.25972/OPUS-22421},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-224211},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2021},
  abstract = {Over the last decades, cybersecurity has become an increasingly important issue. Between 2011 and 2019 alone, the losses from cyberattacks in the United States grew by 6217\%. At the same time, attacks have become not only more intense but also increasingly versatile and diverse. Cybersecurity has become everyone's concern. Today, service providers require sophisticated and extensive security infrastructures comprising many security functions dedicated to various cyberattacks. Still, attacks are intensifying to a level where these infrastructures can no longer keep up. Simply scaling up is no longer sufficient. To address this challenge, in a whitepaper, the Cloud Security Alliance (CSA) proposed multiple work packages for security infrastructure, leveraging the possibilities of Software-defined Networking (SDN) and Network Function Virtualization (NFV). Security functions require a more sophisticated modeling approach than regular network functions. Notably, the property of dropping packets deemed malicious has a significant impact on Security Service Function Chains (SSFCs), i.e., service chains consisting of multiple security functions that protect against multiple attack vectors. Under attack, the order of these chains influences the end-to-end system performance depending on the attack type. Unfortunately, it is hard to predict the attack composition at system design time. Thus, we make a case for dynamic attack-aware SSFC reordering. Also, we tackle the issues of the lack of integration between security functions and the surrounding network infrastructure, the insufficient use of short-term CPU frequency boosting, and the lack of Intrusion Detection and Prevention Systems (IDPS) against database ransomware attacks. Current works focus on characterizing the performance of security functions and their behavior under overload without considering the surrounding infrastructure. Other works aim at replacing security functions using network infrastructure features but do not consider integrating security functions within the network. Further publications deal with using SDN for security or with new vulnerabilities introduced through SDN, but they do not take security function performance into account. NFV is a popular field for research dealing with frameworks, benchmarking methods, the combination with SDN, and implementing security functions as Virtualized Network Functions (VNFs). Research in this area brought forth the concept of Service Function Chains (SFCs), which chain multiple network functions one after another. Nevertheless, they still do not consider the specifics of security functions. The mentioned CSA whitepaper proposes many valuable ideas but leaves their realization open to others. This thesis presents solutions to increase the performance of single security functions using SDN, performance modeling, a framework for attack-aware SSFC reordering, a solution to make better use of CPU frequency boosting, and an IDPS against database ransomware. Specifically, the primary contributions of this work are:
• We present approaches to dynamically bypass Intrusion Detection Systems (IDS) in order to increase their performance without reducing the security level. To this end, we develop and implement three SDN-based approaches (two dynamic and one static). We evaluate the proposed approaches regarding security and performance and show that they significantly increase the performance compared to an inline IDS without significant security deficits. We show that using software switches can further increase the performance of the dynamic approaches up to a point where they can eliminate any throughput drawbacks when using the IDS.
• We design a DDoS Protection System (DPS) against TCP SYN flood attacks in the form of a VNF that works inside an SDN-enabled network. This solution eliminates known scalability and performance drawbacks of existing solutions for this attack type. Then, we evaluate this solution, showing that it correctly handles connection establishment, and present solutions for an observed issue. Next, we evaluate the performance, showing that our solution increases performance by up to a factor of three. Parallelization and parameter tuning yield another 76\% performance boost. Based on these findings, we discuss optimal deployment strategies.
• We introduce the idea of attack-aware SSFC reordering and explain its impact in a theoretical scenario. Then, we discuss the information required to perform this process. We validate our claim of the importance of the SSFC order by analyzing the behavior of single security functions and SSFCs. Based on the results, we conclude that the order has a massive impact on performance, up to three orders of magnitude, and we find contradictory optimal orders for different workloads. Thus, we demonstrate the need for dynamic reordering. Last, we develop a model for SSFCs capturing traffic composition and resource demands. We classify the traffic into multiple classes and model the effect of single security functions on the traffic and their generated resource demands as functions of the incoming network traffic. Based on our model, we propose three approaches to determine optimal orders for reordering.
• We implement a framework for attack-aware SSFC reordering based on this knowledge. The framework places all security functions inside an SDN-enabled network and reorders them using SDN flows. Our evaluation shows that the framework can enforce all routes as desired. It correctly adapts to all attacks and returns to the original state after the attacks cease. We find possible security issues at the moment of reordering and present solutions to eliminate them.
• Next, we design and implement an approach to load-balance servers that takes into account their ability to enter a CPU frequency boost state. To this end, the approach collects temperature information from available hosts and places services on the host that can attain the boosted mode the longest. We evaluate this approach and show its effectiveness. For high-load scenarios, the approach increases both the overall performance and the performance per watt.
Even better results show up for low-load workloads, where not only do all performance metrics improve but the temperatures and total power consumption also decrease.
• Last, we design an IDPS protecting against database ransomware attacks that comprise multiple queries to attain their goal. Our solution models these attacks using a Colored Petri Net (CPN). A proof-of-concept implementation shows that our approach is capable of detecting attacks without creating false positives for benign scenarios. Furthermore, our solution incurs only a small performance overhead.
Our contributions can help to improve the performance of security infrastructures. We see multiple application areas, ranging from data center operators through software and hardware developers to security and performance researchers. Most of the above-listed contributions found use in several research publications. Regarding future work, we see the need to better integrate SDN-enabled security functions and SSFC reordering in data center networks. Future SSFCs should discriminate between different traffic types, and security frameworks should support automatically learning models for security functions. We also see the need to consider energy efficiency in SSFC design and to take CPU boosting technologies into account when designing performance models as well as placement, scaling, and deployment strategies. Last, for faster adaptation to emerging ransomware attacks, we propose machine-assisted learning of database IDPS signatures.},
  subject  = {Software-defined networking},
  language = {en}
}
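The SSFC ordering model in the Iffl{\"a}nder entry above can be made concrete with a small cost calculation: each security function inspects all traffic it receives at a per-packet cost and thins out the traffic class it filters, so the total resource demand depends on the order. The sketch below enumerates all orders for a given traffic mix and picks the cheapest; the function names, drop rates, and costs are invented, and the thesis' actual model and three reordering approaches are considerably richer.

# Sketch: choosing a Security Service Function Chain (SSFC) order by
# total processing cost. Each function filters one traffic class with
# a given drop probability and charges a per-packet cost; traffic that
# passes flows on to the next function. Numbers are made up.
from itertools import permutations

# function -> (per-packet cost, class it filters, drop probability)
FUNCTIONS = {
    "firewall": (1.0, "syn_flood", 0.99),
    "ids":      (4.0, "exploit", 0.90),
    "waf":      (2.5, "http_attack", 0.95),
}

def chain_cost(order, traffic):
    """Total work for one traffic mix (packets/s per class)."""
    load = dict(traffic)
    total = 0.0
    for fn in order:
        cost, target, drop = FUNCTIONS[fn]
        total += cost * sum(load.values())       # every packet is inspected
        load[target] *= (1.0 - drop)             # filtered class is thinned
    return total

attack_mix = {"benign": 100, "syn_flood": 5000, "exploit": 10, "http_attack": 50}
for order in permutations(FUNCTIONS):
    print(order, round(chain_cost(order, attack_mix), 1))
print("best order under this mix:",
      min(permutations(FUNCTIONS), key=lambda o: chain_cost(o, attack_mix)))

Running this with a SYN-flood-heavy mix favors placing the cheap, high-drop firewall first, while a different mix yields a different optimum, which is the observation that motivates dynamic, attack-aware reordering in the first place.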