@phdthesis{Schmitt2022, author = {Schmitt, Norbert}, title = {Measurement, Modeling, and Emulation of Power Consumption of Distributed Systems}, doi = {10.25972/OPUS-27658}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-276582}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Today's cloud data centers consume an enormous amount of energy, and energy consumption will rise in the future. An estimate from 2012 found that data centers consume about 30 billion watts of power, resulting in about 263TWh of energy usage per year. The energy consumption will rise to 1929TWh until 2030. This projected rise in energy demand is fueled by a growing number of services deployed in the cloud. 50\% of enterprise workloads have been migrated to the cloud in the last decade so far. Additionally, an increasing number of devices are using the cloud to provide functionalities and enable data centers to grow. Estimates say more than 75 billion IoT devices will be in use by 2025. The growing energy demand also increases the amount of CO2 emissions. Assuming a CO2-intensity of 200g CO2 per kWh will get us close to 227 billion tons of CO2. This emission is more than the emissions of all energy-producing power plants in Germany in 2020. However, data centers consume energy because they respond to service requests that are fulfilled through computing resources. Hence, it is not the users and devices that consume the energy in the data center but the software that controls the hardware. While the hardware is physically consuming energy, it is not always responsible for wasting energy. The software itself plays a vital role in reducing the energy consumption and CO2 emissions of data centers. The scenario of our thesis is, therefore, focused on software development. Nevertheless, we must first show developers that software contributes to energy consumption by providing evidence of its influence. 
The second step is to provide methods to assess an application's power consumption during different phases of the development process and to allow modern DevOps and agile development methods. We, therefore, need to have an automatic selection of system-level energy-consumption models that can accommodate rapid changes in the source code and application-level models allowing developers to locate power-consuming software parts for constant improvements. Afterward, we need emulation to assess the energy efficiency before the actual deployment.}, subject = {Leistungsbedarf}, language = {en} } @phdthesis{Runge2022, author = {Runge, Isabel Madeleine}, title = {Network Coding for Reliable Data Dissemination in Wireless Sensor Networks}, doi = {10.25972/OPUS-27224}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-272245}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {The application of Wireless Sensor Networks (WSNs) with a large number of tiny, cost-efficient, battery-powered sensor nodes that are able to communicate directly with each other poses many challenges. Due to the large number of communicating objects and despite a used CSMA/CA MAC protocol, there may be many signal collisions. In addition, WSNs frequently operate under harsh conditions and nodes are often prone to failure, for example, due to a depleted battery or unreliable components. Thus, nodes or even large parts of the network can fail. These aspects lead to reliable data dissemination and data storage being a key issue. Therefore, these issues are addressed herein while keeping latency low, throughput high, and energy consumption reduced. Furthermore, simplicity as well as robustness to changes in conditions are essential here. In order to achieve these aims, a certain amount of redundancy has to be included. This can be realized, for example, by using network coding. 
Existing approaches, however, often only perform well under certain conditions or for a specific scenario, have to perform a time-consuming initialization, require complex calculations, or do not provide the possibility of early decoding. Therefore, we developed a network coding procedure called Broadcast Growth Codes (BCGC) for reliable data dissemination, which performs well under a broad range of diverse conditions. These can be a high probability of signal collisions, any degree of nodes' mobility, a large number of nodes, or occurring node failures, for example. BCGC do not require complex initialization and only use simple XOR operations for encoding and decoding. Furthermore, decoding can be started as soon as a first packet/codeword has been received. Evaluations by using an in-house implemented network simulator as well as a real-world testbed showed that BCGC enhance reliability and enable to retrieve data dependably despite an unreliable network. In terms of latency, throughput, and energy consumption, depending on the conditions and the procedure being compared, BCGC can achieve the same performance or even outperform existing procedures significantly while being robust to changes in conditions and allowing low complexity of the nodes as well as early decoding.}, subject = {Zuverl{\"a}ssigkeit}, language = {en} } @phdthesis{Wagner2023, author = {Wagner, Jan Cetric}, title = {Maximalnetzplan zur reaktiven Steuerung von Produktionsabl{\"a}ufen}, isbn = {978-3-945459-43-0}, doi = {10.25972/OPUS-30545}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305452}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {182}, year = {2023}, abstract = {In produzierenden Unternehmen werden verschiedene Vorgehensweisen zur Planung, {\"U}berwachung und Steuerung von Produktionsabl{\"a}ufen eingesetzt. Eine dieser Methoden wird als Vorgangsknotennetzplantechnik bezeichnet. 
Die einzelnen Produktionsschritte werden als Knoten definiert und durch Pfeile miteinander verbunden. Die Pfeile stellen die Beziehungen der jeweiligen Vorg{\"a}nge zueinander und damit den Produktionsablauf dar. Diese Technik erlaubt den Anwendern einen umfassenden {\"U}berblick {\"u}ber die einzelnen Prozessrelationen. Zus{\"a}tzlich k{\"o}nnen mit ihr Vorgangszeiten und Produktfertigstellungszeiten ermittelt werden, wodurch eine ausf{\"u}hrliche Planung der Produktion erm{\"o}glicht wird. Ein Nachteil dieser Technik begr{\"u}ndet sich in der alleinigen Darstellung einer ausf{\"u}hrbaren Prozessabfolge. Im Falle eines St{\"o}rungseintritts mit der Folge eines nicht durchf{\"u}hrbaren Vorgangs muss von dem origin{\"a}ren Prozess abgewichen werden. Aufgrund dessen wird eine Neuplanung erforderlich. Es werden Alternativen f{\"u}r den gest{\"o}rten Vorgang ben{\"o}tigt, um eine Fortf{\"u}hrung des Prozesses ungeachtet der St{\"o}rung zu erreichen. Innerhalb dieser Arbeit wird daher eine Erweiterung der Vorgangsknotennetzplantechnik beschrieben, die es erlaubt, erg{\"a}nzend zu dem geplanten Soll-Prozess Alternativvorg{\"a}nge f{\"u}r einzelne Vorg{\"a}nge darzulegen. Diese Methode wird als Maximalnetzplan bezeichnet. Die Alternativen werden im Falle eines St{\"o}rungseintritts automatisch evaluiert und dem Anwender in priorisierter Reihenfolge pr{\"a}sentiert. Durch die Verwendung des Maximalnetzplans kann eine aufwendige Neuplanung vermieden werden. Als Anwendungsbeispiel dient ein Montageprozess, mithilfe dessen die Verwendbarkeit der Methode dargelegt wird. Weiterf{\"u}hrend zeigt eine zeitliche Analyse zufallsbedingter Maximalnetzpl{\"a}ne eine Begr{\"u}ndung zur Durchf{\"u}hrung von Alternativen und damit den Nutzen des Maximalnetzplans auf. Zus{\"a}tzlich sei angemerkt, dass innerhalb dieser Arbeit verwendete Begrifflichkeiten wie Anwender, Werker oder Mitarbeiter in maskuliner Schreibweise niedergeschrieben werden. 
Dieses ist ausschließlich der Einfachheit geschuldet und nicht dem Zweck der Diskriminierung anderer Geschlechter dienlich. Die verwendete Schreibweise soll alle Geschlechter ansprechen, ob m{\"a}nnlich, weiblich oder divers.}, subject = {Produktionsplanung}, language = {de} } @phdthesis{Grohmann2022, author = {Grohmann, Johannes Sebastian}, title = {Model Learning for Performance Prediction of Cloud-native Microservice Applications}, doi = {10.25972/OPUS-26160}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-261608}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {One consequence of the recent coronavirus pandemic is increased demand and use of online services around the globe. At the same time, performance requirements for modern technologies are becoming more stringent as users become accustomed to higher standards. These increased performance and availability requirements, coupled with the unpredictable usage growth, are driving an increasing proportion of applications to run on public cloud platforms as they promise better scalability and reliability. With data centers already responsible for about one percent of the world's power consumption, optimizing resource usage is of paramount importance. Simultaneously, meeting the increasing and changing resource and performance requirements is only possible by optimizing resource management without introducing additional overhead. This requires the research and development of new modeling approaches to understand the behavior of running applications with minimal information. However, the emergence of modern software paradigms makes it increasingly difficult to derive such models and renders previous performance modeling techniques infeasible. Modern cloud applications are often deployed as a collection of fine-grained and interconnected components called microservices. 
Microservice architectures offer massive benefits but also have broad implications for the performance characteristics of the respective systems. In addition, the microservices paradigm is typically paired with a DevOps culture, resulting in frequent application and deployment changes. Such applications are often referred to as cloud-native applications. In summary, the increasing use of ever-changing cloud-hosted microservice applications introduces a number of unique challenges for modeling the performance of modern applications. These include the amount, type, and structure of monitoring data, frequent behavioral changes, or infrastructure variabilities. This violates common assumptions of the state of the art and opens a research gap for our work. In this thesis, we present five techniques for automated learning of performance models for cloud-native software systems. We achieve this by combining machine learning with traditional performance modeling techniques. Unlike previous work, our focus is on cloud-hosted and continuously evolving microservice architectures, so-called cloud-native applications. Therefore, our contributions aim to solve the above challenges to deliver automated performance models with minimal computational overhead and no manual intervention. Depending on the cloud computing model, privacy agreements, or monitoring capabilities of each platform, we identify different scenarios where performance modeling, prediction, and optimization techniques can provide great benefits. Specifically, the contributions of this thesis are as follows: Monitorless: Application-agnostic prediction of performance degradations. To manage application performance with only platform-level monitoring, we propose Monitorless, the first truly application-independent approach to detecting performance degradation. 
We use machine learning to bridge the gap between platform-level monitoring and application-specific measurements, eliminating the need for application-level monitoring. Monitorless creates a single and holistic resource saturation model that can be used for heterogeneous and untrained applications. Results show that Monitorless infers resource-based performance degradation with 97\% accuracy. Moreover, it can achieve similar performance to typical autoscaling solutions, despite using less monitoring information. SuanMing: Predicting performance degradation using tracing. We introduce SuanMing to mitigate performance issues before they impact the user experience. This contribution is applied in scenarios where tracing tools enable application-level monitoring. SuanMing predicts explainable causes of expected performance degradations and prevents performance degradations before they occur. Evaluation results show that SuanMing can predict and pinpoint future performance degradations with an accuracy of over 90\%. SARDE: Continuous and autonomous estimation of resource demands. We present SARDE to learn application models for highly variable application deployments. This contribution focuses on the continuous estimation of application resource demands, a key parameter of performance models. SARDE represents an autonomous ensemble estimation technique. It dynamically and continuously optimizes, selects, and executes an ensemble of approaches to estimate resource demands in response to changes in the application or its environment. Through continuous online adaptation, SARDE efficiently achieves an average resource demand estimation error of 15.96\% in our evaluation. DepIC: Learning parametric dependencies from monitoring data. DepIC utilizes feature selection techniques in combination with an ensemble regression approach to automatically identify and characterize parametric dependencies. 
Although parametric dependencies can massively improve the accuracy of performance models, DepIC is the first approach to automatically learn such parametric dependencies from passive monitoring data streams. Our evaluation shows that DepIC achieves 91.7\% precision in identifying dependencies and reduces the characterization prediction error by 30\% compared to the best individual approach. Baloo: Modeling the configuration space of databases. To study the impact of different configurations within distributed DBMSs, we introduce Baloo. Our last contribution models the configuration space of databases considering measurement variabilities in the cloud. More specifically, Baloo dynamically estimates the required benchmarking measurements and automatically builds a configuration space model of a given DBMS. Our evaluation of Baloo on a dataset consisting of 900 configuration points shows that the framework achieves a prediction error of less than 11\% while saving up to 80\% of the measurement effort. Although the contributions themselves are orthogonally aligned, taken together they provide a holistic approach to performance management of modern cloud-native microservice applications. Our contributions are a significant step forward as they specifically target novel and cloud-native software development and operation paradigms, surpassing the capabilities and limitations of previous approaches. In addition, the research presented in this paper also has a significant impact on the industry, as the contributions were developed in collaboration with research teams from Nokia Bell Labs, Huawei, and Google. 
Overall, our solutions open up new possibilities for managing and optimizing cloud applications and improve cost and energy efficiency.}, subject = {Cloud Computing}, language = {en} } @phdthesis{Babu2021, author = {Babu, Dinesh Kumar}, title = {Efficient Data Fusion Approaches for Remote Sensing Time Series Generation}, doi = {10.25972/OPUS-25180}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-251808}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Fernerkundungszeitreihen beschreiben die Erfassung von zeitlich gleichm{\"a}ßig verteilten Fernerkundungsdaten in einem festgelegten Zeitraum entweder global oder f{\"u}r ein vordefiniertes Gebiet. F{\"u}r die {\"U}berwachung der Landwirtschaft, die Erkennung von Ver{\"a}nderungen der Ph{\"a}nologie oder f{\"u}r das Umwelt-Monitoring werden nahezu t{\"a}gliche Daten mit hoher r{\"a}umlicher Aufl{\"o}sung ben{\"o}tigt. Bei vielen verschiedenen fernerkundlichen Anwendungen h{\"a}ngt die Genauigkeit von der dichte und der Verl{\"a}sslichkeit der fernerkundlichen Datenreihe ab. Die verschiedenen Fernerkundungssatellitenkonstellationen sind immer noch nicht in der Lage, fast t{\"a}glich oder t{\"a}glich Bilder mit hoher r{\"a}umlicher Aufl{\"o}sung zu liefern, um die Bed{\"u}rfnisse der oben erw{\"a}hnten Fernerkundungsanwendungen zu erf{\"u}llen. Einschr{\"a}nkungen bei den Sensoren, hohe Entwicklungskosten, hohe Betriebskosten der Satelliten und das Vorhandensein von Wolken, die die Sicht auf das Beobachtungsgebiet blockieren, sind einige der Gr{\"u}nde, die es sehr schwierig machen, fast t{\"a}gliche oder t{\"a}gliche optische Fernerkundungsdaten mit hoher r{\"a}umlicher Aufl{\"o}sung zu erhalten. Mit Entwicklungen bei den optischen Sensorsystemen und gut geplanten Fernerkundungssatellitenkonstellationen kann dieser Zustand verbessert werden, doch ist dies mit Kosten verbunden. 
Selbst dann wird das Problem nicht vollst{\"a}ndig gel{\"o}st sein, so dass der wachsende Bedarf an zeitlich und r{\"a}umlich hochaufl{\"o}senden Daten nicht vollst{\"a}ndig gedeckt werden kann. Da der Datenerfassungsprozess sich auf Satelliten st{\"u}tzt, die physische Systeme sind, k{\"o}nnen diese aus verschiedenen Gr{\"u}nden unvorhersehbar ausfallen und einen vollst{\"a}ndigen Verlust der Beobachtung f{\"u}r einen bestimmten Zeitraum verursachen, wodurch eine L{\"u}cke in der Zeitreihe entsteht. Um den langfristigen Trend der ph{\"a}nologischen Ver{\"a}nderungen aufgrund der sich schnell {\"a}ndernden Umweltbedingungen zu beobachten, sind die Fernerkundungsdaten aus der gegenw{\"a}rtig nicht ausreichend. Hierzu werden auch Daten aus der Vergangenheit ben{\"o}tigt. Eine bessere Alternativl{\"o}sung f{\"u}r dieses Problem kann die Erstellung von Fernerkundungszeitreihen durch die Fusion von Daten mehrerer Fernerkundungssatelliten mit unterschiedlichen r{\"a}umlichen und zeitlichen Aufl{\"o}sungen sein. Dieser Ansatz soll effektiv und effizient sein. Bei dieser Methode kann ein zeitlich und r{\"a}umlich hoch aufgel{\"o}stes Bild von einem Satelliten, wie Sentinel-2 mit einem zeitlich und r{\"a}umlich niedrig aufgel{\"o}sten Bild von einem Satelliten, wie Sentinel-3 fusioniert werden, um synthetische Daten mit hoher zeitlicher und r{\"a}umlicher Aufl{\"o}sung zu erzeugen. Die Erzeugung von Fernerkundungszeitreihen durch Datenfusionsmethoden kann sowohl auf die gegenw{\"a}rtig erfassten Satellitenbilder als auch auf die in der Vergangenheit von den Satelliten aufgenommenen Bilder angewandt werden. Dies wird die dringend ben{\"o}tigten zeitlich und r{\"a}umlich hochaufl{\"o}senden Bilder f{\"u}r Fernerkundungsanwendungen liefern. Dieser vereinfachte Ansatz ist kosteneffektiv und bietet den Forschern die M{\"o}glichkeit, aus der begrenzten Datenquelle, die ihnen zur Verf{\"u}gung steht, die f{\"u}r ihre Anwendung ben{\"o}tigten Daten selbst zu generieren. 
Ein effizienter Datenfusionsansatz in Kombination mit einer gut geplanten Satellitenkonstellation kann ein L{\"o}sungsansatz sein, um eine nahezu t{\"a}gliche Zeitreihe von Fernerkundungsdaten l{\"u}ckenlos zu gew{\"a}hrleisten. Ziel dieser Forschungsarbeit ist die Entwicklung eines effizienten Datenfusionsansatzes, um dichte Fernerkundungszeitreihen zu erhalten.}, language = {en} } @phdthesis{HennyKrahmer2023, author = {Henny-Krahmer, Ulrike}, title = {Genre Analysis and Corpus Design: Nineteenth Century Spanish-American Novels (1830-1910)}, doi = {10.25972/OPUS-31999}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319992}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {This work in the field of digital literary stylistics and computational literary studies is concerned with theoretical concerns of literary genre, with the design of a corpus of nineteenth-century Spanish-American novels, and with its empirical analysis in terms of subgenres of the novel. The digital text corpus consists of 256 Argentine, Cuban, and Mexican novels from the period between 1830 and 1910. It has been created with the goal to analyze thematic subgenres and literary currents that were represented in numerous novels in the nineteenth century by means of computational text categorization methods. The texts have been gathered from different sources, encoded in the standard of the Text Encoding Initiative (TEI), and enriched with detailed bibliographic and subgenre-related metadata, as well as with structural information. To categorize the texts, statistical classification and a family resemblance analysis relying on network analysis are used with the aim to examine how the subgenres, which are understood as communicative, conventional phenomena, can be captured on the stylistic, textual level of the novels that participate in them. 
The result is that both thematic subgenres and literary currents are textually coherent to degrees of 70-90 \%, depending on the individual subgenre constellation, meaning that the communicatively established subgenre classifications can be accurately captured to this extent in terms of textually defined classes. Besides the empirical focus, the dissertation also aims to relate literary theoretical genre concepts to the ones used in digital genre stylistics and computational literary studies as subfields of digital humanities. It is argued that literary text types, conventional literary genres, and textual literary genres should be distinguished on a theoretical level to improve the conceptualization of genre for digital text analysis.}, subject = {Gattungstheorie}, language = {en} } @phdthesis{HochmannGlattmann2023, author = {Hochmann-Glattmann, Amanda}, title = {Autonome technische (Pflege-)Systeme und die Menschenw{\"u}rde}, doi = {10.25972/OPUS-32997}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-329970}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Seit jeher {\"u}ben Roboter eine Faszination auf den Menschen aus. Es ist die {\"A}hnlichkeit zum Menschen, die technische Systeme, die mit einer h{\"o}heren Intelligenz ausgestattet sind, gleichermaßen faszinierend wie erschreckend erscheinen l{\"a}sst. Der Gedanke daran, technische Kreaturen zu erschaffen, die uns erhabenen menschlichen Wesen „das Wasser reichen" oder uns gar {\"u}bertreffen k{\"o}nnen, l{\"a}sst uns nicht mehr los. Die Erkenntnis von dem Nutzen, den uns derartige Wesen in allen denkbaren Bereichen bringen k{\"o}nnten, m{\"u}ndet jedoch sehr schnell in eine Skepsis im Hinblick auf eine Entm{\"u}ndigung und Entwertung des Menschen. 
Denn schon heute, obgleich die Forschung in vielen Bereichen noch in den Kinderschuhen steckt, geraten wir in zahlreichen Lebensbereichen in Kontakt mit technischen Systemen, die eine starke Wirkung auf uns aus{\"u}ben und viele grundlegende Fragen aufwerfen. Die Arbeit widmet sich der ethischen Dimension autonomer (Pflege-)Systeme und thematisiert zu diesem Zweck konkrete Anwendungsszenarien. Dabei geht es nicht um allgemeine ethische Fragen, sondern konkret um den Aspekt der Vereinbarkeit autonomer technischer Systeme mit der Menschenw{\"u}rde ihrer Nutzer. Auch der Gesichtspunkt des Einflusses von autonomen technischen Innovationen auf das Selbstverst{\"a}ndnis des Menschen (Menschenbild) ist Teil der Arbeit. Als Maßstab f{\"u}r moderne technische Entwicklungen dient der W{\"u}rdegrundsatz aufgrund seiner enormen Bedeutung f{\"u}r das Recht sowie f{\"u}r das zugrundeliegende und allgemeine Menschenbild. Im Rahmen einer an einem humanistischen Weltbild orientierten Gesellschaft steht die Menschenw{\"u}rde als oberster Wert, dem moralische und rechtliche Entwicklungen gerecht werden m{\"u}ssen, {\"u}ber allem. Daher gilt es, moderne Entwicklungen immer auch im Hinblick auf ihre Vereinbarkeit mit der Menschenw{\"u}rde zu {\"u}berpr{\"u}fen. So l{\"a}sst sich feststellen, ob ein Regulierungsbedarf besteht und wie Regulierungen im Einzelnen auszugestalten sind. Gleichzeitig muss aber auch die Menschenw{\"u}rde gesellschaftlichen Entwicklungen gerecht werden. Demgem{\"a}ß wird sie vom Bundesverfassungsgericht als Grundsatz, der sich aktuellen Herausforderungen stellt und zur Erzwingung eines gesellschaftlichen Diskurses f{\"u}hrt, angesehen. 
Die hiesige Arbeit soll einen Beitrag zu der bereits angestoßenen gesellschaftlichen Debatte rund um den technischen Fortschritt und konkret um die Probleme, die mit der zunehmenden Autonomie technischer Systeme einhergehen, leisten.}, subject = {Robotik}, language = {de} } @phdthesis{Loh2024, author = {Loh, Frank}, title = {Monitoring the Quality of Streaming and Internet of Things Applications}, edition = {korrigierte Version}, issn = {1432-8801}, doi = {10.25972/OPUS-35096}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-350969}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {The ongoing and evolving usage of networks presents two critical challenges for current and future networks that require attention: (1) the task of effectively managing the vast and continually increasing data traffic and (2) the need to address the substantial number of end devices resulting from the rapid adoption of the Internet of Things. Besides these challenges, there is a mandatory need for energy consumption reduction, a more efficient resource usage, and streamlined processes without losing service quality. 
We comprehensively address these efforts, tackling the monitoring and quality assessment of streaming applications, a leading contributor to the total Internet traffic, as well as conducting an exhaustive analysis of the network performance within a Long Range Wide Area Network (LoRaWAN), one of the rapidly emerging LPWAN solutions.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Loh2024a, author = {Loh, Frank}, title = {Monitoring the Quality of Streaming and Internet of Things Applications}, issn = {1432-8801}, doi = {10.25972/OPUS-34783}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-347831}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {The ongoing and evolving usage of networks presents two critical challenges for current and future networks that require attention: (1) the task of effectively managing the vast and continually increasing data traffic and (2) the need to address the substantial number of end devices resulting from the rapid adoption of the Internet of Things. Besides these challenges, there is a mandatory need for energy consumption reduction, a more efficient resource usage, and streamlined processes without losing service quality. 
We comprehensively address these efforts, tackling the monitoring and quality assessment of streaming applications, a leading contributor to the total Internet traffic, as well as conducting an exhaustive analysis of the network performance within a Long Range Wide Area Network (LoRaWAN), one of the rapidly emerging LPWAN solutions.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Drobczyk2024, author = {Drobczyk, Martin}, title = {Ultra-Wideband Wireless Network for Enhanced Intra-Spacecraft Communication}, doi = {10.25972/OPUS-35956}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-359564}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Wireless communication networks already comprise an integral part of both the private and industrial sectors and are successfully replacing existing wired networks. They enable the development of novel applications and offer greater flexibility and efficiency. Although some efforts are already underway in the aerospace sector to deploy wireless communication networks on board spacecraft, none of these projects have yet succeeded in replacing the hard-wired state-of-the-art architecture for intra-spacecraft communication. The advantages are evident as the reduction of the wiring harness saves time, mass, and costs, and makes the whole integration process more flexible. It also allows for easier scaling when interconnecting different systems. This dissertation deals with the design and implementation of a wireless network architecture to enhance intra-spacecraft communications by breaking with the state-of-the-art standards that have existed in the space industry for decades. The potential and benefits of this novel wireless network architecture are evaluated, and an innovative design using ultra-wideband technology is presented. It is combined with a Medium Access Control (MAC) layer tailored for low-latency and deterministic networks supporting even mission-critical applications. 
As demonstrated by the Wireless Compose experiment on the International Space Station (ISS), this technology is not limited to communications but also enables novel positioning applications. To address the technological challenges, extensive studies have been carried out on electromagnetic compatibility, space radiation, and data robustness. The architecture was evaluated from various perspectives and successfully demonstrated in space. Overall, this research highlights how a wireless network can improve and potentially replace existing state-of-the-art communication systems on board spacecraft in future missions. And it will help to adapt and ultimately accelerate the implementation of wireless networks in space systems.}, subject = {Raumfahrttechnik}, language = {en} }