@phdthesis{Heck2005, author = {Heck, Klaus}, title = {Wireless LAN performance studies in the context of 4G networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-14896}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {Wireless communication is nothing new. The first data transmissions based on electromagnetic waves were successfully performed at the end of the 19th century. However, it took almost another century until the technology was ripe for the mass market. The first mobile communication systems based on the transmission of digital data were introduced in the late 1980s. Within just a couple of years they caused a revolution in the way people communicate. The number of cellular phones started to outnumber the fixed telephone lines in many countries and is still rising. New technologies in 3G systems, such as UMTS, allow higher data rates and support various kinds of multimedia services. Nevertheless, the end of the road in wireless communication is far from being reached. In the near future, the Internet and cellular phone systems are expected to be integrated into a new form of wireless system. Bandwidth requirements for a rich set of wireless services, e.g.\ video telephony, video streaming, online gaming, will be easily met. The transmission of voice data will just be another IP-based service. On the other hand, building such a system is far from an easy task. The problems in the development of the UMTS system showed the high complexity of wireless systems with support for bandwidth-hungry, IP-based services. But the technological challenges are just one difficulty. Telecommunication systems are planned on a world-wide basis, such that standard bodies, governments, institutions, hardware vendors, and service providers have to find agreements and compromises on a number of different topics. In this work, we provide the reader with a discussion of many of the topics involved in the planning of a Wireless LAN system that is capable of being integrated into the 4th generation mobile networks (4G) that are being discussed nowadays. Therefore, it has to be able to cope with interactive voice and video traffic while still offering high data rates for best effort traffic. Let us assume a scenario where a huge office complex is completely covered with Wireless LAN access points. Different antenna systems are applied in order to reduce the number of access points that are needed on the one hand, while optimizing the coverage on the other. No additional infrastructure is implemented. Our goal is to evaluate whether the Wireless LAN technology is capable of dealing with the various demands of such a scenario. First, each single access point has to be capable of supporting best-effort and Quality of Service (QoS) demanding applications simultaneously. The IT infrastructure in our scenario consists solely of Wireless LAN, such that it has to allow some users to surf the Web while others are involved in voice calls or video conferences. Then, there is the problem of overlapping cells. Users attached to one access point produce interference for others. However, the QoS support has to be maintained, which is not an easy task. Finally, there are nomadic users, who roam from one Wireless LAN cell to another even during a voice call. There are mechanisms in the standard that allow for mobility, but their capabilities for QoS support are yet to be studied. This shows the large number of unresolved issues when it comes to Wireless LAN in the context of 4G networks.
In this work we want to tackle some of the problems.}, subject = {Drahtloses lokales Netz}, language = {en} } @phdthesis{Karch2002, author = {Karch, Oliver}, title = {Where am I? - Indoor localization based on range measurements}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8442}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2002}, abstract = {Nowadays, robotics plays an important role in an increasing number of fields of application. There exist many environments or situations where mobile robots instead of human beings are used, since the tasks are too hazardous, uncomfortable, repetitive, or costly for humans to perform. The autonomy and the mobility of the robot are often essential for a good solution of these problems. Thus, such a robot should at least be able to answer the question "Where am I?". This thesis investigates the problem of self-localizing a robot in an indoor environment using range measurements. That is, a robot equipped with a range sensor wakes up inside a building and has to determine its position using only its sensor data and a map of its environment. We examine this problem from an idealizing point of view (reducing it to a purely geometric one) and further investigate a method of Guibas, Motwani, and Raghavan from the field of computational geometry for solving it. Here, so-called visibility skeletons, which can be seen as coarsened representations of visibility polygons, play a decisive role. In the major part of this thesis we analyze the structures and the occurring complexities in the framework of this scheme. It turns out that the main source of complication is so-called overlapping embeddings of skeletons into the map polygon, for which we derive some restrictive visibility constraints. Based on these results we are able to improve one of the occurring complexity bounds in the sense that we can formulate it with respect to the number of reflex vertices instead of the total number of map vertices. This also affects the worst-case bound on the preprocessing complexity of the method. The second part of this thesis compares the previous idealizing assumptions with the properties of real-world environments and discusses the occurring problems. In order to circumvent these problems, we use the concept of distance functions, which model the resemblance between the sensor data and the map, and appropriately adapt the above method to the needs of realistic scenarios. In particular, we introduce a distance function, namely the polar coordinate metric, which seems to be well suited to the localization problem. Finally, we present the RoLoPro software where most of the discussed algorithms are implemented (including the polar coordinate metric).}, subject = {Autonomer Roboter}, language = {en} } @phdthesis{Bleier2023, author = {Bleier, Michael}, title = {Underwater Laser Scanning - Refractive Calibration, Self-calibration and Mapping for 3D Reconstruction}, isbn = {978-3-945459-45-4}, doi = {10.25972/OPUS-32269}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322693}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {There is great interest in affordable, precise and reliable metrology underwater: Archaeologists want to document artifacts in situ with high detail. In marine research, biologists require the tools to monitor coral growth and geologists need recordings to model sediment transport.
Furthermore, for offshore construction projects, maintenance and inspection, millimeter-accurate measurements of defects and offshore structures are essential. While the process of digitizing individual objects and complete sites on land is well understood and standard methods, such as Structure from Motion or terrestrial laser scanning, are regularly applied, precise underwater surveying with high resolution is still a complex and difficult task. Applying optical scanning techniques in water is challenging due to reduced visibility caused by turbidity and light absorption. However, optical underwater scanners provide significant advantages in terms of achievable resolution and accuracy compared to acoustic systems. This thesis proposes an underwater laser scanning system and the algorithms for creating dense and accurate 3D scans in water. It is based on laser triangulation and the main optical components are an underwater camera and a cross-line laser projector. The prototype is configured with a motorized yaw axis for capturing scans from a tripod. Alternatively, it is mounted to a moving platform for mobile mapping. The main focus lies on the refractive calibration of the underwater camera and laser projector, the image processing and 3D reconstruction. For highest accuracy, the refraction at the individual media interfaces must be taken into account. This is addressed by an optimization-based calibration framework using a physical-geometric camera model derived from an analytical formulation of a ray-tracing projection model. In addition to scanning underwater structures, this work presents the 3D acquisition of semi-submerged structures and the correction of refraction effects. As in-situ calibration in water is complex and time-consuming, the challenge of transferring an in-air scanner calibration to water without re-calibration is investigated, as well as self-calibration techniques for structured light. The system was successfully deployed in various configurations for both static scanning and mobile mapping. An evaluation of the calibration and 3D reconstruction using reference objects and a comparison of free-form surfaces in clear water demonstrate the high accuracy potential in the range of one millimeter to less than one centimeter, depending on the measurement distance. Mobile underwater mapping and motion compensation based on visual-inertial odometry is demonstrated using a new optical underwater scanner based on fringe projection. Continuous registration of individual scans allows the acquisition of 3D models from an underwater vehicle. RGB images captured in parallel are used to create 3D point clouds of underwater scenes in full color. 3D maps are useful to the operator during the remote control of underwater vehicles and provide the building blocks to enable offshore inspection and surveying tasks.
The advancing automation of the measurement technology will allow non-experts to use it, significantly reduce acquisition time and increase accuracy, making underwater metrology more cost-effective.}, subject = {Selbstkalibrierung}, language = {en} } @phdthesis{Freiberg2015, author = {Freiberg, Martina}, title = {UI-, User-, \& Usability-Oriented Engineering of Participative Knowledge-Based Systems}, publisher = {W{\"u}rzburg University Press}, isbn = {978-3-95826-012-2 (print)}, doi = {10.25972/WUP-978-3-95826-013-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-106072}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {232}, year = {2015}, abstract = {Knowledge-based systems (KBS) attract ever-increasing interest in various disciplines and contexts. Yet, the former aim of constructing the 'perfect intelligent software' is continuously shifting towards user-centered, participative solutions. Such systems enable users to contribute their personal knowledge to the problem solving process for increased efficiency and an ameliorated user experience. More precisely, we define non-functional key requirements of participative KBS as: Transparency (encompassing KBS status mediation), configurability (user adaptability, degree of user control/exploration), quality of the KB and UI, and evolvability (enabling the KBS to grow mature with their users). Many of those requirements depend on the respective target users, thus calling for a more user-centered development. Often, highly specialized expert domains are targeted as well — inducing highly complex KBs — which requires a more careful and considerate UI/interaction design. Still, current KBS engineering (KBSE) approaches mostly focus on knowledge acquisition (KA). This often leads to non-optimal, hardly reusable, and barely evaluated KBS front-end solutions. In this thesis we propose a more encompassing KBSE approach. Due to the strong mutual influences between KB and UI, we suggest a novel form of intertwined UI and KB development. We base the approach on three core components for encompassing KBSE: (1) Extensible prototyping, a tailored form of evolutionary prototyping; this builds on mature UI prototypes and offers two extension steps for the anytime creation of core KBS prototypes (KB + core UI) and fully productive KBS (core KBS prototype + common framing functionality). (2) KBS UI patterns, which define reusable solutions for the core KBS UI/interaction; we provide a basic collection of such patterns in this work. (3) Suitable usability instruments for the assessment of the KBS artifacts. Therewith, we do not strive for 'yet another' self-contained KBS engineering methodology. Rather, we suggest extending existing approaches with the proposed key components. We demonstrate this based on an agile KBSE model. For practical support, we introduce the tailored KBSE tool ProKEt. ProKEt offers a basic selection of KBS core UI patterns and corresponding configuration options out of the box; their further adaptation/extension is possible on various levels of expertise. For practical usability support, ProKEt offers facilities for quantitative and qualitative data collection. ProKEt explicitly fosters the suggested, intertwined development of UI and KB. For seamlessly integrating KA activities, it provides extension points for two selected external KA tools: KnowOF, a standard office-based KA environment, and KnowWE, a semantic wiki for collaborative KA. Therewith, ProKEt offers powerful support for encompassing, user-centered KBSE.
Finally, based on the approach and the tool, we also developed a novel KBS type: Clarification KBS as a mashup of consultation and justification KBS modules. Those denote a particularly suitable realization of participative KBS in highly specialized expert contexts and consequently require a specific design. In this thesis, apart from more common UI solutions, we also introduce KBS UI patterns especially tailored towards Clarification KBS.}, subject = {Wissensbasiertes System}, language = {en} } @phdthesis{Saska2009, author = {Saska, Martin}, title = {Trajectory planning and optimal control for formations of autonomous robots}, isbn = {978-3-923959-56-3}, doi = {10.25972/OPUS-4622}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-53175}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {In this thesis, we present novel approaches for formation driving of nonholonomic robots and optimal trajectory planning to reach a target region. The methods consider a static known map of the environment as well as unknown and dynamic obstacles detected by sensors of the formation. The algorithms are based on leader following techniques, where the formation of car-like robots is maintained in a shape determined by curvilinear coordinates. Beyond this, the general methods of formation driving are specialized and extended for the application of airport snow shoveling. Detailed descriptions of the algorithms complemented by relevant stability and convergence studies will be provided in the following chapters. Furthermore, the applicability will be verified by various simulations in existing robotic environments and also by a hardware experiment.}, subject = {Autonomer Roboter}, language = {en} } @phdthesis{Tischler2008, author = {Tischler, German}, title = {Theory and Applications of Parametric Weighted Finite Automata}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-28145}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Parametric weighted finite automata (PWFA) are a multi-dimensional generalization of weighted finite automata. The expressiveness of PWFA contains the expressiveness of weighted finite automata as well as the expressiveness of affine iterated function systems. The thesis discusses theory and applications of PWFA. The properties of PWFA definable sets are studied and it is shown that some fractal generator systems can be simulated using PWFA and that various real and complex functions can be represented by PWFA. Furthermore, the decoding of PWFA and the interpretation of PWFA definable sets is discussed.}, subject = {Automat }, language = {en} } @phdthesis{Schmitz2000, author = {Schmitz, Heinz}, title = {The Forbidden Pattern Approach to Concatenation Hierarchies}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2832}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2000}, abstract = {The thesis addresses the question of whether the dot-depth of star-free regular languages is computable. Here one has to determine for a given star-free regular language the minimal number of alternations between concatenation on one hand, and intersection, union, and complement on the other hand. This question was first raised in 1971 (Brzozowski/Cohen) and, besides the extended star-height problem, it is usually referred to as one of the most difficult open questions on regular languages. The dot-depth problem can be captured formally by hierarchies of classes of star-free regular languages B(0), B(1/2), B(1), B(3/2),...
and L(0), L(1/2), L(1), L(3/2),..., which are defined via alternating the closure under concatenation and Boolean operations, beginning with single alphabet letters. Now the question of dot-depth is whether these hierarchy classes have decidable membership problems. The thesis makes progress on this question using the so-called forbidden pattern approach: Classes of regular languages are characterized in terms of patterns in finite automata (subgraphs in the transition graph) that are not allowed. Such a characterization immediately implies the decidability of the respective class, since the absence of a certain pattern in a given automaton can be effectively verified. Before this work, the decidability of B(0), B(1/2), B(1) and L(0), L(1/2), L(1), L(3/2) was known. Here a detailed study of these classes with the help of forbidden patterns is given, which leads to new insights into their inner structure. Furthermore, the decidability of B(3/2) is proven. Based on these results a theory of pattern iteration is developed which leads to the introduction of two new hierarchies of star-free regular languages. These hierarchies are decidable on the one hand; on the other hand, they are in close connection to the classes B(n) and L(n). It remains an open question here whether they may in fact coincide. Some evidence is given in favour of this conjecture, which opens a new way to attack the dot-depth problem. Moreover, it is shown that the class L(5/2) is decidable in the restricted case of a two-letter alphabet.}, subject = {Sternfreie Sprache}, language = {en} } @phdthesis{Driewer2008, author = {Driewer, Frauke}, title = {Teleoperation Interfaces in Human-Robot Teams}, isbn = {978-3-923959-57-0}, doi = {10.25972/OPUS-2955}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-36351}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Diese Arbeit besch{\"a}ftigt sich mit der Verbesserung von Mensch-Roboter Interaktion in Mensch-Roboter Teams f{\"u}r Teleoperation Szenarien, wie z.B. robotergest{\"u}tzte Feuerwehreins{\"a}tze. Hierbei wird ein Konzept und eine Architektur f{\"u}r ein System zur Unterst{\"u}tzung von Teleoperation von Mensch-Roboter Teams vorgestellt. Die Anforderungen an Informationsaustausch und -verarbeitung, insbesondere f{\"u}r die Anwendung Rettungseinsatz, werden ausgearbeitet. Weiterhin wird das Design der Benutzerschnittstellen f{\"u}r Mensch-Roboter Teams dargestellt und Prinzipien f{\"u}r Teleoperation-Systeme und Benutzerschnittstellen erarbeitet. Alle Studien und Ans{\"a}tze werden in einem Prototypen-System implementiert und in verschiedenen Benutzertests abgesichert. Erweiterungsm{\"o}glichkeiten zum Einbinden von 3D Sensordaten und die Darstellung auf Stereovisualisierungssystemen werden gezeigt.}, subject = {Robotik}, language = {en} } @phdthesis{Travers2007, author = {Travers, Stephen}, title = {Structural Properties of NP-Hard Sets and Uniform Characterisations of Complexity Classes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-27124}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {This thesis is devoted to the study of computational complexity theory, a branch of theoretical computer science. Computational complexity theory investigates the inherent difficulty in designing efficient algorithms for computational problems. By doing so, it analyses the scalability of computational problems and algorithms and places practical limits on what computers can actually accomplish.
Computational problems are categorised into complexity classes. Among the most important complexity classes are the class NP and the subclass of NP-complete problems, which comprises many important optimisation problems in the field of operations research. Moreover, with the P-NP problem, the class NP represents the most important unsolved question in computer science. The first part of this thesis is devoted to the study of NP-complete and, more generally, NP-hard problems. It aims at improving our understanding of this important complexity class by systematically studying how altering NP-hard sets affects their NP-hardness. This research is related to longstanding open questions concerning the complexity of unions of disjoint NP-complete sets, and the existence of sparse NP-hard sets. The second part of the thesis is also dedicated to complexity classes but takes a different perspective: In a sense, after investigating the interior of complexity classes in the first part, the focus shifts to the description of complexity classes and thereby to the exterior in the second part. It deals with the description of complexity classes through leaf languages, a uniform framework which allows us to characterise a great variety of important complexity classes. The known concepts are complemented by a new leaf-language model. To a certain extent, this new approach combines the advantages of the known models. The presented results give evidence that the connection between the theory of formal languages and computational complexity theory might be closer than formerly known.}, subject = {Berechnungskomplexit{\"a}t}, language = {en} } @phdthesis{Hopfner2008, author = {Hopfner, Marbod}, title = {Source Code Analysis, Management, and Visualization for PROLOG}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-36300}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {This thesis deals with the management and analysis of source code, which is represented in XML. Using the elementary methods of the XML repository, the XML source code representation is accessed, changed, updated, and saved. We reason about the source code, refactor it, and visualize dependency graphs for call analysis. The visualized dependencies between files, modules, or packages are used to structure the source code in order to get a system which is easy to comprehend, to modify, and to complete. Sophisticated methods have been developed to slice the source code in order to obtain a working package of a large system, containing only a specific functionality. The basic methods on which the visualizations and analyses are built can be exchanged like plug-ins. The visualization methods can be reused in order to handle arbitrary source code representations, e.g., JAML, PHPML, PROLOGML. Dependencies from other contexts can be visualized as well, e.g., ER diagrams or website references.
The tool SCAV supports the source code visualization and analysis methods.}, subject = {Refactoring}, language = {en} } @phdthesis{Sun2014, author = {Sun, Kaipeng}, title = {Six Degrees of Freedom Object Pose Estimation with Fusion Data from a Time-of-flight Camera and a Color Camera}, isbn = {978-3-923959-97-6}, doi = {10.25972/OPUS-10508}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-105089}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Object six Degrees of Freedom (6DOF) pose estimation is a fundamental problem in many practical robotic applications, where the target or an obstacle with a simple or complex shape can move fast in cluttered environments. In this thesis, a 6DOF pose estimation algorithm is developed based on the fused data from a time-of-flight camera and a color camera. The algorithm is divided into two stages, an annealed particle filter based coarse pose estimation stage and a gradient descent based accurate pose optimization stage. In the first stage, each particle is evaluated with sparse representation. In this stage, the large inter-frame motion of the target can be well handled. In the second stage, the range data based conventional Iterative Closest Point algorithm is extended by incorporating the target appearance information and used for calculating the accurate pose by refining the coarse estimate from the first stage. For dealing with significant illumination variations during the tracking, spherical harmonic illumination modeling is investigated and integrated into both stages. The robustness and accuracy of the proposed algorithm are demonstrated through experiments on various objects in both indoor and outdoor environments. Moreover, real-time performance can be achieved with graphics processing unit acceleration.}, subject = {Mustererkennung}, language = {en} } @phdthesis{Baier2018, author = {Baier, Pablo A.}, title = {Simulator for Minimally Invasive Vascular Interventions: Hardware and Software}, isbn = {978-3-945459-22-5}, doi = {10.25972/OPUS-16119}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-161190}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {118}, year = {2018}, abstract = {A complete simulation system is proposed that can be used as an educational tool by physicians in training basic skills of Minimally Invasive Vascular Interventions. In the first part, a surface model is developed to assemble arteries having a planar segmentation. It is based on Sweep Surfaces and can be extended to T- and Y-like bifurcations. A continuous force vector field is described, representing the interaction between the catheter and the surface. The computation time of the force field is almost unaffected when the resolution of the artery is increased. The mechanical properties of arteries play an essential role in the study of the circulatory system dynamics, which has become increasingly important in the treatment of cardiovascular diseases. In Virtual Reality Simulators, it is crucial to have a tissue model that responds in real time. In this work, the arteries are discretized by a two-dimensional mesh and the nodes are connected by three kinds of linear springs. Three tissue layers (Intima, Media, Adventitia) are considered and, starting from the stretch-energy density, some of the elasticity tensor components are calculated. The physical model linearizes and homogenizes the material response, but it still contemplates the geometric nonlinearity.
In general, if the arterial stretch varies by 1\% or less, then the agreement between the linear and nonlinear models is trustworthy. In the last part, the physical model of the wire proposed by Konings is improved. As a result, a simpler and more stable method is obtained to calculate the equilibrium configuration of the wire. In addition, a geometrical method is developed to perform relaxations. It is particularly useful when the wire is hindered in the physical method because of the boundary conditions. The physical and the geometrical methods are merged, resulting in efficient relaxations. Tests show that the shape of the virtual wire agrees with the experiment. The proposed algorithm allows real-time execution, and the hardware needed to assemble the simulator has a low cost.}, subject = {Computersimulation}, language = {en} } @phdthesis{Betz2005, author = {Betz, Christian}, title = {Scalable authoring of diagnostic case based training systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-17885}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {Diagnostic Case Based Training Systems (D-CBT) provide learners with a means to learn and exercise knowledge in a realistic context. In medical education, D-CBT Systems present virtual patients to the learners who are asked to examine, diagnose and state therapies for these patients. Due to a number of conflicting and changing requirements, e.g. time for learning and authoring effort, several systems have been developed so far. These systems range from simple, easy-to-use presentation systems to highly complex knowledge based systems supporting explorative learning. This thesis presents an approach and tools to create D-CBT systems from existing sources (documents, e.g. dismissal records) using existing tools (word processors): Authors annotate and extend the documents to model the knowledge. A scalable knowledge representation is able to capture the content on multiple levels, from simple to highly structured knowledge. Thus, authoring of D-CBT systems requires fewer prerequisites and less pre-knowledge and is faster than approaches using specialized authoring environments. Also, authors can iteratively add and structure more knowledge to adapt training cases to their learners' needs. The thesis also discusses the application of the same approach to other domains, especially to knowledge acquisition for the Semantic Web.}, subject = {Computerunterst{\"u}tztes Lernen}, language = {en} } @phdthesis{Busch2016, author = {Busch, Stephan}, title = {Robust, Flexible and Efficient Design for Miniature Satellite Systems}, isbn = {978-3-945459-10-2}, doi = {10.25972/OPUS-13652}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-136523}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Small satellites contribute significantly to the rapidly evolving innovation in space engineering, in particular in distributed space systems for global Earth observation and communication services. Significant mass reduction by miniaturization, increased utilization of commercial high-tech components, and in particular standardization are the key drivers for modern miniature space technology. This thesis addresses key fields in research and development on miniature satellite technology regarding efficiency, flexibility, and robustness. Here, these challenges are addressed by the University of Wuerzburg's advanced pico-satellite bus, realizing a generic modular satellite architecture and standardized interfaces for all subsystems.
The modular platform ensures reusability, scalability, and increased testability due to its flexible subsystem interface which allows efficient and compact integration of the entire satellite in a plug-and-play manner. Beside systematic design for testability, a high degree of operational robustness is achieved by the consequent implementation of redundancy of crucial subsystems. This is combined with efficient fault detection, isolation and recovery mechanisms. Thus, the UWE-3 platform, and in particular the on-board data handling system and the electrical power system, offers one of the most efficient pico-satellite architectures launched in recent years and provides a solid basis for future extensions. The in-orbit performance results of the pico-satellite UWE-3 are presented and summarize successful operations since its launch in 2013. Several software extensions and adaptations have been uploaded to UWE-3 increasing its capabilities. Thus, a very flexible platform for in-orbit software experiments and for evaluations of innovative concepts was provided and tested.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Herrmann2013, author = {Herrmann, Christian}, title = {Robotic Motion Compensation for Applications in Radiation Oncology}, isbn = {978-3-923959-88-4}, doi = {10.25972/OPUS-6727}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-79045}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {Aufgrund vieler Verbesserungen der Behandlungsmethoden im Laufe der letzten 60 Jahre, erlaubt die Strahlentherapie heutzutage pr{\"a}zise Behandlungen von statischen Tumoren. Jedoch birgt die Bestrahlung von sich bewegenden Tumoren noch große Herausforderungen in sich, da bewegliche Tumore oft den Behandlungsstrahl verlassen. Dabei reduziert sich die Strahlendosis im Tumor w{\"a}hrend sich diese im umliegenden gesunden Gewebe erh{\"o}ht. Diese Forschungsarbeit zielt darauf ab, die Grenzen der Strahlentherapie zu erweitern, um pr{\"a}zise Behandlungen von beweglichen Tumoren zu erm{\"o}glichen. Der Fokus der Arbeit liegt auf der Erstellung eines Echtzeitsystems zur aktiven Kompensation von Tumorbewegungen durch robotergest{\"u}tzte Methoden. W{\"a}hrend Behandlungen befinden sich Patienten auf einer Patientenliege, mit der statische Lagerungsfehler vor Beginn einer Behandlung korrigiert werden. Die in dieser Arbeit verwendete Patientenliege "HexaPOD" ist ein paralleler Manipulator mit sechs Freiheitsgraden, der große Lasten innerhalb eines eingeschr{\"a}nkten Arbeitsbereichs pr{\"a}zise positionieren kann. Obwohl der HexaPOD urspr{\"u}nglich nicht f{\"u}r dynamische Anwendungen konzipiert wurde, wird dieser f{\"u}r eine dauerhafte Bewegungskompensation eingesetzt, in dem Patienten so bewegt werden, dass Tumore pr{\"a}zise im Zentralstrahl w{\"a}hrend der Dauer einer gesamten Behandlung verbleiben. Um ein echtzeitf{\"a}higes Kompensationssystem auf Basis des HexaPODs zu realisieren, muss eine Reihe an Herausforderungen bew{\"a}ltigt werden. Echtzeitaspekte werden einerseits durch die Verwendung eines harten Echtzeitbetriebssystems abgedeckt, andererseits durch die Messung und Sch{\"a}tzung von Latenzzeiten aller physikalischen Gr{\"o}ßen im System, z.B. Messungen der Tumor- und Atemposition. Neben der konsistenten und durchg{\"a}ngigen Ber{\"u}cksichtigung von akkuraten Zeitinformation, werden alle software-induzierten Latenzen adaptiv ausgeglichen. Dies erfordert Vorhersagen der Tumorposition in die nahe Zukunft. 
Zahlreiche Pr{\"a}diktoren zur Atem- und Tumorpositionsvorhersage werden vorgeschlagen und anhand verschiedenster Metriken evaluiert. Erweiterungen der Pr{\"a}diktionsalgorithmen werden eingef{\"u}hrt, die sowohl Atem- als auch Tumorpositionsinformationen fusionieren, um Vorhersagen ohne explizites Korrelationsmodell zu erm{\"o}glichen. Die Vorhersagen bestimmen den zuk{\"u}nftigen Bewegungspfad des HexaPODs, um Tumorbewegungen zu kompensieren. Dazu werden verschiedene Regler entwickelt, die eine Trajektorienverfolgung mit dem HexaPOD erm{\"o}glichen. Auf der Basis von linearer und nicht-linearer dynamischer Modellierung des HexaPODs mit Methoden der Systemidentifikation, wird zun{\"a}chst ein modellpr{\"a}diktiver Regler entwickelt. Ein zweiter Regler wird auf Basis einer Annahme {\"u}ber das Arbeitsprinzip des internen Reglers im HexaPOD entworfen. Schließlich wird ein dritter Regler vorgeschlagen, der beide vorhergehenden Regler miteinander kombiniert. F{\"u}r jeden dieser Regler werden vergleichende Ergebnisse aus Experimenten mit realer Hardware und menschlichen Versuchspersonen pr{\"a}sentiert und diskutiert. Dar{\"u}ber hinaus wird die geeignete Wahl von freien Parametern in den Reglern vorgestellt. Neben einer pr{\"a}zisen Verfolgung der Referenztrajektorie spielt der Patientenkomfort eine entscheidende Rolle f{\"u}r die Akzeptanz des Systems. Es wird gezeigt, dass die Regler glatte Trajektorien realisieren k{\"o}nnen, um zu garantieren, dass sich Patienten wohl f{\"u}hlen w{\"a}hrend ihre Tumorbewegung mit Genauigkeiten im Submillimeterbereich ausgeglichen wird. Gesamtfehler werden im Kompensationssystem analysiert, in dem diese zu Trajektorienverfolgungsfehlern und Pr{\"a}diktionsfehlern in Beziehung gesetzt werden. Durch Ausnutzung von Eigenschaften verschiedener Pr{\"a}diktoren wird gezeigt, dass die Startzeit des Systems bis die Verfolgung der Referenztrajektorie erreicht ist, wenige Sekunden betr{\"a}gt. Dies gilt insbesondere f{\"u}r den Fall eines initial ruhenden HexaPODs und ohne Vorwissen {\"u}ber Tumorbewegungen. Dies zeigt die Eignung des Systems f{\"u}r die sehr kurz fraktionierten Behandlungen von Lungentumoren. Das Tumorkompensationssystem wurde ausschließlich auf Basis von klinischer Standard-Hardware entwickelt, die in vielen Behandlungsr{\"a}umen zu finden ist. Durch ein einfaches und flexibles Design k{\"o}nnen Behandlungsr{\"a}ume in kosteneffizienter Weise um M{\"o}glichkeiten der Bewegungskompensation erg{\"a}nzt werden. Dar{\"u}ber hinaus werden aktuelle Behandlungsmethoden wie intensit{\"a}tsmodulierte Strahlentherapie oder Volumetric Modulated Arc Therapy in keiner Weise eingeschr{\"a}nkt. Aufgrund der Unterst{\"u}tzung verschiedener Kompensationsmodi kann das System auf alle beweglichen Tumore angewendet werden, unabh{\"a}ngig davon ob die Bewegungen vorhersagbar (Lungentumore) oder nicht vorhersagbar (Prostatatumore) sind. Durch Integration von geeigneten Methoden zur Tumorpositionsbestimmung kann das System auf einfache Weise zur Kompensation von anderen Tumoren erweitert werden.}, subject = {Robotik}, language = {en} } @phdthesis{Martin2008, author = {Martin, R{\"u}diger}, title = {Resilience, Provisioning, and Control for the Network of the Future}, doi = {10.25972/OPUS-2504}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-28497}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {The Internet sees an ongoing transformation process from a single best-effort service network into a multi-service network. 
In addition to traditional applications like e-mail, WWW traffic, or file transfer, future generation networks (FGNs) will carry services with real-time constraints and stringent availability and reliability requirements like Voice over IP (VoIP), video conferencing, virtual private networks (VPNs) for finance, other real-time business applications, tele-medicine, or tele-robotics. Hence, quality of service (QoS) guarantees and resilience to failures are crucial characteristics of an FGN architecture. At the same time, network operations must be efficient. This necessitates sophisticated mechanisms for the provisioning and the control of future communication infrastructures. In this work we investigate such mechanisms for resilient FGNs. There are many aspects of the provisioning and control of resilient FGNs such as traffic matrix estimation, traffic characterization, traffic forecasting, mechanisms for QoS enforcement also during failure cases, resilient routing, or scalability concerns for future routing and addressing mechanisms. In this work we focus on three important aspects for which performance analysis can deliver substantial insights: load balancing for multipath Internet routing, fast resilience concepts, and advanced dimensioning techniques for resilient networks. Routing in modern communication networks is often based on multipath structures, e.g., equal-cost multipath routing (ECMP) in IP networks, to facilitate traffic engineering and resiliency. When multipath routing is applied, load balancing algorithms distribute the traffic over available paths towards the destination according to pre-configured distribution values. State-of-the-art load balancing algorithms operate either on the packet or the flow level. Packet level mechanisms achieve highly accurate traffic distributions, but are known to have negative effects on the performance of transport protocols and should not be applied. Flow level mechanisms avoid performance degradations, but at the expense of reduced accuracy. These inaccuracies may have unpredictable effects on link capacity requirements and complicate resource management. Thus, it is important to exactly understand the accuracy and dynamics of load balancing algorithms in order to be able to exercise better network control. Knowing about their weaknesses, it is also important to look for alternatives and to assess their applicability in different networking scenarios. This is the first aspect of this work. Component failures are inevitable during the operation of communication networks and lead to routing disruptions if no special precautions are taken. In case of a failure, the robust shortest-path routing of the Internet reconverges after some time to a state where all nodes are again reachable - provided physical connectivity still exists. But stringent availability and reliability criteria of new services make a fast reaction to failures obligatory for resilient FGNs. This led to the development of fast reroute (FRR) concepts for MPLS and IP routing. The operations of MPLS-FRR have already been standardized. Still, the standards leave some degrees of freedom for the resilient path layout and it is important to understand the tradeoffs between different options for the path layout to efficiently provision resilient FGNs. In contrast, the standardization for IP-FRR is an ongoing process. The applicability and possible combinations of different concepts are still open issues.
IP-FRR also facilitates a comprehensive resilience framework for IP routing covering all steps of the failure recovery cycle. These points constitute another aspect of this work. Finally, communication networks are usually over-provisioned, i.e., they have much more capacity installed than actually required during normal operation. This is a precaution for various challenges such as network element failures. An alternative to this capacity overprovisioning (CO) approach is admission control (AC). AC blocks new flows in case of imminent overload due to unanticipated events to protect the QoS for already admitted flows. On the one hand, CO is generally viewed as a simple mechanism, AC as a more complex mechanism that complicates the network control plane and raises interoperability issues. On the other hand, AC appears more cost-efficient than CO. To obtain advanced provisioning methods for resilient FGNs, it is important to find suitable models for irregular events, such as failures and different sources of overload, and to incorporate them into capacity dimensioning methods. This allows for a fair comparison between CO and AC in various situations and yields a better understanding of the strengths and weaknesses of both concepts. Such an advanced capacity dimensioning method for resilient FGNs represents the third aspect of this work.}, subject = {Backbone-Netz}, language = {en} } @phdthesis{Schlosser2011, author = {Schlosser, Daniel}, title = {Quality of Experience Management in Virtual Future Networks}, doi = {10.25972/OPUS-5719}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69986}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Aktuell beobachten wir eine drastische Vervielf{\"a}ltigung der Dienste und Anwendungen, die das Internet f{\"u}r den Datentransport nutzen. Dabei unterscheiden sich die Anforderungen dieser Dienste an das Netzwerk deutlich. Das Netzwerkmanagement wird durch diese Diversit{\"a}t der nutzenden Dienste aber deutlich erschwert, da es einem Datentransportdienstleister kaum m{\"o}glich ist, die unterschiedlichen Verbindungen zu unterscheiden, ohne den Inhalt der transportierten Daten zu analysieren. Netzwerkvirtualisierung ist eine vielversprechende L{\"o}sung f{\"u}r dieses Problem, da sie es erm{\"o}glicht f{\"u}r verschiedene Dienste unterschiedliche virtuelle Netze auf dem gleichen physikalischen Substrat zu betreiben. Diese Diensttrennung erm{\"o}glicht es, jedes einzelne Netz anwendungsspezifisch zu steuern. Ziel einer solchen Netzsteuerung ist es, sowohl die vom Nutzer erfahrene Dienstg{\"u}te als auch die Kosteneffizienz des Datentransports zu optimieren. Dar{\"u}ber hinaus wird es mit Netzwerkvirtualisierung m{\"o}glich das physikalische Netz so weit zu abstrahieren, dass die aktuell fest verzahnten Rollen von Netzwerkbesitzer und Netzwerkbetreiber entkoppelt werden k{\"o}nnen. Dar{\"u}ber hinaus stellt Netzwerkvirtualisierung sicher, dass unterschiedliche Datennetze, die gleichzeitig auf dem gleichen physikalischen Netz betrieben werden, sich gegenseitig weder beeinflussen noch st{\"o}ren k{\"o}nnen. Diese Arbeit  besch{\"a}ftigt sich mit ausgew{\"a}hlten Aspekten dieses Themenkomplexes und fokussiert sich darauf, ein virtuelles Netzwerk mit bestm{\"o}glicher Dienstqualit{\"a}t f{\"u}r den Nutzer zu betreiben und zu steuern. Daf{\"u}r wird ein Top-down-Ansatz gew{\"a}hlt, der von den Anwendungsf{\"a}llen, einer m{\"o}glichen Netzwerkvirtualisierungs-Architektur und aktuellen M{\"o}glichkeiten der Hardwarevirtualisierung ausgeht. 
Im Weiteren fokussiert sich die Arbeit dann in Richtung Bestimmung und Optimierung der vom Nutzer erfahrenen Dienstqualit{\"a}t (QoE) auf Applikationsschicht und diskutiert M{\"o}glichkeiten zur Messung und {\"U}berwachung von wesentlichen Netzparametern in virtualisierten Netzen.}, subject = {Netzwerkmanagement}, language = {en} } @phdthesis{Huber2023, author = {Huber, Stephan}, title = {Proxemo: Documenting Observed Emotions in HCI}, doi = {10.25972/OPUS-30573}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305730}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {For formative evaluations of user experience (UX) a variety of methods have been developed over the years. However, most techniques require the users to interact with the study as a secondary task. This active involvement in the evaluation is not inclusive of all users and potentially biases the experience currently being studied. Yet there is a lack of methods for situations in which the user has no spare cognitive resources. This condition occurs when 1) users' cognitive abilities are impaired (e.g., people with dementia) or 2) users are confronted with very demanding tasks (e.g., air traffic controllers). In this work we focus on emotions as a key component of UX and propose the new structured observation method Proxemo for formative UX evaluations. Proxemo allows qualified observers to document users' emotions by proxy in real time and then directly link them to triggers. Technically this is achieved by synchronising the timestamps of emotions documented by observers with a video recording of the interaction. In order to facilitate the documentation of observed emotions in highly diverse contexts we conceptualise and implement two separate versions of a documentation aid named Proxemo App. For formative UX evaluations of technology-supported reminiscence sessions with people with dementia, we create a smartwatch app to discreetly document emotions from the categories anger, general alertness, pleasure, wistfulness and pride. For formative UX evaluations of prototypical user interfaces with air traffic controllers we create a smartphone app to efficiently document emotions from the categories anger, boredom, surprise, stress and pride. Descriptive case studies in both application domains indicate the feasibility and utility of the method Proxemo and the appropriateness of the respectively adapted design of the Proxemo App. The third part of this work is a series of meta-evaluation studies to determine quality criteria of Proxemo. We evaluate Proxemo regarding its reliability, validity, thoroughness and effectiveness, and compare Proxemo's efficiency and the observers' experience to documentation with pen and paper. Proxemo is reliable, as well as more efficient, thorough and effective than handwritten notes and provides a better UX to observers. Proxemo compares well with existing methods where benchmarks are available. With Proxemo we contribute a validated structured observation method that has been shown to meet the requirements of formative UX evaluations in the extreme contexts of users with cognitive impairments or high task demands. Proxemo is agnostic regarding researchers' theoretical approaches and unites reductionist and holistic perspectives within one method. Future work should explore the applicability of Proxemo for further domains and extend the list of audited quality criteria to include, for instance, downstream utility.
With respect to basic research we strive to better understand the sources leading observers to empathic judgments and propose reminisce and older adults as model environment for investigating mixed emotions.}, subject = {Gef{\"u}hl}, language = {en} } @phdthesis{Wojtkowiak2018, author = {Wojtkowiak, Harald}, title = {Planungssystem zur Steigerung der Autonomie von Kleinstsatelliten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-163569}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Der Betrieb von Satelliten wird sich in Zukunft gravierend {\"a}ndern. Die bisher ausge{\"u}bte konventionelle Vorgehensweise, bei der die Planung der vom Satelliten auszuf{\"u}hrenden Aktivit{\"a}ten sowie die Kontrolle hier{\"u}ber ausschließlich vom Boden aus erfolgen, st{\"o}ßt bei heutigen Anwendungen an ihre Grenzen. Im schlimmsten Fall verhindert dieser Umstand sogar die Erschließung bisher ungenutzter M{\"o}glichkeiten. Der Gewinn eines Satelliten, sei es in Form wissenschaftlicher Daten oder der Vermarktung satellitengest{\"u}tzter Dienste, wird daher nicht optimal ausgesch{\"o}pft. Die Ursache f{\"u}r dieses Problem l{\"a}sst sich im Grunde auf eine ausschlaggebende Tatsache zur{\"u}ckf{\"u}hren: Konventionelle Satelliten k{\"o}nnen ihr Verhalten, d.h. die Folge ihrer T{\"a}tigkeiten, nicht eigenst{\"a}ndig anpassen. Stattdessen erstellt das Bedienpersonal am Boden - vor allem die Operatoren - mit Hilfe von Planungssoftware feste Ablaufpl{\"a}ne, die dann in Form von Kommandosequenzen von den Bodenstationen aus an die jeweiligen Satelliten hochgeladen werden. Dort werden die Befehle lediglich {\"u}berpr{\"u}ft, interpretiert und strikt ausgef{\"u}hrt. Die Abarbeitung erfolgt linear. Situationsbedingte {\"A}nderungen, wie sie vergleichsweise bei der Codeausf{\"u}hrung von Softwareprogrammen durch Kontrollkonstrukte, zum Beispiel Schleifen und Verzweigungen, {\"u}blich sind, sind typischerweise nicht vorgesehen. Der Operator ist daher die einzige Instanz, die das Verhalten des Satelliten mittels Kommandierung, per Upload, beeinflussen kann, und auch nur dann, wenn ein direkter Funkkontakt zwischen Satellit und Bodenstation besteht. Die dadurch m{\"o}glichen Reaktionszeiten des Satelliten liegen bestenfalls bei einigen Sekunden, falls er sich im Wirkungsbereich der Bodenstation befindet. Außerhalb des Kontaktfensters kann sich die Zeitschranke, gegeben durch den Orbit und die aktuelle Position des Satelliten, von einigen Minuten bis hin zu einigen Stunden erstrecken. Die Signallaufzeiten der Funk{\"u}bertragung verl{\"a}ngern die Reaktionszeiten um weitere Sekunden im erdnahen Bereich. Im interplanetaren Raum erstrecken sich die Zeitspannen aufgrund der immensen Entfernungen sogar auf mehrere Minuten. Dadurch bedingt liegt die derzeit technologisch m{\"o}gliche, bodengest{\"u}tzte, Reaktionszeit von Satelliten bestenfalls im Bereich von einigen Sekunden. Diese Einschr{\"a}nkung stellt ein schweres Hindernis f{\"u}r neuartige Satellitenmissionen, bei denen insbesondere nichtdeterministische und kurzzeitige Ph{\"a}nomene (z.B. Blitze und Meteoreintritte in die Erdatmosph{\"a}re) Gegenstand der Beobachtungen sind, dar. Die langen Reaktionszeiten des konventionellen Satellitenbetriebs verhindern die Realisierung solcher Missionen, da die verz{\"o}gerte Reaktion erst erfolgt, nachdem das zu beobachtende Ereignis bereits abgeschlossen ist. Die vorliegende Dissertation zeigt eine M{\"o}glichkeit, das durch die langen Reaktionszeiten entstandene Problem zu l{\"o}sen, auf. 
Im Zentrum des L{\"o}sungsansatzes steht dabei die Autonomie. Im Wesentlichen geht es dabei darum, den Satelliten mit der F{\"a}higkeit auszustatten, sein Verhalten, d.h. die Folge seiner T{\"a}tigkeiten, eigenst{\"a}ndig zu bestimmen bzw. zu {\"a}ndern. Dadurch wird die direkte Abh{\"a}ngigkeit des Satelliten vom Operator bei Reaktionen aufgehoben. Im Grunde wird der Satellit in die Lage versetzt, sich selbst zu kommandieren. Die Idee der Autonomie wurde im Rahmen der zugrunde liegenden Forschungsarbeiten umgesetzt. Das Ergebnis ist ein autonomes Planungssystem. Dabei handelt es sich um ein Softwaresystem, mit dem sich autonomes Verhalten im Satelliten realisieren l{\"a}sst. Es kann an unterschiedliche Satellitenmissionen angepasst werden. Ferner deckt es verschiedene Aspekte des autonomen Satellitenbetriebs, angefangen bei der generellen Entscheidungsfindung der T{\"a}tigkeiten, {\"u}ber die zeitliche Ablaufplanung unter Einbeziehung von Randbedingungen (z.B. Ressourcen) bis hin zur eigentlichen Ausf{\"u}hrung, d.h. Kommandierung, ab. Das Planungssystem kommt als Anwendung in ASAP, einer autonomen Sensorplattform, zum Einsatz. Es ist ein optisches System und dient der Detektion von kurzzeitigen Ph{\"a}nomenen und Ereignissen in der Erdatmosph{\"a}re. Die Forschungsarbeiten an dem autonomen Planungssystem, an ASAP sowie an anderen zu diesen in Bezug stehenden Systemen wurden an der Professur f{\"u}r Raumfahrttechnik des Lehrstuhls Informatik VIII der Julius-Maximilians-Universit{\"a}t W{\"u}rzburg durchgef{\"u}hrt.}, subject = {Planungssystem}, language = {de} } @phdthesis{Pries2010, author = {Pries, Jan Rastin}, title = {Performance Optimization of Wireless Infrastructure and Mesh Networks}, doi = {10.25972/OPUS-3723}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46097}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {Future broadband wireless networks should be able to support not only best effort traffic but also real-time traffic with strict Quality of Service (QoS) constraints. In addition, their available resources are scare and limit the number of users. To facilitate QoS guarantees and increase the maximum number of concurrent users, wireless networks require careful planning and optimization. In this monograph, we studied three aspects of performance optimization in wireless networks: resource optimization in WLAN infrastructure networks, quality of experience control in wireless mesh networks, and planning and optimization of wireless mesh networks. An adaptive resource management system is required to effectively utilize the limited resources on the air interface and to guarantee QoS for real-time applications. Thereby, both WLAN infrastructure and WLAN mesh networks have to be considered. An a-priori setting of the access parameters is not meaningful due to the contention-based medium access and the high dynamics of the system. Thus, a management system is required which dynamically adjusts the channel access parameters based on the network load. While this is sufficient for wireless infrastructure networks, interferences on neighboring paths and self-interferences have to be considered for wireless mesh networks. In addition, a careful channel allocation and route assignment is needed. Due to the large parameter space, standard optimization techniques fail for optimizing large wireless mesh networks. 
In this monograph, we reveal that biology-inspired optimization techniques, namely genetic algorithms, are well suited for the planning and optimization of wireless mesh networks. Although genetic algorithms do not always find the optimal solution, we show that with a good parameter set for the genetic algorithm, the overall throughput of the wireless mesh network can be significantly improved while still sharing the resources fairly among the users.}, subject = {IEEE 802.11}, language = {en} } @phdthesis{Maeder2008, author = {M{\"a}der, Andreas}, title = {Performance Models for UMTS 3.5G Mobile Wireless Systems}, doi = {10.25972/OPUS-2766}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-32525}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Mobile telecommunication systems of the 3.5th generation (3.5G) constitute a first step towards the requirements of an all-IP world. As the name suggests, 3.5G systems are not designed completely from scratch. Instead, they have evolved from existing 3G systems like UMTS or cdma2000. 3.5G systems are primarily designed and optimized for packet-switched best-effort traffic, but they are also intended to increase system capacity by exploiting available radio resources more efficiently. Systems based on cdma2000 are enhanced with 1xEV-DO (EV-DO: evolution, data-optimized). In the UMTS domain, the 3G partnership project (3GPP) specified the High Speed Packet Access (HSPA) family, consisting of High Speed Downlink Packet Access (HSDPA) and its counterpart High Speed Uplink Packet Access (HSUPA) or Enhanced Uplink. The focus of this monograph is on HSPA systems, although the operation principles of other 3.5G systems are similar. One of the main contributions of our work is a set of performance models which allow a holistic view of the system. The models consider user traffic on flow level, such that a recalculation of parameters like bandwidth is necessary only on significant changes of the system state. The impact of lower layers is captured by stochastic models. This approach combines accurate modeling and the ability to cope with computational complexity. Applying this approach to HSDPA, we develop a new physical layer abstraction model that takes radio resources, scheduling discipline, radio propagation and mobile device capabilities into account. Together with models for the calculation of network-wide interference and transmit powers, a discrete-event simulation and an analytical model based on a queuing-theoretical approach are proposed. For the Enhanced Uplink, we develop analytical models considering independent and correlated other-cell interference.}, subject = {Mobilfunk}, language = {en} } @phdthesis{Zinner2012, author = {Zinner, Thomas}, title = {Performance Modeling of QoE-Aware Multipath Video Transmission in the Future Internet}, doi = {10.25972/OPUS-6106}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72324}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Internet applications are becoming more and more flexible to support diverse user demands and network conditions. This is reflected by technical concepts, which provide new adaptation mechanisms to allow fine-grained adjustment of the application quality and the corresponding bandwidth requirements. For the case of video streaming, the scalable video codec H.264/SVC allows the flexible adaptation of frame rate, video resolution and image quality with respect to the available network resources.
In order to guarantee a good user-perceived quality (Quality of Experience, QoE) it is necessary to adjust and optimize the video quality accurately. But not only have the applications of the current Internet changed. Within network and transport, new technologies evolved during the last years providing a more flexible and efficient usage of data transport and network resources. One of the most promising technologies is Network Virtualization (NV) which is seen as an enabler to overcome the ossification of the Internet stack. It provides means to simultaneously operate multiple logical networks which allow for example application-specific addressing, naming and routing, or their individual resource management. New transport mechanisms like multipath transmission on the network and transport layer aim at an efficient usage of available transport resources. However, the simultaneous transmission of data via heterogeneous transport paths and communication technologies inevitably introduces packet reordering. Additional mechanisms and buffers are required to restore the correct packet order and thus to prevent a disturbance of the data transport. A proper buffer dimensioning as well as the classification of the impact of varying path characteristics like bandwidth and delay require appropriate evaluation methods. Additionally, for a path selection mechanism real time evaluation mechanisms are needed. A better application-network interaction and the corresponding exchange of information enable an efficient adaptation of the application to the network conditions and vice versa. This PhD thesis analyzes a video streaming architecture utilizing multipath transmission and scalable video coding and develops the following optimization possibilities and results: Analysis and dimensioning methods for multipath transmission, quantification of the adaptation possibilities to the current network conditions with respect to the QoE for H.264/SVC, and evaluation and optimization of a future video streaming architecture, which allows a better interaction of application and network.}, subject = {Video{\"u}bertragung}, language = {en} } @phdthesis{Klein2010, author = {Klein, Alexander}, title = {Performance Issues of MAC and Routing Protocols in Wireless Sensor Networks}, doi = {10.25972/OPUS-4465}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-52870}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {The focus of this work lies on the communication issues of Medium Access Control (MAC) and routing protocols in the context of WSNs. The communication challenges in these networks mainly result from high node density, low bandwidth, low energy constraints and the hardware limitations in terms of memory, computational power and sensing capabilities of low-power transceivers. For this reason, the structure of WSNs is always kept as simple as possible to minimize the impact of communication issues. Thus, the majority of WSNs apply a simple one hop star topology since multi-hop communication has high demands on the routing protocol since it increases the bandwidth requirements of the network. Moreover, medium access becomes a challenging problem due to the fact that low-power transceivers are very limited in their sensing capabilities. The first contribution is represented by the Backoff Preamble-based MAC Protocol with Sequential Contention Resolution (BPS-MAC) which is designed to overcome the limitations of low-power transceivers. 
Two communication issues, namely the Clear Channel Assessment (CCA) delay and the turnaround time, are directly addressed by the protocol. The CCA delay represents the period of time which is required by the transceiver to detect a busy radio channel while the turnaround time specifies the period of time which is required to switch between receive and transmit mode. Standard Carrier Sense Multiple Access (CSMA) protocols do not achieve high performance in terms of packet loss if the traffic is highly correlated due to the fact that the transceiver is not able to sense the medium during the switching phase. Therefore, a node may start to transmit data while another node is already transmitting since it has sensed an idle medium right before it started to switch its transceiver from receive to transmit mode. The BPS-MAC protocol uses a new sequential preamble-based medium access strategy which can be adapted to the hardware capabilities of the transceivers. The protocol achieves a very low packet loss rate even in wireless networks with high node density and event-driven traffic without the need of synchronization. This makes the protocol attractive to applications such as structural health monitoring, where event suppression is not an option. Moreover, acknowledgments or complex retransmission strategies become almost unnecessary since the sequential preamble-based contention resolution mechanism minimizes the collision probability. However, packets can still be lost as a consequence of interference or other issues which affect signal propagation. The second contribution consists of a new routing protocol which is able to quickly detect topology changes without generating a large amount of overhead. The key characteristics of the Statistic-Based Routing (SBR) protocol are high end-to-end reliability (in fixed and mobile networks), load balancing capabilities, a smooth continuous routing metric, quick adaptation to changing network conditions, low processing and memory requirements, low overhead, support of unidirectional links and simplicity. The protocol can establish routes in a hybrid or a proactive mode and uses an adaptive continuous routing metric which makes it very flexible in terms of scalability while maintaining stable routes. The hybrid mode is optimized for low-power WSNs since routes are only established on demand. The difference of the hybrid mode to reactive routing strategies is that routing messages are periodically transmitted to maintain already established routes. However, the protocol stops the transmission of routing messages if no data packets are transmitted for a certain time period in order to minimize the routing overhead and the energy consumption. The proactive mode is designed for high data rate networks which have less energy constraints. In this mode, the protocol periodically transmits routing messages to establish routes in a proactive way even in the absence of data traffic. Thus, nodes in the network can immediately transmit data since the route to the destination is already established in advance. In addition, a new delay-based routing message forwarding strategy is introduced. The forwarding strategy is part of SBR but can also be applied to many routing protocols in order to modify the established topology. The strategy can be used, e.g. in mobile networks, to decrease the packet loss by deferring routing messages with respect to the neighbor change rate. 
Thus, nodes with a stable neighborhood forward messages faster than nodes within a fast-changing neighborhood. As a result, routes are established through nodes with correlated movement, which results in fewer topology changes due to higher link durations.}, subject = {Routing}, language = {en} } @phdthesis{Henjes2010, author = {Henjes, Robert}, title = {Performance Evaluation of Publish/Subscribe Middleware Architectures}, doi = {10.25972/OPUS-4536}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-53388}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {While developing modern applications, it is necessary to ensure efficient and performant communication between different applications. In current environments, a middleware software is used, which supports the publish/subscribe communication pattern. Using this communication pattern, a publisher sends information encapsulated in messages to the middleware. A subscriber registers its interests at the middleware. The monograph describes three different steps to determine the performance of such a system. In a first step, the message throughput performance of a publish/subscribe system in different scenarios is measured using a Java Message Service (JMS) based implementation. In the second step, the maximum achievable message throughput is described by adapted models depending on the filter complexity and the replication grade. Using the model, the performance characteristics of a specific system in a given scenario can be determined. These numbers are used for the queuing model described in the third part of the thesis, which supports the dimensioning of a system in realistic scenarios. Additionally, we introduce a method to approximate an M/G/1 system numerically in an efficient way, which can be used for real-time analysis to predict the expected performance in a certain scenario. Finally, the analytical model is used to investigate different possibilities to ensure the scalability of the maximum achievable message throughput of the overall system.}, subject = {Middleware}, language = {en} } @phdthesis{Geissler2022, author = {Geißler, Stefan}, title = {Performance Evaluation of Next-Generation Data Plane Architectures and their Components}, issn = {1432-8801}, doi = {10.25972/OPUS-26015}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260157}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {In this doctoral thesis we cover the performance evaluation of next generation data plane architectures, comprised of complex software as well as programmable hardware components that allow fine-granular configuration. In the scope of the thesis we propose mechanisms to monitor the performance of singular components and model key performance indicators of software-based packet processing solutions. We present novel approaches towards network abstraction that allow the integration of heterogeneous data plane technologies into a singular network while maintaining total transparency between control and data plane. Finally, we investigate a full, complex system consisting of multiple software-based solutions and perform a detailed performance analysis. We employ simulative approaches to investigate overload control mechanisms that allow efficient operation under adverse conditions.
The contributions of this work build the foundation for future research in the areas of network softwarization and network function virtualization.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Hossfeld2009, author = {Hoßfeld, Tobias}, title = {Performance Evaluation of Future Internet Applications and Emerging User Behavior}, doi = {10.25972/OPUS-3067}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-37570}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {In future telecommunication systems, we observe an increasing diversity of access networks. The separation of transport services and applications or services leads to multi-network services, i.e., a future service has to work transparently to the underlying network infrastructure. Multi-network services with edge-based intelligence, like P2P file sharing or the Skype VoIP service, impose new traffic control paradigms on the future Internet. Such services adapt the amount of consumed bandwidth to reach different goals. A selfish behavior tries to keep the QoE of a single user above a certain level. Skype, for instance, repeats voice samples depending on the perceived end-to-end loss. From the viewpoint of a single user, the replication of voice data overcomes the degradation caused by packet loss and enables to maintain a certain QoE. The cost for this achievement is a higher amount of consumed bandwidth. However, if the packet loss is caused by congestion in the network, this additionally required bandwidth even worsens the network situation. Altruistic behavior, on the other side, would reduce the bandwidth consumption in such a way that the pressure on the network is released and thus the overall network performance is improved. In this monograph, we analyzed the impact of the overlay, P2P, and QoE paradigms in future Internet applications and the interactions from the observing user behavior. The shift of intelligence toward the edge is accompanied by a change in the emerging user behavior and traffic profile, as well as a change from multi-service networks to multi-networks services. In addition, edge-based intelligence may lead to a higher dynamics in the network topology, since the applications are often controlled by an overlay network, which can rapidly change in size and structure as new nodes can leave or join the overlay network in an entirely distributed manner. As a result, we found that the performance evaluation of such services provides new challenges, since novel key performance factors have to be first identified, like pollution of P2P systems, and appropriate models of the emerging user behavior are required, e.g. taking into account user impatience. As common denominator of the presented studies in this work, we focus on a user-centric view when evaluating the performance of future Internet applications. For a subscriber of a certain application or service, the perceived quality expressed as QoE will be the major criterion of the user's satisfaction with the network and service providers. We selected three different case studies and characterized the application's performance from the end user's point of view. Those are (1) cooperation in mobile P2P file sharing networks, (2) modeling of online TV recording services, and (3) QoE of edge-based VoIP applications. The user-centric approach facilitates the development of new mechanisms to overcome problems arising from the changing user behavior. 
An example is the proposed CycPriM cooperation strategy, which copes with selfish user behavior in mobile P2P file sharing systems. An adequate mechanism has also been shown to be efficient in a heterogeneous B3G network with mobile users conducting vertical handovers between different wireless access technologies. The consideration of the user behavior and the user perceived quality leads to an appropriate modeling of future Internet applications. In the case of the online TV recording service, this enables the comparison between different technical realizations of the system, e.g. using server clusters or P2P technology, to properly dimension the installed network elements and to assess the costs for service providers. Technologies like P2P help to overcome phenomena like flash crowds and improve scalability compared to server clusters, which may get overloaded in such situations. Nevertheless, P2P technology invokes additional challenges and different user behavior from that seen in traditional client/server systems. Besides the willingness to share files and the churn of users, peers may be malicious and offer fake contents to disturb the data dissemination. Finally, the understanding and the quantification of QoE with respect to QoS degradations permit designing sophisticated edge-based applications. To this end, we identified and formulated the IQX hypothesis as an exponential interdependency between QoE and QoS parameters, which we validated for different examples. The appropriate modeling of the emerging user behavior, taking into account the user's perceived quality and its interactions with the overlay and P2P paradigm, will finally help to design future Internet applications.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Milbrandt2007, author = {Milbrandt, Jens}, title = {Performance Evaluation of Efficient Resource Management Concepts for Next Generation IP Networks}, doi = {10.25972/OPUS-1991}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-23332}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Next generation networks (NGNs) must integrate the services of current circuit-switched telephone networks and packet-switched data networks. This convergence towards a unified communication infrastructure is driven by the high capital expenditures (CAPEX) and operational expenditures (OPEX) that result from the coexistence of separate networks for voice and data. In the end, NGNs must offer the same services as these legacy networks and, therefore, they must provide a low-cost packet-switched solution with real-time transport capabilities for telephony and multimedia applications. In addition, NGNs must be fault-tolerant to guarantee user satisfaction and to support business-critical processes also in case of network failures. A key technology for the operation of NGNs is the Internet Protocol (IP), which evolved to a common and well-accepted standard for networking in the Internet during the last 25 years. There are two basically different approaches to achieve QoS in IP networks. With capacity overprovisioning (CO), an IP network is equipped with sufficient bandwidth such that network congestion becomes very unlikely and QoS is maintained most of the time. The second option to achieve QoS in IP networks is admission control (AC). AC represents a network-inherent intelligence that admits real-time traffic flows to a single link or an entire network only if enough resources are available such that the requirements on packet loss and delay can be met.
Otherwise, the request of a new flow is blocked. This work focuses on resource management and control mechanisms for NGNs, in particular on AC and associated bandwidth allocation methods. The first contribution consists of a new link-oriented AC method called experience-based admission control (EBAC), which is a hybrid approach dealing with the problems inherent to conventional AC mechanisms like parameter-based or measurement-based AC (PBAC/MBAC). PBAC provides good QoS but suffers from poor resource utilization and, vice versa, MBAC uses resources efficiently but is susceptible to QoS violations. Hence, EBAC aims at increasing the resource efficiency while maintaining the QoS, which increases the revenues of ISPs and postpones their CAPEX for infrastructure upgrades. To show the advantages of EBAC, we first review today's AC approaches and then develop the concept of EBAC. EBAC is a simple mechanism that safely overbooks the capacity of a single link to increase its resource utilization. We evaluate the performance of EBAC by its simulation under various traffic conditions. The second contribution concerns dynamic resource allocation in transport networks which implement a specific network admission control (NAC) architecture. In general, the performance of different NAC systems may be evaluated by conventional methods such as call blocking analysis, which has often been applied in the context of multi-service asynchronous transfer mode (ATM) networks. However, to yield more practical results than abstract blocking probabilities, we propose a new method to compare different AC approaches by their respective bandwidth requirements. To present our new method for comparing different AC systems, we first give an overview of network resource management (NRM) in general. Then we present the concept of adaptive bandwidth allocation (ABA) in capacity tunnels and illustrate the analytical performance evaluation framework to compare different AC systems by their capacity requirements. Different network characteristics influence the performance of ABA. Therefore, the impact of various traffic demand models and tunnel implementations, and the influence of resilience requirements are investigated. In conclusion, the resources in NGNs must be exclusively dedicated to admitted traffic to guarantee QoS. For that purpose, robust and efficient concepts for NRM are required to control the requested bandwidth with regard to the available transmission capacity. Sophisticated AC will be a key function for NRM in NGNs and, therefore, efficient resource management concepts like experience-based admission control and adaptive bandwidth allocation for admission-controlled capacity tunnels, as presented in this work, are appealing for NGN solutions.}, subject = {Ressourcenmanagement}, language = {en} } @phdthesis{Lehrieder2013, author = {Lehrieder, Frank}, title = {Performance Evaluation and Optimization of Content Distribution using Overlay Networks}, doi = {10.25972/OPUS-6420}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76018}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {The work presents a performance evaluation and optimization of so-called overlay networks for content distribution in the Internet. Chapter 1 describes the importance that such networks have in today's Internet, for example, for the transmission of video content. The focus of this work is on overlay networks based on the peer-to-peer principle.
These are characterized by the fact that users who download content also contribute to the distribution process by sharing parts of the data with other users. This enables efficient content distribution because each user not only consumes resources in the system, but also provides its own resources. Chapter 2 of the monograph contains a detailed description of the functionality of today's most popular overlay network, BitTorrent. It explains the various components and their interaction. This is followed by an illustration of why such overlay networks are problematic for Internet service providers (ISPs). The reason lies in the large amount of inter-ISP traffic that is produced by these overlay networks. Since this inter-ISP traffic leads to high costs for ISPs, they try to reduce it by improved mechanisms for overlay networks. One optimization approach is the use of topology awareness within the overlay networks. It provides users of the overlay networks with information about the underlying physical network topology. This allows them to avoid inter-ISP traffic by exchanging data preferentially with other users that are connected to the same ISP. Another approach to save inter-ISP traffic is caching. In this case, the ISP provides additional computers in its network, called caches, which store copies of popular content. The users of this ISP can then obtain such content from the cache. This avoids retrieving the content from locations outside of the ISP's network and thus saves costly inter-ISP traffic. In the third chapter of the thesis, the results of a comprehensive measurement study of overlay networks found in today's Internet are presented. After a short description of the measurement methodology, the results of the measurements are described. These results contain data on a variety of characteristics of current P2P overlay networks in the Internet. These include the popularity of content, i.e., how many users are interested in specific content, the evolution of the popularity and the size of the files. The distribution of users within the Internet is investigated in detail. Special attention is given to the number of users that exchange a particular file within the same ISP. On the basis of these measurement results, an estimation of the traffic savings that can be achieved by topology awareness is derived. This new estimation is of scientific and practical importance, since it is not limited to individual ISPs and files, but considers the whole Internet and the total amount of data exchanged in overlay networks. Finally, the characteristics of regional content are considered, in which the popularity is limited to certain parts of the Internet. This is, for example, the case for videos in German, Italian or French. Chapter 4 of the thesis is devoted to the optimization of overlay networks for content distribution through caching. It presents a deterministic flow model that describes the influence of caches. On the basis of this model, it derives an estimate of the inter-ISP traffic that is generated by an overlay network, and which part of it can be saved by caches. The results show that the influence of the cache depends on the structure of the overlay networks, and that caches can also lead to an increase in inter-ISP traffic under certain circumstances. The described model is thus an important tool for ISPs to decide for which overlay networks caches are useful and to dimension them.
Chapter 5 summarizes the content of the work and emphasizes the importance of the findings. In addition, it explains how the findings can be applied to the optimization of future overlay networks. Special attention is given to the growing importance of video-on-demand and real-time video transmissions.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Oechsner2010, author = {Oechsner, Simon}, title = {Performance Challenges and Optimization Potential of Peer-to-Peer Overlay Technologies}, doi = {10.25972/OPUS-4159}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50015}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {In today's Internet, building overlay structures to provide a service is becoming more and more common. This approach allows for the utilization of client resources, thus being more scalable than a client-server model in this respect. However, in these architectures the quality of the provided service depends on the clients and is therefore more complex to manage. Resource utilization, both at the clients themselves and in the underlying network, determine the efficiency of the overlay application. Here, a trade-off exists between the resource providers and the end users that can be tuned via overlay mechanisms. Thus, resource management and traffic management is always quality-of-service management as well. In this monograph, the three currently significant and most widely used overlay types in the Internet are considered. These overlays are implemented in popular applications which only recently have gained importance. Thus, these overlay networks still face real-world technical challenges which are of high practical relevance. We identify the specific issues for each of the considered overlays, and show how their optimization affects the trade-offs between resource efficiency and service quality. Thus, we supply new insights and system knowledge that is not provided by previous work.}, subject = {Overlay-Netz}, language = {en} } @phdthesis{Binzenhoefer2007, author = {Binzenh{\"o}fer, Andreas}, title = {Performance Analysis of Structured Overlay Networks}, doi = {10.25972/OPUS-2250}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26291}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Overlay networks establish logical connections between users on top of the physical network. While randomly connected overlay networks provide only a best effort service, a new generation of structured overlay systems based on Distributed Hash Tables (DHTs) was proposed by the research community. However, there is still a lack of understanding the performance of such DHTs. Additionally, those architectures are highly distributed and therefore appear as a black box to the operator. Yet an operator does not want to lose control over his system and needs to be able to continuously observe and examine its current state at runtime. This work addresses both problems and shows how the solutions can be combined into a more self-organizing overlay concept. At first, we evaluate the performance of structured overlay networks under different aspects and thereby illuminate in how far such architectures are able to support carrier-grade applications. 
Secondly, to enable operators to monitor and understand their deployed system in more detail, we introduce both active as well as passive methods to gather information about the current state of the overlay network.}, subject = {Overlay-Netz}, language = {en} } @phdthesis{Hartmann2015, author = {Hartmann, Matthias}, title = {Optimization and Design of Network Architectures for Future Internet Routing}, issn = {1432-8801}, doi = {10.25972/OPUS-11416}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-114165}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {At the center of the Internet's protocol stack stands the Internet Protocol (IP) as a common denominator that enables all communication. To make routing efficient, resilient, and scalable, several aspects must be considered. Care must be taken that traffic is well balanced to make efficient use of the existing network resources, both in failure free operation and in failure scenarios. Finding the optimal routing in a network is an NP-complete problem. Therefore, routing optimization is usually performed using heuristics. This dissertation shows that a routing optimized with one objective function is often not good when looking at other objective functions. It can even be worse than unoptimized routing with respect to that objective function. After looking at failure-free routing and traffic distribution in different failure scenarios, the analysis is extended to include the loop-free alternate (LFA) IP fast reroute mechanism. Different application scenarios of LFAs are examined and a special focus is set on the fact that LFAs usually cannot protect all traffic in a network even against single link failures. Thus, the routing optimization for LFAs is targeted on both link utilization and failure coverage. Finally, the pre-congestion notification mechanism PCN for network admission control and overload protection is analyzed and optimized. Different design options for implementing the protocol are compared, before algorithms are developed for the calculation and optimization of protocol parameters and PCN-based routing. The second part of the thesis tackles a routing problem that can only be resolved on a global scale. The scalability of the Internet is at risk since a major and intensifying growth of the interdomain routing tables has been observed. Several protocols and architectures are analyzed that can be used to make interdomain routing more scalable. The most promising approach is the locator/identifier (Loc/ID) split architecture which separates routing from host identification. This way, changes in connectivity, mobility of end hosts, or traffic-engineering activities are hidden from the routing in the core of the Internet and the routing tables can be kept much smaller. All of the currently proposed Loc/ID split approaches have their downsides. In particular, the fact that most architectures use the ID for routing outside the Internet's core is a poor design, which inhibits many of the possible features of a new routing architecture. To better understand the problems and to provide a solution for a scalable routing design that implements a true Loc/ID split, the new GLI-Split protocol is developed in this thesis, which provides separation of global and local routing and uses an ID that is independent from any routing decisions. Besides GLI-Split, several other new routing architectures implementing Loc/ID split have been proposed for the Internet. 
Most of them assume that a mapping system is queried for EID-to-RLOC mappings by an intermediate node at the border of an edge network. When the mapping system is queried by an intermediate node, packets are already on their way towards their destination, and therefore, the mapping system must be fast, scalable, secure, resilient, and should be able to relay packets without locators to nodes that can forward them to the correct destination. The dissertation develops a classification for all proposed mapping system architectures and shows their similarities and differences. Finally, the fast two-level mapping system FIRMS is developed. It includes security and resilience features as well as a relay service for initial packets of a flow when intermediate nodes encounter a cache miss for the EID-to-RLOC mapping.}, subject = {Netzwerk}, language = {en} } @phdthesis{Baier1998, author = {Baier, Herbert}, title = {Operators of Higher Order}, publisher = {Shaker Verlag}, isbn = {3-8265-4008-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-140799}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {V, 95}, year = {1998}, abstract = {Motivated by results on interactive proof systems, we investigate the computational power of quantifiers applied to well-known complexity classes. In particular, we are interested in existential, universal and probabilistic bounded-error quantifiers ranging over words and sets of words, i.e., oracles if we think in terms of a Turing machine model. In addition to the standard oracle access mechanism, we also consider quantifiers ranging over oracles to which access is restricted in a certain way.}, subject = {Komplexit{\"a}tstheorie}, language = {en} } @phdthesis{Winkler2015, author = {Winkler, Marco}, title = {On the Role of Triadic Substructures in Complex Networks}, publisher = {epubli GmbH}, address = {Berlin}, isbn = {978-3-7375-5654-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-116022}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {In the course of the growth of the Internet and due to increasing availability of data, over the last two decades, the field of network science has established itself as a research area in its own right. With quantitative scientists from computer science, mathematics, and physics working on datasets from biology, economics, sociology, political sciences, and many others, network science serves as a paradigm for interdisciplinary research. One of the major goals in network science is to unravel the relationship between topological graph structure and a network's function. As evidence suggests, systems from the same fields, i.e. with similar function, tend to exhibit similar structure. However, it is still vague whether a similar graph structure automatically implies similar function. This dissertation aims at helping to bridge this gap, while particularly focusing on the role of triadic structures. After a general introduction to the main concepts of network science, existing work devoted to the relevance of triadic substructures is reviewed. A major challenge in modeling triadic structure is the fact that not all three-node subgraphs can be specified independently of each other, as pairs of nodes may participate in multiple of those triadic subgraphs. In order to overcome this obstacle, we suggest a novel class of generative network models based on so-called Steiner triple systems. The latter are partitions of a graph's vertices into pair-disjoint triples (Steiner triples).
Thus, the configurations on Steiner triples can be specified independently of each other without overdetermining the network's link structure. Subsequently, we investigate the most basic realization of this new class of models. We call it the triadic random graph model (TRGM). The TRGM is parametrized by a probability distribution over all possible triadic subgraph patterns. In order to generate a network instantiation of the model, for all Steiner triples in the system, a pattern is drawn from the distribution and adjusted randomly on the Steiner triple. We calculate the degree distribution of the TRGM analytically and find it to be similar to a Poissonian distribution. Furthermore, it is shown that TRGMs possess non-trivial triadic structure. We discover inevitable correlations in the abundance of certain triadic subgraph patterns which should be taken into account when attributing functional relevance to particular motifs - patterns which occur significantly more frequently than expected at random. Beyond, the strong impact of the probability distributions on the Steiner triples on the occurrence of triadic subgraphs over the whole network is demonstrated. This interdependence allows us to design ensembles of networks with predefined triadic substructure. Hence, TRGMs help to overcome the lack of generative models needed for assessing the relevance of triadic structure. We further investigate whether motifs occur homogeneously or heterogeneously distributed over a graph. Therefore, we study triadic subgraph structures in each node's neighborhood individually. In order to quantitatively measure structure from an individual node's perspective, we introduce an algorithm for node-specific pattern mining for both directed unsigned, and undirected signed networks. Analyzing real-world datasets, we find that there are networks in which motifs are distributed highly heterogeneously, bound to the proximity of only very few nodes. Moreover, we observe indication for the potential sensitivity of biological systems to a targeted removal of these critical vertices. In addition, we study whole graphs with respect to the homogeneity and homophily of their node-specific triadic structure. The former describes the similarity of subgraph distributions in the neighborhoods of individual vertices. The latter quantifies whether connected vertices are structurally more similar than non-connected ones. We discover these features to be characteristic for the networks' origins. Moreover, clustering the vertices of graphs regarding their triadic structure, we investigate structural groups in the neural network of C. elegans, the international airport-connection network, and the global network of diplomatic sentiments between countries. For the latter we find evidence for the instability of triangles considered socially unbalanced according to sociological theories. Finally, we utilize our TRGM to explore ensembles of networks with similar triadic substructure in terms of the evolution of dynamical processes acting on their nodes. 
Focusing on oscillators, coupled along the graphs' edges, we observe that certain triad motifs impose a clear signature on the systems' dynamics, even when embedded in a larger network structure.}, subject = {Netzwerk}, language = {en} } @phdthesis{Schroeter2012, author = {Schr{\"o}ter, Martin}, title = {Newton Methods for Image Registration}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-71490}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Consider the situation where two or more images are taken from the same object. After taking the first image, the object is moved or rotated so that the second recording depicts it in a different manner. Additionally, take heed of the possibility that the imaging techniques may have also been changed. One of the main problems in image processing is to determine the spatial relation between such images. The corresponding process of finding the spatial alignment is called "registration". In this work, we study the optimization problem which corresponds to the registration task. Especially, we exploit the Lie group structure of the set of transformations to construct efficient, intrinsic algorithms. We also apply the algorithms to medical registration tasks. However, the methods developed are not restricted to the field of medical image processing. We also have a closer look at more general forms of optimization problems and show connections to related tasks.}, subject = {Newton-Verfahren}, language = {en} } @phdthesis{Fleszar2018, author = {Fleszar, Krzysztof}, title = {Network-Design Problems in Graphs and on the Plane}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-076-4 (Print)}, doi = {10.25972/WUP-978-3-95826-077-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-154904}, school = {W{\"u}rzburg University Press}, pages = {xi, 204}, year = {2018}, abstract = {A network design problem defines an infinite set whose elements, called instances, describe relationships and network constraints. It asks for an algorithm that, given an instance of this set, designs a network that respects the given constraints and at the same time optimizes some given criterion. In my thesis, I develop algorithms whose solutions are optimum or close to an optimum value within some guaranteed bound. I also examine the computational complexity of these problems. Problems from two vast areas are considered: graphs and the Euclidean plane. In the Maximum Edge Disjoint Paths problem, we are given a graph and a subset of vertex pairs that are called terminal pairs. We are asked for a set of paths where the endpoints of each path form a terminal pair. The constraint is that any two paths share at most one inner vertex. The optimization criterion is to maximize the cardinality of the set. In the hard-capacitated k-Facility Location problem, we are given an integer k and a complete graph where the distances obey a given metric and where each node has two numerical values: a capacity and an opening cost. We are asked for a subset of k nodes, called facilities, and an assignment of all the nodes, called clients, to the facilities. The constraint is that the number of clients assigned to a facility cannot exceed the facility's capacity value. The optimization criterion is to minimize the total cost which consists of the total opening cost of the facilities and the total distance between the clients and the facilities they are assigned to. 
In the Stabbing problem, we are given a set of axis-aligned rectangles in the plane. We are asked for a set of horizontal line segments such that, for every rectangle, there is a line segment crossing its left and right edge. The optimization criterion is to minimize the total length of the line segments. In the k-Colored Non-Crossing Euclidean Steiner Forest problem, we are given an integer k and a finite set of points in the plane where each point has one of k colors. For every color, we are asked for a drawing that connects all the points of the same color. The constraint is that drawings of different colors are not allowed to cross each other. The optimization criterion is to minimize the total length of the drawings. In the Minimum Rectilinear Polygon for Given Angle Sequence problem, we are given an angle sequence of left (+90°) turns and right (-90°) turns. We are asked for an axis-parallel simple polygon where the angles of the vertices yield the given sequence when walking around the polygon in counter-clockwise manner. The optimization criteria considered are to minimize the perimeter, the area, and the size of the axis-parallel bounding box of the polygon.}, subject = {Euklidische Ebene}, language = {en} } @phdthesis{Reitwiessner2011, author = {Reitwießner, Christian}, title = {Multiobjective Optimization and Language Equations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70146}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Praktische Optimierungsprobleme beinhalten oft mehrere gleichberechtigte, sich jedoch widersprechende Kriterien. Beispielsweise will man bei einer Reise zugleich m{\"o}glichst schnell ankommen, sie soll aber auch nicht zu teuer sein. Im ersten Teil dieser Arbeit wird die algorithmische Beherrschbarkeit solcher mehrkriterieller Optimierungsprobleme behandelt. Es werden zun{\"a}chst verschiedene L{\"o}sungsbegriffe diskutiert und auf ihre Schwierigkeit hin verglichen. Interessanterweise stellt sich heraus, dass diese Begriffe f{\"u}r ein einkriterielles Problem stets gleich schwer sind, sie sich ab zwei Kriterien allerdings stark unterscheiden k{\"o}nen (außer es gilt P = NP). In diesem Zusammenhang wird auch die Beziehung zwischen Such- und Entscheidungsproblemen im Allgemeinen untersucht. Schließlich werden neue und verbesserte Approximationsalgorithmen f{\"u}r verschieden Varianten des Problems des Handlungsreisenden gefunden. Dabei wird mit Mitteln der Diskrepanztheorie eine Technik entwickelt, die ein grundlegendes Hindernis der Mehrkriteriellen Optimierung aus dem Weg schafft: Gegebene L{\"o}sungen so zu kombinieren, dass die neue L{\"o}sung in allen Kriterien m{\"o}glichst ausgewogen ist und gleichzeitig die Struktur der L{\"o}sungen nicht zu stark zerst{\"o}rt wird. Der zweite Teil der Arbeit widmet sich verschiedenen Aspekten von Gleichungssystemen f{\"u}r (formale) Sprachen. Einerseits werden konjunktive und Boolesche Grammatiken untersucht. Diese sind Erweiterungen der kontextfreien Grammatiken um explizite Durchschnitts- und Komplementoperationen. Es wird unter anderem gezeigt, dass man bei konjunktiven Grammatiken die Vereinigungsoperation stark einschr{\"a}nken kann, ohne dabei die erzeugte Sprache zu {\"a}ndern. Außerdem werden bestimmte Schaltkreise untersucht, deren Gatter keine Wahrheitswerte sondern Mengen von Zahlen berechnen. F{\"u}r diese Schaltkreise wird das {\"A}quivalenzproblem betrachtet, also die Frage ob zwei gegebene Schaltkreise die gleiche Menge berechnen oder nicht. 
Es stellt sich heraus, dass, abh{\"a}ngig von den erlaubten Gattertypen, die Komplexit{\"a}t des {\"A}quivalenzproblems stark variiert und f{\"u}r verschiedene Komplexit{\"a}tsklassen vollst{\"a}ndig ist, also als (parametrisierter) Vertreter f{\"u}r diese Klassen stehen kann.}, subject = {Mehrkriterielle Optimierung}, language = {en} } @phdthesis{Hoehn2002, author = {H{\"o}hn, Holger}, title = {Multimediale, datenbankgest{\"u}tzte Lehr- und Lernplattformen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-4049}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2002}, abstract = {Die Dissertation befaßt sich mit der Entwicklung einer multimedialen, datenbankgest{\"u}tzten Lehr- und Lernplattform. Die entwickelten Module erm{\"o}glichen und erweitern nicht nur die M{\"o}glichkeit des Selbststudiums f{\"u}r den Studenten sondern erleichtern auch die Arbeit der Dozenten. Außerdem wird auch die Zusammenarbeit und der Austausch von Lernobjekten zwischen verschiedenen Institutionen erm{\"o}glicht. In der Lehr- und Lernplattform k{\"o}nnen verschiedene Lernobjekt-Typen verwaltet werden. Exemplarisch wurden die Typen Bilder, 3D-Animationen, Vorlesungen, Lerntexte, Fallbeispiele und Quizelemente integriert. Die Lehr- und Lernplattform besteht aus drei Bausteinen: 1. In der Lernobjekt-Datenbank werden alle Lernobjekt-Typen und Lernobjekte verwaltet. 2. Autorenwerkzeuge dienen zur Erstellung von Lernobjekten. 3. In der Lernplattform werden die Lernobjekte den Studenten zum (Selbst-)Lernen pr{\"a}sentiert. Neben den Vorteilen, die der Einsatz von E-Learning im allgemeinen bietet, wie die flexible Lernorganisation oder die Nutzung von Lerninhalten unabh{\"a}ngig von Ort und Zeit, zeichnet sich die entwickelte Lehr- und Lernplattform besonders durch folgende Punkte aus: Generierung von Lerninhalten h{\"o}herer Qualit{\"a}t durch multizentrische Expertenb{\"u}ndelung und Arbeitsteilung, Erweiterbarkeit auf andere, neue Lernobjekt-Typen, Verwaltbarkeit, Konsistenz, Flexibilit{\"a}t, geringer Verwaltungsaufwand, Navigationsm{\"o}glichkeiten f{\"u}r den Studenten, Personalisierbarkeit und Konformit{\"a}t zu internationalen Standards. Sowohl bei der Modellierung als auch bei der Umsetzung wurde darauf geachtet, m{\"o}glichst gut die Anforderungen der Dermatologie bei gleichzeitiger Erweiterbarkeit auf andere, {\"a}hnliche Szenarien zu erf{\"u}llen. Besonders einfach sollte die Anpassung der Plattform f{\"u}r andere bildorientierte Disziplinen sein.}, subject = {Multimedia}, language = {de} } @phdthesis{Wirth2001, author = {Wirth, Hans-Christoph}, title = {Multicriteria Approximation of Network Design and Network Upgrade Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2845}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Network planning has come to great importance during the past decades. Today's telecommunication, traffic systems, and logistics would not have been evolved to the current state without careful analysis of the underlying network problems and precise implementation of the results obtained from those examinations. Graphs with node and arc attributes are a very useful tool to model realistic applications, while on the other hand they are well understood in theory. We investigate network design problems which are motivated particularly from applications in communication networks and logistics. 
Those problems include the search for homogeneous subgraphs in edge-labeled graphs where either the total number of labels or the reload cost is to be optimized. Further, we investigate some variants of the dial-a-ride problem. On the other hand, we use node and edge upgrade models to deal with the fact that in many cases one prefers to change existing networks rather than implementing a newly computed solution from scratch. We investigate the construction of bottleneck-constrained forests under a node upgrade model, as well as several flow cost problems under an edge-based upgrade model. All problems are examined within a framework of multi-criteria optimization. Many of the problems can be shown to be NP-hard, with the consequence that, under the widely accepted assumption that P is not equal to NP, there cannot exist efficient algorithms for solving the problems. This motivates the development of approximation algorithms which compute near-optimal solutions with provable performance guarantees in polynomial time.}, subject = {Netzplantechnik}, language = {en} } @phdthesis{Borrmann2018, author = {Borrmann, Dorit}, title = {Multi-modal 3D mapping - Combining 3D point clouds with thermal and color information}, isbn = {978-3-945459-20-1}, issn = {1868-7474}, doi = {10.25972/OPUS-15708}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-157085}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Imagine a technology that automatically creates a full 3D thermal model of an environment and detects temperature peaks in it. For better orientation in the model, it is enhanced with color information. The current state of the art for analyzing temperature-related issues is thermal imaging. It is relevant for energy efficiency but also for securing important infrastructure such as power supplies and temperature regulation systems. Monitoring and analysis of the data for a large building is tedious as stable conditions need to be guaranteed for several hours and detailed notes about the pose and the environment conditions for each image must be taken. For some applications repeated measurements are necessary to monitor changes over time. The analysis of the scene is only possible through expertise and experience. This thesis proposes a robotic system that creates a full 3D model of the environment with color and thermal information by combining thermal imaging with the technology of terrestrial laser scanning. The addition of a color camera facilitates the interpretation of the data and allows for other application areas. The data from all sensors collected at different positions is joined in one common reference frame using calibration and scan matching. The first part of the thesis deals with 3D point cloud processing with the emphasis on accessing point cloud data efficiently, detecting planar structures in the data and registering multiple point clouds into one common coordinate system. The second part covers the autonomous exploration and data acquisition with a mobile robot with the objective to minimize the unseen area in 3D space. Furthermore, the combination of different modalities, color images, thermal images and point cloud data through calibration is elaborated. The last part presents applications for the collected data. Among these are methods to detect the structure of building interiors for reconstruction purposes and subsequent detection and classification of windows.
A system to project the gathered thermal information back into the scene is presented as well as methods to improve the color information and to join separately acquired point clouds and photo series. A full multi-modal 3D model contains all the relevant geometric information about the recorded scene and enables an expert to fully analyze it off-site. The technology clears the path for automatically detecting points of interest thereby helping the expert to analyze the heat flow as well as localize and identify heat leaks. The concept is modular and neither limited to achieving energy efficiency nor restricted to the use in combination with a mobile platform. It also finds its application in fields such as archaeology and geology and can be extended by further sensors.}, subject = {Punktwolke}, language = {en} } @phdthesis{Kluge2004, author = {Kluge, Boris}, title = {Motion coordination for a mobile robot in dynamic environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-15508}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {Generating coordinated motion for a mobile robot operating in natural, continuously changing environments among moving obstacles such as humans is a complex task which requires the solution of various sub problems. In this thesis, we will cover the topics of perception and navigation in dynamic environments, as well as reasoning about the motion of the obstacles and of the robot itself. Perception is mainly considered for a laser range finder, and an according method for obstacle detection and tracking is proposed. Network optimization algorithms are used for data association in the tracking step, resulting in considerable robustness with respect to clutter by small objects. Navigation in general is accomplished using an adaptation of the velocity obstacle approach to the given vehicle kinematics, and cooperative motion coordination between the robot and a human guide is achieved using an appropriate selection rule for collision-free velocities. Next, the robot is enabled to compare its path to the path of a human guide using one of a collection of presented distance measures, which permits the detection of exceptional conditions. Furthermore, a taxonomy for the assessment of situations concerning the robot is presented, and following a summary of existing approaches to more intelligent and comprehensive perception, we propose a method for obstruction detection. Finally, a new approach to reflective navigation behaviors is described where the robot reasons about intelligent moving obstacles in its environment, which allows to adjust the character of the robot motion from regardful and defensive to more self-confident and aggressive behaviors.}, subject = {Bewegungsablauf}, language = {de} } @phdthesis{Hess2009, author = {Hess, Martin}, title = {Motion coordination and control in systems of nonholonomic autonomous vehicles}, isbn = {978-3-923959-55-6}, doi = {10.25972/OPUS-3794}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46442}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {This work focuses on coordination methods and the control of motion in groups of nonholonomic wheeled mobile robots, in particular of the car-like type. These kind of vehicles are particularly restricted in their mobility. In the main part of this work the two problems of formation motion coordination and of rendezvous in distributed multi-vehicle systems are considered. 
We introduce several enhancements to an existing motion planning approach for formations of nonholonomic mobile robots. Compared to the original method, the extended approach is able to handle time-varying reference speeds as well as adjustments of the formation's shape during reference trajectory segments with continuously differentiable curvature. Additionally, undesired discontinuities in the speed and steering profiles of the vehicles are avoided. Further, the scenario of snow shoveling on an airfield by utilizing multiple formations of autonomous snowplows is discussed. We propose solutions to the subproblems of motion planning for the formations and tracking control for the individual vehicles. While all situations that might occur have been tested in a simulation environment, we also verified the developed tracking controller in real robot hardware experiments. The task of the rendezvous problem in groups of car-like robots is to drive all vehicles to a common position by means of decentralized control laws. Typically there exists no direct interaction link between all of the vehicles. In this work we present decentralized rendezvous control laws for vehicles with free and with bounded steering. The convergence properties of the approaches are analyzed by utilizing Lyapunov based techniques. Furthermore, they are evaluated within various simulation experiments, while the bounded steering case is also verified within laboratory hardware experiments. Finally we introduce a modification to the bounded steering system that increases the convergence speed at the expense of a higher traveled distance of the vehicles.}, subject = {Robotik}, language = {en} } @phdthesis{Staehle2011, author = {Staehle, Barbara}, title = {Modeling and Optimization Methods for Wireless Sensor and Mesh Networks}, doi = {10.25972/OPUS-4967}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64884}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Im Internet der Zukunft werden Menschen nicht nur mit Menschen, sondern auch mit „Dingen", und sogar „Dinge" mit „Dingen" kommunizieren. Zus{\"a}tzlich wird das Bed{\"u}rfnis steigen, immer und {\"u}berall Zugang zum Internet zu haben. Folglich gewinnen drahtlose Sensornetze (WSNs) und drahtlose Mesh-Netze (WMNs) an Bedeutung, da sie Daten {\"u}ber die Umwelt ins Internet liefern, beziehungsweise einfache Internet-Zugangsm{\"o}glichkeiten schaffen. In den vier Teilen dieser Arbeit werden unterschiedliche Modellierungs- und Optimierungsmethoden f{\"u}r WSNs und WMNs vorgestellt. Der Energieverbrauch ist die wichtigste Metrik, wenn es darum geht die Kommunikation in einem WSN zu optimieren. Da sich in der Literatur sehr viele unterschiedliche Energiemodelle finden, untersucht der erste Teil der Arbeit welchen Einfluss unterschiedliche Energiemodelle auf die Optimierung von WSNs haben. Aufbauend auf diesen {\"U}berlegungen besch{\"a}ftigt sich der zweite Teil der Arbeit mit drei Problemen, die {\"u}berwunden werden m{\"u}ssen um eine standardisierte energieeffiziente Kommunikations-L{\"o}sung f{\"u}r WSNs basierend auf IEEE 802.15.4 und ZigBee zu realisieren. F{\"u}r WMNs sind beide Probleme von geringem Interesse, die Leistung des Netzes jedoch umso mehr. Der dritte Teil der Arbeit f{\"u}hrt daher Algorithmen f{\"u}r die Berechnung des Max-Min fairen (MMF) Netzwerk-Durchsatzes in WMNs mit mehreren Linkraten und Internet-Gateways ein. Der letzte Teil der Arbeit untersucht die Auswirkungen des LRA-Konzeptes. Dessen grundlegende Idee ist die folgende. 
If a lower data rate than theoretically possible is used for a link, the link throughput decreases; however, a larger number of simultaneous transmissions may become possible, and the total throughput of the network can increase. Using an analytical LRA formulation and a systematic study, it can be shown that a network-wide assignment of data rates that are more robust than necessary increases the MMF network throughput. Furthermore, it can be shown that LRA has a positive effect on the performance of an IEEE 802.11 WMN and can be used for the optimization of the network.}, subject = {Drahtloses Sensorsystem}, language = {en} } @phdthesis{Sauer2010, author = {Sauer, Markus}, title = {Mixed-Reality for Enhanced Robot Teleoperation}, isbn = {978-3-923959-67-9}, doi = {10.25972/OPUS-4666}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55083}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {In recent years, research in robotics has progressed to the point that the human-machine interface is increasingly becoming the most critical component for achieving a high overall performance of systems for the navigation and coordination of robots. This dissertation investigates how mixed-reality technologies can be applied to user interfaces in order to increase this overall performance. To this end, concepts and technologies are developed which, through evaluation in user tests, enable an optimized, user-centered design of mixed-reality user interfaces. Both the technical requirements and the human factors are thereby taken into account for a consistent system design. After a detailed problem analysis and the creation of a system model that includes the human as a key component, the use of the novel 3D time-of-flight camera is first analyzed and optimized, for robot navigation as well as for mixed-reality interfaces. Furthermore, it is shown how the network traffic of the video stream, as the most important information element of most user interfaces for the navigation task, can be optimized on the application layer in typical multi-robot networks with dynamic topologies and load situations. This makes it possible to preserve the video stream and to stabilize the frame rate in otherwise typical failure scenarios. These advanced technologies are then also employed in the developed concept of a generic 3D mixed-reality interface. This concept enables an integrated 3D presentation of the available information, so that spatial relations between pieces of information are preserved and the number of mental transformations required of the human operator is reduced. At the same time, this approach supports immersive stereo display technologies, which additionally foster the spatial understanding of the remote situation. The approaches presented and evaluated in this dissertation also exploit the fact that local autonomy of robots can nowadays be realized very robustly. This is used, for example, to realize an assistance system with variable autonomy. 
Here, the remote operator receives, via force feedback combined with an integrated augmented-reality interface, an impression of the situation in the remote workspace as well as of the robot's current navigation intention. The conducted user tests confirm the significant increase in navigation performance achieved by the developed approach. The robust local autonomy also enables the predictive mixed-reality interface introduced in this dissertation. The control loop over the human, which is decoupled by this approach, makes it possible to significantly reduce the visibility of unavoidable system delays. In addition, this approach allows the two viewpoints that are helpful for navigation to be combined in one 3D user interface - the exocentric viewpoint and the egocentric viewpoint as an augmented-reality view.}, subject = {Mobiler Roboter}, language = {en} } @phdthesis{Krenzer2023, author = {Krenzer, Adrian}, title = {Machine learning to support physicians in endoscopic examinations with a focus on automatic polyp detection in images and videos}, doi = {10.25972/OPUS-31911}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319119}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Deep learning enables enormous progress in many computer vision-related tasks. Artificial Intelligence (AI) steadily yields new state-of-the-art results in the field of detection and classification, where AI performance equals or exceeds human performance. Those achievements have impacted many domains, including medical applications. One particular field of medical applications is gastroenterology. In gastroenterology, machine learning algorithms are used to assist examiners during interventions. One of the most critical concerns for gastroenterologists is the development of Colorectal Cancer (CRC), which is one of the leading causes of cancer-related deaths worldwide. Detecting polyps in screening colonoscopies is the essential procedure to prevent CRC. During a colonoscopy, the gastroenterologist uses an endoscope to screen the whole colon for polyps. Polyps are mucosal growths that can vary in severity. This thesis supports gastroenterologists in their examinations with automated detection and classification systems for polyps. The main contribution is a real-time polyp detection system. This system is ready to be installed in any gastroenterology practice worldwide using open-source software. The system achieves state-of-the-art detection results and is currently evaluated in a clinical trial in four different centers in Germany. The thesis presents two additional key contributions: The first is a polyp detection system with extended vision tested in an animal trial. Polyps often hide behind folds or in uninvestigated areas. Therefore, the polyp detection system with extended vision uses an endoscope assisted by two additional cameras to see behind those folds. If a polyp is detected, the endoscopist receives a visual signal. While the detection system handles the two additional camera inputs, the endoscopist focuses on the main camera as usual. The second consists of two polyp classification models, one for classification based on shape (Paris) and the other based on surface and texture (NBI International Colorectal Endoscopic (NICE) classification). 
Both classifications help the endoscopist with the treatment of and the decisions about the detected polyp. The key algorithms of the thesis achieve state-of-the-art performance. Notably, the polyp detection system, tested on a highly demanding video data set, reaches an F1 score of 90.25 \% while working in real time. These results exceed those of all real-time systems in the literature. Furthermore, the first preliminary results of the clinical trial of the polyp detection system suggest a high Adenoma Detection Rate (ADR). In the preliminary study, all polyps were detected by the polyp detection system, and the system achieved a high usability score of 96.3 (max 100). The Paris classification model achieved an F1 score of 89.35 \%, which is state-of-the-art. The NICE classification model achieved an F1 score of 81.13 \%. Furthermore, a large data set for polyp detection and classification was created during this thesis. For this purpose, a fast and robust annotation system called Fast Colonoscopy Annotation Tool (FastCAT) was developed. The system simplifies the annotation process for gastroenterologists: the gastroenterologists only annotate key parts of the endoscopic video. Afterward, those video parts are pre-labeled by a polyp detection AI to speed up the process. After the AI has pre-labeled the frames, non-experts correct and finish the annotation. This annotation process is fast and ensures high quality. FastCAT reduces the overall workload of the gastroenterologist on average by a factor of 20 compared to an open-source state-of-the-art annotation tool.}, subject = {Deep Learning}, language = {en} } @phdthesis{Somody2023, author = {Somody, Joseph Christian Campbell}, title = {Leveraging deep learning for identification and structural determination of novel protein complexes from \(in\) \(situ\) electron cryotomography of \(Mycoplasma\) \(pneumoniae\)}, doi = {10.25972/OPUS-31344}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313447}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The holy grail of structural biology is to study a protein in situ, and this goal has been fast approaching since the resolution revolution and the achievement of atomic resolution. A cell's interior is not a dilute environment, and proteins have evolved to fold and function as needed in that environment; as such, an investigation of a cellular component should ideally include the full complexity of the cellular environment. Imaging whole cells in three dimensions using electron cryotomography is the best method to accomplish this goal, but it comes with a limitation on sample thickness and produces noisy data unamenable to direct analysis. This thesis establishes a novel workflow to systematically analyse whole-cell electron cryotomography data in three dimensions and to find and identify instances of protein complexes in the data, setting up the determination of their structure and identity for success. Mycoplasma pneumoniae is a very small parasitic bacterium with fewer than 700 protein-coding genes; it is thin enough and small enough to be imaged in large quantities by electron cryotomography, and it can grow directly on the grids used for imaging, making it ideal for exploratory studies in structural proteomics. As part of the workflow, a methodology for training deep-learning-based particle-picking models is established. 
As a proof of principle, a dataset of whole-cell Mycoplasma pneumoniae tomograms is used with this workflow to characterize a novel membrane-associated complex observed in the data. Ultimately, 25431 such particles are picked from 353 tomograms and refined to a density map with a resolution of 11 {\AA}. Making good use of orthogonal datasets to filter the search space and verify results, structures were predicted for candidate proteins and checked for a suitable fit in the density map. In the end, with this approach, nine proteins were found to be part of the complex, which appears to be associated with chaperone activity and to interact with translocon machinery. Visual proteomics refers to the ultimate potential of in situ electron cryotomography: the comprehensive interpretation of tomograms. The workflow presented here is demonstrated to help in reaching that potential.}, subject = {Kryoelektronenmikroskopie}, language = {en} } @phdthesis{Dang2012, author = {Dang, Nghia Duc}, title = {Konzeption und Evaluation eines hybriden, skalierbaren Werkzeugs zur mechatronischen Systemdiagnose am Beispiel eines Diagnosesystems f{\"u}r freie Kfz-Werkst{\"a}tten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70774}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {The development of a knowledge-based system, in particular a diagnostic system, is a subdiscipline of artificial intelligence and applied computer science. In the course of research in this field, various solution approaches have been developed, with varying success when applied to automotive diagnosis. Diagnostic systems in franchised workshops, i.e. workshops bound to a vehicle manufacturer, mainly apply case-based diagnostics. On the one hand, the variety of vehicles there is limited, and on the other hand, new cases that are not yet present in the system must be reported. Independent workshops do not have such a database, so the case-based approach is difficult to put into practice. In independent workshops - workshops not tied to a vehicle manufacturer - troubleshooting is mainly based on fault trees. Because of the growing vehicle complexity, which is largely due to the rapidly increasing number of functions realized by mechatronic systems, and because of the growing variety of vehicle types, guided troubleshooting in independent workshops is not always effective. In order to support the personnel of independent workshops in future troubleshooting, new generations of manufacturer-independent diagnostic tools are needed that solve the problems of variant diversity and complexity. This thesis presents a solution approach that combines a qualitative, model-based diagnostic approach with an approach based on heuristic diagnostic knowledge. In addition to the basis for knowledge elicitation, the thesis covers the theoretical foundations for mastering the variant diversity as well as the tests for the created diagnostic models. The diagnosis is symptom-based, and the inference mechanisms for processing the diagnostic knowledge are a combination of propagating the deviating physical quantities in the model and evaluating the heuristic knowledge. 
Furthermore, this thesis describes various aspects of realizing the developed theoretical foundations, for example the system architecture, the knowledge elicitation process, and the course of the diagnostic procedure in the workshops. The evaluation of the developed solution during knowledge elicitation, in the form of model building and modeling workshops as well as field tests, serves not only to confirm the developed approach, but also to generate ideas for integrating the developed tools into the existing IT infrastructure.}, subject = {Diagnosesystem}, language = {de} } @phdthesis{Atzmueller2006, author = {Atzm{\"u}ller, Martin}, title = {Knowledge-Intensive Subgroup Mining - Techniques for Automatic and Interactive Discovery}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-21004}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Data mining has proved its significance in various domains and applications. As an important subfield of the general data mining task, subgroup mining can be used, e.g., for marketing purposes in business domains, or for quality profiling and analysis in medical domains. The goal is to efficiently discover novel, potentially useful, and ultimately interesting knowledge. However, in real-world situations these requirements often cannot be fulfilled, e.g., if the applied methods do not scale to large data sets, if too many results are presented to the user, or if many of the discovered patterns are already known to the user. This thesis proposes a combination of several techniques in order to cope with these problems: We discuss automatic methods, including heuristic and exhaustive approaches, and especially present the novel SD-Map algorithm for exhaustive subgroup discovery that is fast and effective. For an interactive approach we describe techniques for subgroup introspection and analysis, and we present advanced visualization methods, e.g., the zoomtable, which directly shows the most important parameters of a subgroup and can be used for optimization and exploration. We also describe various visualizations for subgroup comparison and evaluation in order to support the user during these essential steps. Furthermore, we propose to include into the mining process background knowledge that may be available and is easy to formalize. We can utilize this knowledge in many ways: to focus the search process, to restrict the search space, and ultimately to increase the efficiency of the discovery method. We especially present background knowledge to be applied for filtering the elements of the problem domain, for constructing abstractions, for aggregating values of attributes, and for the post-processing of the discovered set of patterns. Finally, the techniques are combined into a knowledge-intensive process supporting both automatic and interactive methods for subgroup mining. The practical significance of the proposed approach strongly depends on the available tools. We introduce the VIKAMINE system as a highly integrated environment for knowledge-intensive active subgroup mining. Also, we present an evaluation consisting of two parts: With respect to objective evaluation criteria, i.e., comparing the efficiency and the effectiveness of the subgroup discovery methods, we provide an experimental evaluation using generated data. For that task we present a novel data generator that allows a simple and intuitive specification of the data characteristics. 
The results of the experimental evaluation indicate that, on data sets similar to the intended application, the novel SD-Map method outperforms the other described algorithms in terms of efficiency, and also outperforms the heuristic methods with respect to precision and recall. Subjective evaluation criteria include the user acceptance, the benefit of the approach, and the interestingness of the results. We present five case studies utilizing the presented techniques: The approach has been successfully implemented in medical and technical applications using real-world data sets. The method was very well accepted by the users, who were able to discover novel, useful, and interesting knowledge.}, subject = {Data Mining}, language = {en} } @phdthesis{Fehler2010, author = {Fehler, Manuel}, title = {Kalibrierung Agenten-basierter Simulationen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64762}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This thesis addresses the problem of calibrating agent-based simulations (ABS), that is, the problem of adjusting the parameter values of an agent-based simulation model so that valid simulation behavior is achieved. The calibration problem for simulations as such is not new and is an established part of research within classical simulation paradigms such as macro-simulation. Compared to the calibration problems considered there, however, the calibration problem for ABS is characterized by a number of additional challenges that make the direct application of existing calibration methods within limited time difficult or no longer reasonable. Solving these problems is the focus of this dissertation: the goal is to support the user in calibrating ABS on the basis of insufficient, potentially erroneous data and knowledge. Three main problems are to be solved: 1) simplifying the calibration of the large sets of agent parameters on the micro level in agent-based simulations by exploiting the specific structure of ABS (namely their composition from a set of agent models); 2) calibrating agent-based simulations such that valid simulation behavior is produced on all relevant observation levels (at least the micro and the macro level), with the aggravating constraint that the calibration has to be carried out under the assumption of a macro-micro knowledge gap; 3) calibrating agent-based simulations on the micro level under the assumption that the data available for validating the behavior of individual agent models are insufficient and potentially distorted. To this end, this thesis develops the so-called macro-micro procedure for calibrating agent-based simulations. The procedure consists of a base procedure that is extended by several supplementary procedures in the course of the thesis. 
The macro-micro procedure and its extensions are intended to make model calibration possible despite heavily noisy data and limited knowledge about the causal relationships in the original system, and to accelerate the calibration process at the same time: 1) Macro-micro calibration procedure: The macro-micro procedure developed in this thesis supports the user through a combined calibration on the micro and the macro observation level, which can be extended by intermediate levels if necessary. The basic idea of the procedure is to split the calibration problem into one problem on the aggregated behavior level and one on the level of the micro agent behavior. On the macro level, valid ideal aggregated behavior models (IVM) of the agents are sought. On the micro level, the individual agent models are calibrated on the basis of the desired overall behavior and the determined IVM, such that the resulting simulation behavior is valid on both the micro and the macro level. 2) Extension 1: Robust calibration: In order to handle potentially noisy validation criteria (i.e. noisy data about the original system on which the validation criteria of the simulation are based) and model parts during the calibration of ABS, a robust calibration technique for use within the macro-micro procedure is developed. 3) Extension 2: Calibration with heterogeneity search: As a second extension of the macro-micro procedure, a method is developed that addresses the problem of the unclear level of detail of ABS at the level of the parameter values. In principle, every agent may use different parameter values, even though a lower degree of heterogeneity would suffice to produce valid behavior. The developed extension attempts to determine a suitable heterogeneity configuration for the agents' parameter values during calibration, where a heterogeneity configuration is understood as a partition of the simulated agents into groups that each share the same parameter values. The heterogeneity search serves to find a compromise between the necessity of searching very large parameter spaces and the desire to keep the search space as small as possible.}, subject = {Computersimulation}, language = {de} } @phdthesis{Zeiger2010, author = {Zeiger, Florian}, title = {Internet Protocol based networking of mobile robots}, isbn = {978-3-923959-59-4}, doi = {10.25972/OPUS-4661}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-54776}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This work is composed of three main parts: remote control of mobile systems via the Internet, ad-hoc networks of mobile robots, and remote control of mobile robots via 3G telecommunication technologies. The first part gives a detailed state of the art and a discussion of the problems to be solved in order to teleoperate mobile robots via the Internet. The focus of the application to be realized is a distributed tele-laboratory with remote experiments on mobile robots which can be accessed worldwide via the Internet. Therefore, analyses of the communication link are used in order to realize a robust system. 
The developed and implemented architecture of this distributed tele-laboratory allows for smooth access even with a variable or low link quality. The second part covers the application of ad-hoc networks for mobile robots. The networking of mobile robots via mobile ad-hoc networks is a very promising approach to realizing integrated telematic systems without relying on pre-existing communication infrastructure. Relevant civilian application scenarios are, for example, search and rescue operations, where first responders are supported by multi-robot systems. Here, mobile robots, humans, and also existing stationary sensors can be connected very quickly and efficiently. Therefore, this work investigates and analyses the performance of different ad-hoc routing protocols for IEEE 802.11 based wireless networks in relevant scenarios. The analysis of the different protocols allows for an optimization of the parameter settings in order to use these ad-hoc routing protocols for mobile robot teleoperation. Guidelines for the realization of such telematic systems are also given. In addition, traffic shaping mechanisms on the application layer are presented which allow for a more efficient use of the communication link. An additional application scenario, the integration of a small-size helicopter into an IP based ad-hoc network, is presented. The teleoperation of mobile robots via 3G telecommunication technologies is addressed in the third part of this work. The high availability, high mobility, and high bandwidth provide a very interesting opportunity to realize scenarios for the teleoperation of mobile robots or for industrial remote maintenance. This work analyses important parameters of the UMTS communication link and also investigates the characteristics of different data streams. These analyses are used to give guidelines which are necessary for the realization of industrial remote maintenance or mobile robot teleoperation scenarios. All the results and guidelines for the design of telematic systems in this work were derived from analyses and experiments with real hardware.}, subject = {Robotik}, language = {en} } @phdthesis{Ostermayer2017, author = {Ostermayer, Ludwig}, title = {Integration of Prolog and Java with the Connector Architecture CAPJa}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150713}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Modern software is often realized as a modular combination of subsystems for, e.g., knowledge management, visualization, verification, or the interaction with users. As a result, software libraries from possibly different programming languages have to work together. The case is even more complex if different programming paradigms have to be combined. This type of diversification of programming languages and paradigms in just one software application can only be mastered by mechanisms for a seamless integration of the involved programming languages. However, the integration of the common logic programming language Prolog and the popular object-oriented programming language Java is complicated by various interoperability problems which stem, on the one hand, from the paradigmatic gap between the programming languages and, on the other hand, from the diversity of the available Prolog systems. The subject of this thesis is the investigation of novel mechanisms for the integration of logic programming in Prolog and object-oriented programming in Java. 
We are particularly interested in an object-oriented, uniform approach which is not specific to just one Prolog system. Therefore, we have first identified several important criteria for the seamless integration of Prolog and Java from the object-oriented perspective. The main contribution of the thesis is a novel integration framework called the Connector Architecture for Prolog and Java (CAPJa). The framework is completely implemented in Java and requires no modifications to the Java Virtual Machine or to Prolog. CAPJa provides a semi-automated mechanism for the integration of Prolog predicates into Java. For compact, readable, and object-oriented queries to Prolog, CAPJa exploits lambda expressions with conditional and relational operators in Java. The communication between Java and Prolog is based on a fully automated mapping of Java objects to Prolog terms, and vice versa. In Java, an extensible system of gateways provides connectivity with various Prolog systems and, moreover, makes any connected Prolog system easily interchangeable, without major adaptation in Java.}, subject = {Logische Programmierung}, language = {en} }
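To make the kind of object-to-term mapping and lambda-based querying described in the CAPJa abstract above more concrete, the following is a minimal, self-contained Java sketch. It is not the CAPJa API; the class and method names (PrologMappingSketch, toPrologTerm, query) are hypothetical and only illustrate the general idea of deriving a Prolog term from a plain Java object and expressing a query condition as a Java lambda.

// Hypothetical illustration of the object-to-term mapping and lambda-based querying
// sketched in the CAPJa abstract; this is NOT the CAPJa API.
import java.lang.reflect.Field;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

public class PrologMappingSketch {

    // A plain Java class that conceptually corresponds to a Prolog fact person(Name, Age).
    static class Person {
        String name;
        int age;
        Person(String name, int age) { this.name = name; this.age = age; }
    }

    // Map a Java object to a Prolog term string, e.g. person('alice', 30):
    // the lower-cased class name becomes the functor, the field values the arguments.
    static String toPrologTerm(Object o) throws IllegalAccessException {
        Field[] fields = o.getClass().getDeclaredFields();
        StringBuilder term = new StringBuilder(o.getClass().getSimpleName().toLowerCase()).append("(");
        for (int i = 0; i < fields.length; i++) {
            fields[i].setAccessible(true);
            Object value = fields[i].get(o);
            term.append(value instanceof String ? "'" + value + "'" : String.valueOf(value));
            if (i < fields.length - 1) term.append(", ");
        }
        return term.append(")").toString();
    }

    // A lambda with relational operators plays the role of the query condition.
    static List<Person> query(List<Person> facts, Predicate<Person> condition) {
        return facts.stream().filter(condition).collect(Collectors.toList());
    }

    public static void main(String[] args) throws Exception {
        List<Person> facts = List.of(new Person("alice", 30), new Person("bob", 17));
        for (Person adult : query(facts, p -> p.age >= 18)) {
            System.out.println(toPrologTerm(adult)); // prints: person('alice', 30)
        }
    }
}

A real coupling to a Prolog engine would additionally have to send such terms to the engine and parse answer terms back into Java objects, which is the reverse direction of the mapping that the abstract describes as fully automated in CAPJa.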