@misc{Werner2024, type = {Master Thesis}, author = {Werner, Lennart}, title = {Terrain Mapping for Autonomous Navigation of Lunar Rovers}, doi = {10.25972/OPUS-35826}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-358268}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Autonomous mobile robots operating in unknown terrain have to guide their drive decisions through local perception. Local mapping and traversability analysis are essential for safe rover operation and low-level locomotion. This thesis deals with the challenge of building a local, robot-centric map from ultra-short baseline stereo imagery for height and traversability estimation. Several grid-based, incremental mapping algorithms are compared and evaluated in a multi-size, multi-resolution framework. A new, covariance-based mapping update is introduced, which is capable of detecting sub-cellsize obstacles and abstracts the terrain of one cell as a first-order surface. The presented mapping setup is capable of producing reliable terrain and traversability estimates under the conditions expected for the Cooperative Autonomous Distributed Robotic Exploration (CADRE) mission. Algorithmic and software architecture design targets high reliability and efficiency for meeting the tight constraints implied by CADRE's small on-board embedded CPU. Extensive evaluations are conducted to find possible edge-case scenarios in the operating envelope of the map and to confirm performance parameters. The research in this thesis targets the CADRE mission, but is applicable to any form of mobile robotics which requires height and traversability mapping.}, subject = {Mondfahrzeug}, language = {en} } @phdthesis{Bleier2023, author = {Bleier, Michael}, title = {Underwater Laser Scanning - Refractive Calibration, Self-calibration and Mapping for 3D Reconstruction}, isbn = {978-3-945459-45-4}, doi = {10.25972/OPUS-32269}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322693}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {There is great interest in affordable, precise and reliable metrology underwater: Archaeologists want to document artifacts in situ with high detail. In marine research, biologists require the tools to monitor coral growth and geologists need recordings to model sediment transport. Furthermore, for offshore construction projects, maintenance and inspection, millimeter-accurate measurements of defects and offshore structures are essential. While the process of digitizing individual objects and complete sites on land is well understood and standard methods, such as Structure from Motion or terrestrial laser scanning, are regularly applied, precise underwater surveying with high resolution is still a complex and difficult task. Applying optical scanning techniques in water is challenging due to reduced visibility caused by turbidity and light absorption. However, optical underwater scanners provide significant advantages in terms of achievable resolution and accuracy compared to acoustic systems. This thesis proposes an underwater laser scanning system and the algorithms for creating dense and accurate 3D scans in water. It is based on laser triangulation and the main optical components are an underwater camera and a cross-line laser projector. The prototype is configured with a motorized yaw axis for capturing scans from a tripod. Alternatively, it is mounted to a moving platform for mobile mapping.
The main focus lies on the refractive calibration of the underwater camera and laser projector, the image processing and 3D reconstruction. For highest accuracy, the refraction at the individual media interfaces must be taken into account. This is addressed by an optimization-based calibration framework using a physical-geometric camera model derived from an analytical formulation of a ray-tracing projection model. In addition to scanning underwater structures, this work presents the 3D acquisition of semi-submerged structures and the correction of refraction effects. As in-situ calibration in water is complex and time-consuming, the challenge of transferring an in-air scanner calibration to water without re-calibration is investigated, as well as self-calibration techniques for structured light. The system was successfully deployed in various configurations for both static scanning and mobile mapping. An evaluation of the calibration and 3D reconstruction using reference objects and a comparison of free-form surfaces in clear water demonstrate the high accuracy potential in the range of one millimeter to less than one centimeter, depending on the measurement distance. Mobile underwater mapping and motion compensation based on visual-inertial odometry is demonstrated using a new optical underwater scanner based on fringe projection. Continuous registration of individual scans allows the acquisition of 3D models from an underwater vehicle. RGB images captured in parallel are used to create 3D point clouds of underwater scenes in full color. 3D maps are useful to the operator during the remote control of underwater vehicles and provide the building blocks to enable offshore inspection and surveying tasks. The advancing automation of the measurement technology will allow non-experts to use it, significantly reduce acquisition time and increase accuracy, making underwater metrology more cost-effective.}, subject = {Selbstkalibrierung}, language = {en} } @phdthesis{Wagner2023, author = {Wagner, Jan Cetric}, title = {Maximalnetzplan zur reaktiven Steuerung von Produktionsabl{\"a}ufen}, isbn = {978-3-945459-43-0}, doi = {10.25972/OPUS-30545}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305452}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {182}, year = {2023}, abstract = {In produzierenden Unternehmen werden verschiedene Vorgehensweisen zur Planung, {\"U}berwachung und Steuerung von Produktionsabl{\"a}ufen eingesetzt. Einer dieser Methoden wird als Vorgangsknotennetzplantechnik bezeichnet. Die einzelnen Produktionsschritte werden als Knoten definiert und durch Pfeile miteinander verbunden. Die Pfeile stellen die Beziehungen der jeweiligen Vorg{\"a}nge zueinander und damit den Produktionsablauf dar. Diese Technik erlaubt den Anwendern einen umfassenden {\"U}berblick {\"u}ber die einzelnen Prozessrelationen. Zus{\"a}tzlich k{\"o}nnen mit ihr Vorgangszeiten und Produktfertigstellungszeiten ermittelt werden, wodurch eine ausf{\"u}hrliche Planung der Produktion erm{\"o}glicht wird. Ein Nachteil dieser Technik begr{\"u}ndet sich in der alleinigen Darstellung einer ausf{\"u}hrbaren Prozessabfolge. Im Falle eines St{\"o}rungseintritts mit der Folge eines nicht durchf{\"u}hrbaren Vorgangs muss von dem origin{\"a}ren Prozess abgewichen werden. Aufgrund dessen wird eine Neuplanung erforderlich. Es werden Alternativen f{\"u}r den gest{\"o}rten Vorgang ben{\"o}tigt, um eine Fortf{\"u}hrung des Prozesses ungeachtet der St{\"o}rung zu erreichen. 
Innerhalb dieser Arbeit wird daher eine Erweiterung der Vorgangsknotennetzplantechnik beschrieben, die es erlaubt, erg{\"a}nzend zu dem geplanten Soll-Prozess Alternativvorg{\"a}nge f{\"u}r einzelne Vorg{\"a}nge darzulegen. Diese Methode wird als Maximalnetzplan bezeichnet. Die Alternativen werden im Falle eines St{\"o}rungseintritts automatisch evaluiert und dem Anwender in priorisierter Reihenfolge pr{\"a}sentiert. Durch die Verwendung des Maximalnetzplans kann eine aufwendige Neuplanung vermieden werden. Als Anwendungsbeispiel dient ein Montageprozess, mithilfe dessen die Verwendbarkeit der Methode dargelegt wird. Weiterf{\"u}hrend zeigt eine zeitliche Analyse zufallsbedingter Maximalnetzpl{\"a}ne eine Begr{\"u}ndung zur Durchf{\"u}hrung von Alternativen und damit den Nutzen des Maximalnetzplans auf. Zus{\"a}tzlich sei angemerkt, dass innerhalb dieser Arbeit verwendete Begrifflichkeiten wie Anwender, Werker oder Mitarbeiter in maskuliner Schreibweise niedergeschrieben werden. Dieses ist ausschließlich der Einfachheit geschuldet und nicht dem Zweck der Diskriminierung anderer Geschlechter dienlich. Die verwendete Schreibweise soll alle Geschlechter ansprechen, ob m{\"a}nnlich, weiblich oder divers.}, subject = {Produktionsplanung}, language = {de} } @phdthesis{Scharnagl2022, author = {Scharnagl, Julian}, title = {Distributed Guidance, Navigation and Control for Satellite Formation Flying Missions}, isbn = {978-3-945459-42-3}, doi = {10.25972/OPUS-28753}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-287530}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Ongoing changes in spaceflight - continuing miniaturization, declining costs of rocket launches and satellite components, and improved satellite computing and control capabilities - are advancing Satellite Formation Flying (SFF) as a research and application area. SFF enables new applications that cannot be realized (or cannot be realized at a reasonable cost) with conventional single-satellite missions. In particular, distributed Earth observation applications such as photogrammetry and tomography or distributed space telescopes require precisely placed and controlled satellites in orbit. Several enabling technologies are required for SFF, such as inter-satellite communication, precise attitude control, and in-orbit maneuverability. However, one of the most important requirements is a reliable distributed Guidance, Navigation and Control (GNC) strategy. This work addresses the issue of distributed GNC for SFF in 3D with a focus on Continuous Low-Thrust (CLT) propulsion satellites (e.g., with electric thrusters) and concentrates on circular low Earth orbits. However, the focus of this work is not only on control theory, but control is considered as part of the system engineering process of typical small satellite missions. Thus, common sensor and actuator systems are analyzed to derive their characteristics and their impacts on formation control. This serves as the basis for the design, implementation, and evaluation of the following control approaches: First, a Model Predictive Control (MPC) method with specific adaptations to SFF and its requirements and constraints; second, a distributed robust controller that combines consensus methods for distributed system control and \$H_{\infty}\$ robust control; and finally, a controller that uses plant inversion for control and combines it with a reference governor to steer the controller to the target on an optimal trajectory considering several constraints. 
The developed controllers are validated and compared based on extensive software simulations. Realistic 3D formation flight scenarios were taken from the Networked Pico-Satellite Distributed System Control (NetSat) CubeSat formation flight mission. The three compared methods show different advantages and disadvantages in the different application scenarios. The distributed robust consensus-based controller, for example, lacks the ability to limit the maximum thrust, so it is not suitable for satellites with CLT. But both the MPC-based approach and the plant inversion-based controller are suitable for CLT SFF applications, while again showing distinct advantages and disadvantages in different scenarios. The scientific contribution of this work may be summarized as the creation of novel and specific control approaches for the class of CLT SFF applications, which still lacks methods that withstand application in real space missions, as well as the scientific evaluation and comparison of the developed methods.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Freimann2022, author = {Freimann, Andreas}, title = {Efficient Communication in Networks of Small Low Earth Orbit Satellites and Ground Stations}, isbn = {978-3-945459-41-6}, doi = {10.25972/OPUS-28052}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280521}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {With the miniaturization of satellites, a fundamental change took place in the space industry. Instead of single big monolithic satellites, nowadays more and more systems are envisaged consisting of a number of small satellites to form cooperating systems in space. The lower costs for development and launch as well as the spatial distribution of these systems enable the implementation of new scientific missions and commercial services. With this paradigm shift, new challenges constantly emerge for satellite developers, particularly in the area of wireless communication systems and network protocols. Satellites in low Earth orbits and ground stations form dynamic space-terrestrial networks. The characteristics of these networks differ fundamentally from those of other networks. The resulting challenges with regard to communication system design, system analysis, packet forwarding, routing and medium access control as well as challenges concerning the reliability and efficiency of wireless communication links are addressed in this thesis. The physical modeling of space-terrestrial networks is addressed by analyzing existing satellite systems and communication devices, by evaluating measurements and by implementing a simulator for space-terrestrial networks. The resulting system and channel models were used as a basis for the prediction of the dynamic network topologies, link properties and channel interference. These predictions allowed for the implementation of efficient routing and medium access control schemes for space-terrestrial networks.
Further, the implementation and utilization of software-defined ground stations is addressed, and a data upload scheme for the operation of small satellite formations is presented.}, subject = {Satellitenfunk}, language = {en} } @phdthesis{Leutert2021, author = {Leutert, Florian}, title = {Flexible Augmented Reality Systeme f{\"u}r robotergest{\"u}tzte Produktionsumgebungen}, isbn = {978-3-945459-39-3}, doi = {10.25972/OPUS-24972}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249728}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Produktionssysteme mit Industrierobotern werden zunehmend komplex; waren deren Arbeitsbereiche fr{\"u}her noch statisch und abgeschirmt, und die programmierten Abl{\"a}ufe gleichbleibend, so sind die Anforderungen an moderne Robotik-Produktionsanlagen gestiegen: Diese sollen sich jetzt mithilfe von intelligenter Sensorik auch in unstrukturierten Umgebungen einsetzen lassen, sich bei sinkenden Losgr{\"o}ßen aufgrund individualisierter Produkte und h{\"a}ufig {\"a}ndernden Produktionsaufgaben leicht rekonfigurieren lassen, und sogar eine direkte Zusammenarbeit zwischen Mensch und Roboter erm{\"o}glichen. Gerade auch bei dieser Mensch-Roboter-Kollaboration wird es damit notwendig, dass der Mensch die Daten und Aktionen des Roboters leicht verstehen kann. Aufgrund der gestiegenen Anforderungen m{\"u}ssen somit auch die Bedienerschnittstellen dieser Systeme verbessert werden. Als Grundlage f{\"u}r diese neuen Benutzerschnittstellen bietet sich Augmented Reality (AR) als eine Technologie an, mit der sich komplexe r{\"a}umliche Daten f{\"u}r den Bediener leicht verst{\"a}ndlich darstellen lassen. Komplexe Informationen werden dabei in der Arbeitsumgebung der Nutzer visualisiert und als virtuelle Einblendungen sichtbar gemacht, und so auf einen Blick verst{\"a}ndlich. Die diversen existierenden AR-Anzeigetechniken sind f{\"u}r verschiedene Anwendungsfelder unterschiedlich gut geeignet, und sollten daher flexibel kombinier- und einsetzbar sein. Auch sollen diese AR-Systeme schnell und einfach auf verschiedenartiger Hardware in den unterschiedlichen Arbeitsumgebungen in Betrieb genommen werden k{\"o}nnen. In dieser Arbeit wird ein Framework f{\"u}r Augmented Reality Systeme vorgestellt, mit dem sich die genannten Anforderungen umsetzen lassen, ohne dass daf{\"u}r spezialisierte AR-Hardware notwendig wird. Das Flexible AR-Framework kombiniert und b{\"u}ndelt daf{\"u}r verschiedene Softwarefunktionen f{\"u}r die grundlegenden AR-Anzeigeberechnungen, f{\"u}r die Kalibrierung der notwendigen Hardware, Algorithmen zur Umgebungserfassung mittels Structured Light sowie generische AR-Visualisierungen und erlaubt es dadurch, verschiedene AR-Anzeigesysteme schnell und flexibel in Betrieb zu nehmen und parallel zu betreiben. Im ersten Teil der Arbeit werden Standard-Hardware f{\"u}r verschiedene AR-Visualisierungsformen sowie die notwendigen Algorithmen vorgestellt, um diese flexibel zu einem AR-System zu kombinieren. Dabei m{\"u}ssen die einzelnen verwendeten Ger{\"a}te pr{\"a}zise kalibriert werden; hierf{\"u}r werden verschiedene M{\"o}glichkeiten vorgestellt, und die mit ihnen dann erreichbaren typischen Anzeige-Genauigkeiten in einer Evaluation charakterisiert.
Nach der Vorstellung der grundlegenden AR-Systeme des Flexiblen AR-Frameworks wird dann eine Reihe von Anwendungen vorgestellt, bei denen das entwickelte System in konkreten Praxis-Realisierungen als AR-Benutzerschnittstelle zum Einsatz kam, unter anderem zur {\"U}berwachung von, Zusammenarbeit mit und einfachen Programmierung von Industrierobotern, aber auch zur Visualisierung von komplexen Sensordaten oder zur Fernwartung. Im Verlauf der Arbeit werden dadurch die Vorteile, die sich durch Verwendung der AR-Technologie in komplexen Produktionssystemen ergeben, herausgearbeitet und in Nutzerstudien belegt.}, subject = {Erweiterte Realit{\"a}t }, language = {de} } @phdthesis{Dombrovski2022, author = {Dombrovski, Veaceslav}, title = {Software Framework to Support Operations of Nanosatellite Formations}, isbn = {978-3-945459-38-6}, doi = {10.25972/OPUS-24931}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249314}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Since the first CubeSat launch in 2003, the hardware and software complexity of nanosatellites has been continuously increasing. To keep up with the continuously increasing mission complexity and to retain the primary advantages of a CubeSat mission, a new approach for the overall space and ground software architecture and protocol configuration is elaborated in this work. The aim of this thesis is to propose a uniform software and protocol architecture as a basis for software development, test, simulation and operation of multiple pico-/nanosatellites based on ultra-low power components. In contrast to single-CubeSat missions, current and upcoming nanosatellite formation missions require faster and more straightforward development, pre-flight testing and calibration procedures as well as simultaneous operation of multiple satellites. A dynamic and decentralized Compass mission network was established in multiple active CubeSat missions, consisting of uniformly accessible nodes. Compass middleware was elaborated to unify the communication and functional interfaces between all involved mission-related software and hardware components. All systems can access each other via dynamic routes to perform service-based M2M communication. With the proposed model-based communication approach, all states, abilities and functionalities of a system are accessed in a uniform way. The Tiny scripting language was designed to allow dynamic code execution on ultra-low power components as a basis for a constraint-based in-orbit scheduler and experiment execution. The implemented Compass Operations front-end enables far-reaching monitoring and control capabilities of all ground and space systems. Its integrated constraint-based operations task scheduler allows the recording of complex satellite operations, which are conducted automatically during the overpasses. The outcome of this thesis became an enabling technology for the UWE-3, UWE-4 and NetSat CubeSat missions.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Kramer2021, author = {Kramer, Alexander}, title = {Orbit control of a very small satellite using electric propulsion}, isbn = {978-3-945459-34-8 (online)}, doi = {10.25972/OPUS-24155}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241552}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Miniaturized satellites on a nanosatellite scale below 10kg of total mass contribute most to the number of satellites launched into Low Earth Orbit today.
This results from the potential to design, integrate and launch these space missions within months at very low costs. In the past decade, the reliability in the fields of system design, communication, and attitude control have matured to allow for competitive applications in Earth observation, communication services, and science missions. The capability of orbit control is an important next step in this development, enabling operators to adjust orbits according to current mission needs and small satellite formation flight, which promotes new measurements in various fields of space science. Moreover, this ability makes missions with altitudes above the ISS comply with planned regulations regarding collision avoidance maneuvering. This dissertation presents the successful implementation of orbit control capabilities on the pico-satellite class for the first time. This pioneering achievement is demonstrated on the 1U CubeSat UWE-4. A focus is on the integration and operation of an electric propulsion system on miniaturized satellites. Besides limitations in size, mass, and power of a pico-satellite, the choice of a suitable electric propulsion system was driven by electromagnetic cleanliness and the use as a combined attitude and orbit control system. Moreover, the integration of the propulsion system leaves the valuable space at the outer faces of the CubeSat structure unoccupied for future use by payloads. The used NanoFEEP propulsion system consists of four thruster heads, two neutralizers and two Power Processing Units (PPUs). The thrusters can be used continuously for 50 minutes per orbit after the liquefaction of the propellant by dedicated heaters. The power consumption of a PPU with one activated thruster, its heater and a neutralizer at emitter current levels of 30-60μA or thrust levels of 2.6-5.5μN, respectively, is in the range of 430-1050mW. Two thruster heads were activated within the scope of in-orbit experiments. The thrust direction was determined using a novel algorithm within 15.7° and 13.2° of the mounting direction. Despite limited controllability of the remaining thrusters, thrust vector pointing was achieved using the magnetic actuators of the Attitude and Orbit Control System. In mid 2020, several orbit control maneuvers changed the altitude of UWE-4, a first for pico-satellites. During the orbit lowering scenario with a duration of ten days, a single thruster head was activated in 78 orbits for 5:40 minutes per orbit. This resulted in a reduction of the orbit altitude by about 98.3m and applied a Delta v of 5.4cm/s to UWE-4. The same thruster was activated in another experiment during 44 orbits within five days for an average duration of 7:00 minutes per orbit. The altitude of UWE-4 was increased by about 81.2m and a Delta v of 4.4cm/s was applied. 
Additionally, a collision avoidance maneuver was executed in July 2020, which increased the distance of closest approach to the object by more than 5000m.}, subject = {Kleinsatellit}, language = {en} } @techreport{RossiMaurelliUnnithanetal.2021, author = {Rossi, Angelo Pio and Maurelli, Francesco and Unnithan, Vikram and Dreger, Hendrik and Mathewos, Kedus and Pradhan, Nayan and Corbeanu, Dan-Andrei and Pozzobon, Riccardo and Massironi, Matteo and Ferrari, Sabrina and Pernechele, Claudia and Paoletti, Lorenzo and Simioni, Emanuele and Pajola, Maurizio and Santagata, Tommaso and Borrmann, Dorit and N{\"u}chter, Andreas and Bredenbeck, Anton and Zevering, Jasper and Arzberger, Fabian and Reyes Mantilla, Camilo Andr{\´e}s}, title = {DAEDALUS - Descent And Exploration in Deep Autonomy of Lava Underground Structures}, isbn = {978-3-945459-33-1}, issn = {1868-7466}, doi = {10.25972/OPUS-22791}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-227911}, pages = {188}, year = {2021}, abstract = {The DAEDALUS mission concept aims at exploring and characterising the entrance and initial part of Lunar lava tubes within a compact, tightly integrated spherical robotic device, with a complementary payload set and autonomous capabilities. The mission concept addresses specifically the identification and characterisation of potential resources for future ESA exploration, the local environment of the subsurface and its geologic and compositional structure. A sphere is ideally suited to protect sensors and scientific equipment in rough, uneven environments. It will house laser scanners, cameras and ancillary payloads. The sphere will be lowered into the skylight and will explore the entrance shaft, associated caverns and conduits. Lidar (light detection and ranging) systems produce 3D models with high spatial accuracy independent of lighting conditions and visible features. Hence this will be the primary exploration toolset within the sphere. The additional payload that can be accommodated in the robotic sphere consists of camera systems with panoramic lenses and scanners such as multi-wavelength or single-photon scanners. A moving mass will trigger movements. The tether for lowering the sphere will be used for data communication and powering the equipment during the descending phase. Furthermore, the tether-sphere connector will host a WiFi access point, such that data of the conduit can be transferred to the surface relay station. During the exploration phase, the robot will be disconnected from the cable, and will use wireless communication. Emergency autonomy software will ensure that, in case of loss of communication, the robot will continue the nominal mission.}, subject = {Mond}, language = {en} } @phdthesis{SchauerMarinRodrigues2020, author = {Schauer Marin Rodrigues, Johannes}, title = {Detecting Changes and Finding Collisions in 3D Point Clouds : Data Structures and Algorithms for Post-Processing Large Datasets}, isbn = {978-3-945459-32-4}, doi = {10.25972/OPUS-21428}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-214285}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Affordable prices for 3D laser range finders and mature software solutions for registering multiple point clouds in a common coordinate system paved the way for new areas of application for 3D point clouds. Nowadays we see 3D laser scanners being used not only by digital surveying experts but also by law enforcement officials, construction workers or archaeologists.
Whether the purpose is digitizing factory production lines, preserving historic sites as digital heritage or recording environments for gaming or virtual reality applications -- it is hard to imagine a scenario in which the final point cloud must also contain the points of "moving" objects like factory workers, pedestrians, cars or flocks of birds. For most post-processing tasks, moving objects are undesirable not least because moving objects will appear in scans multiple times or are distorted due to their motion relative to the scanner rotation. The main contributions of this work are two post-processing steps for already registered 3D point clouds. The first method is a new change detection approach based on a voxel grid which allows partitioning the input points into static and dynamic points using explicit change detection and subsequently removing the latter to obtain a "cleaned" point cloud. The second method uses this cleaned point cloud as input for detecting collisions between points of the environment point cloud and a point cloud of a model that is moved through the scene. Our approach to explicit change detection is compared to the state of the art using multiple datasets including the popular KITTI dataset. We show how our solution achieves similar or better F1-scores than an existing solution while at the same time being faster. To detect collisions we do not produce a mesh but approximate the raw point cloud data by spheres or cylindrical volumes. We show how our data structures allow efficient nearest neighbor queries that make our CPU-only approach comparable to a massively-parallel algorithm running on a GPU. The utilized algorithms and data structures are discussed in detail. All our software is freely available for download under the terms of the GNU General Public License. Most of the datasets used in this thesis are freely available as well. We provide shell scripts that allow one to directly reproduce the quantitative results shown in this thesis for easy verification of our findings.}, subject = {Punktwolke}, language = {en} } @phdthesis{Bangert2019, author = {Bangert, Philip}, title = {Magnetic Attitude Control of Miniature Satellites and its Extension towards Orbit Control using an Electric Propulsion System}, isbn = {978-3-945459-28-7 (online)}, issn = {1868-7474}, doi = {10.25972/OPUS-17702}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-177020}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The attitude and orbit control system of pico- and nano-satellites to date is one of the bottlenecks for future scientific and commercial applications. A performance increase while keeping within the satellites' restrictions will enable new space missions, especially for the smallest of the CubeSat classes. This work addresses methods to measure and improve the satellite's attitude pointing and orbit control performance based on advanced sensor data analysis and optimized on-board software concepts. These methods are applied to spaceborne satellites and future CubeSat missions to demonstrate their validity. An in-orbit calibration procedure for a typical CubeSat attitude sensor suite is developed and applied to the UWE-3 satellite in space. Subsequently, a method to estimate the attitude determination accuracy without the help of an external reference sensor is developed. Using this method, it is shown that the UWE-3 satellite achieves an in-orbit attitude determination accuracy of about 2°.
An advanced data analysis of the attitude motion of a miniature satellite is used in order to estimate the main attitude disturbance torque in orbit. It is shown that the magnetic disturbance is by far the most significant contribution for miniature satellites and a method to estimate the residual magnetic dipole moment of a satellite is developed. Its application to three CubeSats currently in orbit reveals that magnetic disturbances are a common issue for this class of satellites. The dipole moments measured are between 23.1mAm² and 137.2mAm². In order to autonomously estimate and counteract this disturbance in future missions, an on-board magnetic dipole estimation algorithm is developed. The autonomous neutralization of such disturbance torques together with the simplification of attitude control for the satellite operator is the focus of a novel on-board attitude control software architecture. It incorporates disturbance torques acting on the satellite and automatically optimizes the control output. Its application is demonstrated in space on board the UWE-3 satellite through various attitude control experiments, the results of which are presented here. The integration of a miniaturized electric propulsion system will enable CubeSats to perform orbit control and, thus, open up new application scenarios. The in-orbit characterization, however, poses the problem of precisely measuring very low thrust levels in the order of µN. A method to measure this thrust based on the attitude dynamics of the satellite is developed and evaluated in simulation. It is shown that the demonstrator mission UWE-4 will be able to measure these thrust levels with a high accuracy of 1\% for thrust levels higher than 1µN. The orbit control capabilities of UWE-4 using its electric propulsion system are evaluated and a hybrid attitude control system making use of the satellite's magnetorquers and the electric propulsion system is developed. It is based on the flexible attitude control architecture mentioned before and thrust vector pointing accuracies of better than 2° can be achieved. This results in a thrust delivery of more than 99\% of the desired acceleration in the target direction.}, subject = {Satellit}, language = {en} } @phdthesis{Pfitzner2019, author = {Pfitzner, Christian}, title = {Visual Human Body Weight Estimation with Focus on Clinical Applications}, isbn = {978-3-945459-27-0 (online)}, doi = {10.25972/OPUS-17484}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174842}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {It is the aim of this thesis to present a visual body weight estimation, which is suitable for medical applications. A typical scenario where the estimation of the body weight is essential is the emergency treatment of stroke patients: In case of an ischemic stroke, the patient has to receive a body weight adapted drug to dissolve a blood clot in a vessel. The accuracy of the estimated weight influences the outcome of the therapy directly. However, the treatment has to start as early as possible after the arrival at a trauma room to provide sufficient treatment. Weighing a patient takes time, and the patient has to be moved. Furthermore, patients are often not able to communicate a value for their body weight due to their stroke symptoms. Therefore, it is state of the art that physicians guess the body weight. A patient receiving too low a dose has an increased risk that the blood clot does not dissolve and brain tissue is permanently damaged.
Today, about one-third of patients receive an insufficient dosage. In contrast to that, an overdose can cause bleeding and further complications. Physicians are aware of this issue, but a reliable alternative is missing. The thesis presents state-of-the-art principles and devices for the measurement and estimation of body weight in the context of medical applications. While scales are common and available at a hospital, the process of weighing takes too long and can hardly be integrated into the process of stroke treatment. Sensor systems and algorithms are presented in the section for related work and provide an overview of different approaches. The system presented here -- called Libra3D -- consists of a computer installed in a real trauma room, as well as visual sensors integrated into the ceiling. For the estimation of the body weight, the patient is on a stretcher which is placed in the field of view of the sensors. The three sensors -- two RGB-D and a thermal camera -- are calibrated intrinsically and extrinsically. Also, algorithms for sensor fusion are presented to align the data from all sensors, which is the basis for a reliable segmentation of the patient. A combination of state-of-the-art image and point cloud algorithms is used to localize the patient on the stretcher. The challenge in the scenario with the patient on the bed is the dynamic environment, including other people or medical devices in the field of view. After the successful segmentation, a set of hand-crafted features is extracted from the patient's point cloud. These features rely on geometric and statistical values and provide a robust input to a subsequent machine learning approach. The final estimation is done with a previously trained artificial neural network. The experiment section offers different configurations of the previously extracted feature vector. Additionally, the approach presented here is compared to state-of-the-art methods: the patient's own assessment, the physician's guess, and an anthropometric estimation. Besides the patient's own estimation, Libra3D outperforms all state-of-the-art estimation methods: 95 percent of all patients are estimated with a relative error of less than 10 percent with respect to the ground truth body weight. It takes only a minimal amount of time for the measurement, and the approach can easily be integrated into the treatment of stroke patients, while physicians are not hindered. Furthermore, the section for experiments demonstrates two additional applications: The extracted features can also be used to estimate the body weight of people standing, or even walking in front of a 3D camera. Also, it is possible to determine or classify the BMI of a subject on a stretcher. A potential application for this approach is the reduction of the radiation dose of patients being exposed to X-rays during a CT examination. During the time of this thesis, several data sets were recorded. These data sets contain the ground truth body weight, as well as the data from the sensors.
They are available for collaboration in the field of body weight estimation for medical applications.}, subject = {Punktwolke}, language = {en} } @phdthesis{Albert2019, author = {Albert, Michael}, title = {Intelligent analysis of medical data in a generic telemedicine infrastructure}, isbn = {978-3-945459-26-3 (Online)}, doi = {10.25972/OPUS-17421}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174213}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Telemedicine uses telecommunication and information technology to provide health care services over spatial distances. With the upcoming demographic shift towards an older average population age, rural areas especially suffer from a decreasing doctor-to-patient ratio as well as a limited number of medical specialists available within acceptable distance. These areas could benefit the most from telemedicine applications as they are known to improve access to medical services, medical expertise and can also help to mitigate critical or emergency situations. Although the possibilities of telemedicine applications exist in the entire range of healthcare, current systems focus on one specific disease while using dedicated hardware to connect the patient with the supervising telemedicine center. This thesis describes the development of a telemedical system which follows a new generic design approach. This bridges the gap left by existing approaches that only tackle one specific application. The proposed system, on the contrary, aims at supporting as many diseases and use cases as possible by taking all the stakeholders into account at the same time. To address the usability and acceptance of the system, it is designed to use standardized hardware like commercial medical sensors and smartphones for collecting medical data of the patients and transmitting them to the telemedical center. The smartphone can also act as an interface to the patient for health questionnaires or feedback. The system can handle the collection and transport of medical data, analysis and visualization of the data as well as providing real-time communication with video and audio between the users. On top of the generic telemedical framework, the issue of scalability is addressed by integrating a rule-based analysis tool for the medical data. Rules can be easily created by medical personnel via a visual editor and can be personalized for each patient. The rule-based analysis tool is extended by multiple options for visualization of the data, mechanisms to handle complex rules and options for performing actions like raising alarms or sending automated messages. It is sometimes hard for the medical experts to formulate their knowledge into rules and there may be information in the medical data that is not yet known. This is why a machine learning module was integrated into the system. It uses the incoming medical data of the patients to learn new rules that are then presented to the medical personnel for inspection. This is in line with European legislation where the human still needs to be in charge of such decisions. Overall, we were able to show the benefit of the generic approach by evaluating it in three completely different medical use cases derived from specific application needs: monitoring of COPD (chronic obstructive pulmonary disease) patients, support of patients performing dialysis at home and councils of intensive-care experts. In addition, the system was used for a non-medical use case: monitoring and optimization of industrial machines and robots.
In all of the mentioned cases, we were able to prove the robustness of the generic approach with real users of the corresponding domain. This is why we can propose this approach for future development of telemedical systems.}, subject = {Telemedizin}, language = {en} } @phdthesis{Koch2018, author = {Koch, Rainer}, title = {Sensor Fusion for Precise Mapping of Transparent and Specular Reflective Objects}, isbn = {978-3-945459-25-6}, doi = {10.25972/OPUS-16346}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-163462}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Almost once a week, broadcasts about earthquakes, hurricanes, tsunamis, or forest fires fill the news. While it is hard to watch such news, it is even harder for rescue troops to enter such areas. They need some skills to get a quick overview of the devastated area and find victims. Time is ticking, since the chance for survival shrinks the longer it takes until help is available. To coordinate the teams efficiently, all information needs to be collected at the command center. Therefore, teams investigate the destroyed houses and hollow spaces for victims. Doing so, they can never be sure that the building will not fully collapse while they are inside. Here, rescue robots are welcome helpers, as they are replaceable and make the work safer. Unfortunately, rescue robots are not yet usable off the shelf. There is no doubt that such a robot has to fulfil essential requirements to successfully accomplish a rescue mission. Apart from the mechanical requirements, it has to be able to build a 3D map of the environment. This is essential to navigate through rough terrain and fulfil manipulation tasks (e.g. open doors). To build a map and gather environmental information, robots are equipped with multiple sensors. Since laser scanners produce precise measurements and support a wide scanning range, they are common visual sensors utilized for mapping. Unfortunately, they produce erroneous measurements when scanning transparent (e.g. glass, transparent plastic) or specular reflective objects (e.g. mirror, shiny metal). It is understood that such objects can be everywhere and a pre-manipulation to prevent their influence is impossible. Using additional sensors also bears risks. The problem is that these objects are occasionally visible, depending on the incident angle of the laser beam, the surface, and the type of object. Hence, for transparent objects, measurements might result from the object surface or objects behind it. For specular reflective objects, measurements might result from the object surface or a mirrored object. These mirrored objects appear behind the surface, which is wrong. To obtain a precise map, the surfaces need to be recognised and mapped reliably. Otherwise, the robot navigates into it and crashes. Further, points behind the surface should be identified and treated based on the object type. Points behind a transparent surface should remain as they represent real objects. In contrast, points behind a specular reflective surface should be erased. To do so, the object type needs to be classified. Unfortunately, none of the current approaches is capable of fulfilling these requirements. Therefore, this thesis addresses the problem of detecting transparent and specular reflective objects and identifying their influences.
To give the reader a starting point, the first chapters describe: the theoretical background concerning propagation of light; sensor systems applied for range measurements; mapping approaches used in this work; and the state-of-the-art concerning detection and identification of transparent and specular reflective objects. Afterwards, the Reflection-Identification-Approach, which is the core of this thesis, is presented. It describes a 2D and a 3D implementation to detect and classify such objects. Both are available as ROS nodes. In the next chapter, various experiments demonstrate the applicability and reliability of these nodes. They prove that transparent and specular reflective objects can be detected and classified. Therefore, a Pre- and Post-Filter module is required in 2D. In 3D, classification is possible solely with the Pre-Filter. This is due to the higher number of measurements. An example shows that an updatable mapping module allows the robot navigation to rely on refined maps. Otherwise, two individual maps are built, which require a fusion afterwards. Finally, the last chapter summarizes the results and proposes suggestions for future work.}, subject = {laserscanner}, language = {en} } @phdthesis{Baier2018, author = {Baier, Pablo A.}, title = {Simulator for Minimally Invasive Vascular Interventions: Hardware and Software}, isbn = {978-3-945459-22-5}, doi = {10.25972/OPUS-16119}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-161190}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {118}, year = {2018}, abstract = {A complete simulation system is proposed that can be used as an educational tool by physicians in training basic skills of Minimally Invasive Vascular Interventions. In the first part, a surface model is developed to assemble arteries having a planar segmentation. It is based on Sweep Surfaces and can be extended to T- and Y-like bifurcations. A continuous force vector field is described, representing the interaction between the catheter and the surface. The computation time of the force field is almost unaffected when the resolution of the artery is increased. The mechanical properties of arteries play an essential role in the study of the circulatory system dynamics, which has become increasingly important in the treatment of cardiovascular diseases. In Virtual Reality Simulators, it is crucial to have a tissue model that responds in real time. In this work, the arteries are discretized by a two-dimensional mesh and the nodes are connected by three kinds of linear springs. Three tissue layers (Intima, Media, Adventitia) are considered and, starting from the stretch-energy density, some of the elasticity tensor components are calculated. The physical model linearizes and homogenizes the material response, but it still contemplates the geometric nonlinearity. In general, if the arterial stretch varies by 1\% or less, then the agreement between the linear and nonlinear models is trustworthy. In the last part, the physical model of the wire proposed by Konings is improved. As a result, a simpler and more stable method is obtained to calculate the equilibrium configuration of the wire. In addition, a geometrical method is developed to perform relaxations. It is particularly useful when the wire is hindered in the physical method because of the boundary conditions. The physical and the geometrical methods are merged, resulting in efficient relaxations. Tests show that the shape of the virtual wire agrees with the experiment.
The proposed algorithm allows real-time execution, and the hardware required to assemble the simulator has a low cost.}, subject = {Computersimulation}, language = {en} } @phdthesis{Borrmann2018, author = {Borrmann, Dorit}, title = {Multi-modal 3D mapping - Combining 3D point clouds with thermal and color information}, isbn = {978-3-945459-20-1}, issn = {1868-7474}, doi = {10.25972/OPUS-15708}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-157085}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Imagine a technology that automatically creates a full 3D thermal model of an environment and detects temperature peaks in it. For better orientation in the model, it is enhanced with color information. The current state of the art for analyzing temperature-related issues is thermal imaging. It is relevant for energy efficiency but also for securing important infrastructure such as power supplies and temperature regulation systems. Monitoring and analysis of the data for a large building is tedious as stable conditions need to be guaranteed for several hours and detailed notes about the pose and the environment conditions for each image must be taken. For some applications repeated measurements are necessary to monitor changes over time. The analysis of the scene is only possible through expertise and experience. This thesis proposes a robotic system that creates a full 3D model of the environment with color and thermal information by combining thermal imaging with the technology of terrestrial laser scanning. The addition of a color camera facilitates the interpretation of the data and allows for other application areas. The data from all sensors collected at different positions is joined in one common reference frame using calibration and scan matching. The first part of the thesis deals with 3D point cloud processing with the emphasis on accessing point cloud data efficiently, detecting planar structures in the data and registering multiple point clouds into one common coordinate system. The second part covers the autonomous exploration and data acquisition with a mobile robot with the objective of minimizing the unseen area in 3D space. Furthermore, the combination of different modalities, color images, thermal images and point cloud data through calibration is elaborated. The last part presents applications for the collected data. Among these are methods to detect the structure of building interiors for reconstruction purposes and subsequent detection and classification of windows. A system to project the gathered thermal information back into the scene is presented as well as methods to improve the color information and to join separately acquired point clouds and photo series. A full multi-modal 3D model contains all the relevant geometric information about the recorded scene and enables an expert to fully analyze it off-site. The technology clears the path for automatically detecting points of interest, thereby helping the expert to analyze the heat flow as well as localize and identify heat leaks. The concept is modular and neither limited to achieving energy efficiency nor restricted to the use in combination with a mobile platform.
It also finds its application in fields such as archaeology and geology and can be extended by further sensors.}, subject = {Punktwolke}, language = {en} } @phdthesis{Aschenbrenner2017, author = {Aschenbrenner, Doris}, title = {Human Robot Interaction Concepts for Human Supervisory Control and Telemaintenance Applications in an Industry 4.0 Environment}, isbn = {978-3-945459-18-8}, doi = {10.25972/OPUS-15052}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150520}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {While teleoperation of technically highly sophisticated systems has already been a wide field of research, especially for space and robotics applications, the automation industry has not yet benefited from its results. Besides the established fields of application, production lines with industrial robots and the surrounding plant components also need to be remotely accessible. This is especially critical for maintenance or if an unexpected problem cannot be solved by the local specialists. Special machine manufacturers, especially robotics companies, sell their technology worldwide. Some factories, for example in emerging economies, lack qualified personnel for repair and maintenance tasks. When a severe failure occurs, an expert from the manufacturer needs to fly there, which leads to long downtimes of the machine or even the whole production line. With the development of data networks, a huge part of those travels can be omitted if appropriate teleoperation equipment is provided. This thesis describes the development of a telemaintenance system, which was established in an active production line for research purposes. The customer production site of Braun in Marktheidenfeld, a factory which belongs to Procter \& Gamble, consists of a six-axis Cartesian industrial robot by KUKA Industries, a two-component injection molding system and an assembly unit. The plant produces plastic parts for electric toothbrushes. In the research projects "MainTelRob" and "Bayern.digital", during which this plant was utilised, the Zentrum f{\"u}r Telematik e.V. (ZfT) and its project partners develop novel technical approaches and procedures for modern telemaintenance. The term "telemaintenance" hereby refers to the integration of computer science and communication technologies into the maintenance strategy. It is particularly interesting for high-grade capital-intensive goods like industrial robots. Typical telemaintenance tasks are for example the analysis of a robot failure or difficult repair operations. The service department of KUKA Industries is responsible for the worldwide distributed customers who own more than one robot. Currently, such tasks are offered via phone support and service staff who travel abroad. They want to expand their service activities to telemaintenance and struggle with the high demands of teleoperation, especially regarding security infrastructure. In addition, the facility in Marktheidenfeld has to keep up with the high international standards of Procter \& Gamble and wants to minimize machine downtimes. Like 71.6 \% of all German companies, P\&G sees a huge potential for early information on their production system, but complains about the insufficient quality and the lack of timeliness of the data. The main research focus of this work lies on the human-machine interface for all human tasks in a telemaintenance setup.
This thesis presents original work on the use of a mobile device in the context of maintenance, describes new tools for asynchronous remote analysis and puts all parts together in an integrated telemaintenance infrastructure. With the help of Augmented Reality, the user performance and satisfaction could be increased. Special regard is given to the situation awareness of the remote expert, realized by different camera viewpoints. In detail, the work consists of: - Support of maintenance tasks with a mobile device - Development and evaluation of a context-aware inspection tool - Comparison of a new touch-based mobile robot programming device to the former teach pendant - Study on Augmented Reality support for repair tasks with a mobile device - Condition monitoring for a specific plant with industrial robot - Human computer interaction for remote analysis of a single plant cycle - A big data analysis tool for a multitude of cycles and similar plants - 3D process visualization for a specific plant cycle with additional virtual information - Network architecture in hardware, software and network infrastructure - Mobile device computer supported collaborative work for telemaintenance - Motor exchange telemaintenance example in running production environment - Augmented reality supported remote plant visualization for better situation awareness}, subject = {Fernwartung}, language = {en} } @phdthesis{Houshiar2017, author = {Houshiar, Hamidreza}, title = {Documentation and mapping with 3D point cloud processing}, isbn = {978-3-945459-14-0}, doi = {10.25972/OPUS-14449}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-144493}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {3D point clouds are a de facto standard for 3D documentation and modelling. The advances in laser scanning technology broaden the usability of and access to 3D measurement systems. 3D point clouds are used in many disciplines such as robotics, 3D modelling, archeology and surveying. Scanners are able to acquire up to a million points per second to represent the environment with a dense point cloud. This represents the captured environment with a very high degree of detail. The combination of laser scanning technology with photography adds color information to the point clouds. Thus the environment is represented more realistically. Full 3D models of environments, without any occlusion, require multiple scans. Merging point clouds is a challenging process. This thesis presents methods for point cloud registration based on the panorama images generated from the scans. Image representation of point clouds introduces 2D image processing methods to 3D point clouds. Several projection methods for the generation of panorama maps of point clouds are presented in this thesis. Additionally, methods for point cloud reduction and compression based on the panorama maps are proposed. Due to the large amounts of data generated by the 3D measurement systems, these methods are necessary to improve the point cloud processing, transmission and archiving. This thesis introduces point cloud processing methods as a novel framework for the digitisation of archeological excavations. The framework replaces the conventional documentation methods for excavation sites. It employs point clouds for the generation of the digital documentation of an excavation with the help of an archeologist on-site. The 3D point cloud is used not only for data representation but also for analysis and knowledge generation.
Finally, this thesis presents an autonomous indoor mobile mapping system. The mapping system focuses on the sensor placement planning method. Capturing a complete environment requires several scans. The sensor placement planning method solves for the minimum number of scans required to digitise large environments. Combining this method with a navigation system on a mobile robot platform enables fully autonomous data acquisition. This thesis introduces a novel hole detection method for point clouds to detect obscured parts of a captured environment. The sensor placement planning method selects the next scan position with the largest coverage of the obscured environment. This reduces the required number of scans. The navigation system on the robot platform consists of path planning, path following and obstacle avoidance. This guarantees the safe navigation of the mobile robot platform between the scan positions. The sensor placement planning method is designed as a stand-alone process that can be used with a mobile robot platform for autonomous mapping of an environment or as an assistant tool for the surveyor in scanning projects.}, subject = {3D Punktwolke}, language = {en} } @phdthesis{Busch2016, author = {Busch, Stephan}, title = {Robust, Flexible and Efficient Design for Miniature Satellite Systems}, isbn = {978-3-945459-10-2}, doi = {10.25972/OPUS-13652}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-136523}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Small satellites contribute significantly to the rapidly evolving innovation in space engineering, in particular in distributed space systems for global Earth observation and communication services. Significant mass reduction by miniaturization, increased utilization of commercial high-tech components, and in particular standardization are the key drivers for modern miniature space technology. This thesis addresses key fields in research and development on miniature satellite technology regarding efficiency, flexibility, and robustness. These challenges are addressed by the University of Wuerzburg's advanced pico-satellite bus, realizing a generic modular satellite architecture and standardized interfaces for all subsystems. The modular platform ensures reusability, scalability, and increased testability due to its flexible subsystem interface, which allows efficient and compact integration of the entire satellite in a plug-and-play manner. Besides systematic design for testability, a high degree of operational robustness is achieved by the consistent implementation of redundancy for crucial subsystems, combined with efficient fault detection, isolation and recovery mechanisms. Thus, the UWE-3 platform, and in particular the on-board data handling system and the electrical power system, offers one of the most efficient pico-satellite architectures launched in recent years and provides a solid basis for future extensions. The in-orbit performance results of the pico-satellite UWE-3 are presented and summarize successful operations since its launch in 2013. Several software extensions and adaptations have been uploaded to UWE-3, increasing its capabilities. 
Thus, a very flexible platform for in-orbit software experiments and for evaluations of innovative concepts was provided and tested.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Sun2014, author = {Sun, Kaipeng}, title = {Six Degrees of Freedom Object Pose Estimation with Fusion Data from a Time-of-flight Camera and a Color Camera}, isbn = {978-3-923959-97-6}, doi = {10.25972/OPUS-10508}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-105089}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Object six Degrees of Freedom (6DOF) pose estimation is a fundamental problem in many practical robotic applications, where the target or an obstacle with a simple or complex shape can move fast in cluttered environments. In this thesis, a 6DOF pose estimation algorithm is developed based on the fused data from a time-of-flight camera and a color camera. The algorithm is divided into two stages, an annealed particle filter based coarse pose estimation stage and a gradient descent based accurate pose optimization stage. In the first stage, each particle is evaluated with a sparse representation; this allows large inter-frame motion of the target to be handled well. In the second stage, the conventional range-data-based Iterative Closest Point algorithm is extended by incorporating the target appearance information and is used to calculate the accurate pose by refining the coarse estimate from the first stage. To deal with significant illumination variations during tracking, spherical harmonic illumination modeling is investigated and integrated into both stages. The robustness and accuracy of the proposed algorithm are demonstrated through experiments on various objects in both indoor and outdoor environments. Moreover, real-time performance can be achieved with graphics processing unit acceleration.}, subject = {Mustererkennung}, language = {en} } @phdthesis{Xu2014, author = {Xu, Zhihao}, title = {Cooperative Formation Controller Design for Time-Delay and Optimality Problems}, isbn = {978-3-923959-96-9}, doi = {10.25972/OPUS-10555}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-105555}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {This dissertation presents controller design methodologies for a formation of cooperative mobile robots to perform trajectory tracking and convoy protection tasks. Two major problems related to multi-agent formation control are addressed, namely the time-delay and optimality problems. For the task of trajectory tracking, a leader-follower based system structure is adopted for the controller design, where the selection criteria for controller parameters are derived through analyses of characteristic polynomials. The resulting parameters ensure the stability of the system and overcome the steady-state error as well as the oscillation behavior under time-delay effects. In the convoy protection scenario, a decentralized coordination strategy for the balanced deployment of mobile robots is first proposed. Based on this coordination scheme, optimal controller parameters are generated in both centralized and decentralized fashion to achieve dynamic convoy protection in a unified framework, where a distributed optimization technique is applied in the decentralized strategy. This unified framework takes into account the motion of the target to be protected and the desired system performance, for instance, minimal energy to spend, equal inter-vehicle distance to keep, etc. 
Both trajectory tracking and convoy protection tasks are demonstrated through simulations and real-world hardware experiments based on the robotic equipment at the Department of Computer Science VII, University of W{\"u}rzburg.}, subject = {Optimalwertregelung}, language = {en} } @phdthesis{Tzschichholz2014, author = {Tzschichholz, Tristan}, title = {Relative pose estimation of known rigid objects using a novel approach to high-level PMD-/CCD- sensor data fusion with regard to applications in space}, isbn = {978-3-923959-95-2}, issn = {1868-7474}, doi = {10.25972/OPUS-10391}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-103918}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {In this work, a novel method for estimating the relative pose of a known object is presented, which relies on an application-specific data fusion process. A PMD sensor in conjunction with a CCD sensor is used to perform the pose estimation. Furthermore, the work provides a method for extending the measurement range of the PMD sensor, along with the necessary calibration methodology. Finally, extensive measurements on a very accurate rendezvous and docking testbed are made to evaluate the performance, which includes a detailed discussion of lighting conditions.}, subject = {Bildverarbeitung}, language = {en} } @phdthesis{Herrmann2013, author = {Herrmann, Christian}, title = {Robotic Motion Compensation for Applications in Radiation Oncology}, isbn = {978-3-923959-88-4}, doi = {10.25972/OPUS-6727}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-79045}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {Thanks to many improvements in treatment methods over the last 60 years, radiation therapy today allows precise treatment of static tumors. However, the irradiation of moving tumors still poses great challenges, since moving tumors often leave the treatment beam. As a result, the radiation dose delivered to the tumor decreases while the dose to the surrounding healthy tissue increases. This research aims to push the limits of radiation therapy in order to enable precise treatment of moving tumors. The focus of this work is the development of a real-time system for the active compensation of tumor motion by robotic means. During treatment, patients lie on a treatment couch, which is used to correct static positioning errors before the treatment begins. The treatment couch used in this work, the "HexaPOD", is a parallel manipulator with six degrees of freedom that can precisely position heavy loads within a limited workspace. Although the HexaPOD was not originally designed for dynamic applications, it is used here for continuous motion compensation by moving patients such that the tumor remains precisely in the central beam for the duration of an entire treatment. In order to realize a real-time capable compensation system based on the HexaPOD, a number of challenges have to be overcome. Real-time aspects are covered on the one hand by the use of a hard real-time operating system, and on the other hand by measuring and estimating the latencies of all physical quantities in the system, e.g. measurements of the tumor and respiration position. 
In addition to the consistent, end-to-end consideration of accurate timing information, all software-induced latencies are compensated adaptively. This requires predicting the tumor position into the near future. Numerous predictors for respiration and tumor position prediction are proposed and evaluated with a variety of metrics. Extensions of the prediction algorithms are introduced which fuse respiration and tumor position information to enable predictions without an explicit correlation model. The predictions determine the future motion path of the HexaPOD in order to compensate for tumor motion. To this end, several controllers are developed which enable trajectory tracking with the HexaPOD. Based on linear and non-linear dynamic modelling of the HexaPOD with system identification methods, a model predictive controller is developed first. A second controller is designed based on an assumption about the working principle of the HexaPOD's internal controller. Finally, a third controller is proposed which combines the two preceding controllers. For each of these controllers, comparative results from experiments with real hardware and human test subjects are presented and discussed. Furthermore, the appropriate choice of the free controller parameters is presented. Besides precise tracking of the reference trajectory, patient comfort plays a decisive role for the acceptance of the system. It is shown that the controllers can realize smooth trajectories, guaranteeing that patients feel comfortable while their tumor motion is compensated with sub-millimeter accuracy. Overall errors of the compensation system are analyzed by relating them to trajectory tracking errors and prediction errors. By exploiting the properties of different predictors, it is shown that the start-up time of the system until tracking of the reference trajectory is achieved amounts to only a few seconds, even for an initially stationary HexaPOD and without prior knowledge of the tumor motion. This demonstrates the suitability of the system for the very short, fractionated treatments of lung tumors. The tumor motion compensation system was developed exclusively on the basis of standard clinical hardware that can be found in many treatment rooms. Thanks to a simple and flexible design, treatment rooms can be upgraded with motion compensation capabilities in a cost-efficient way. Furthermore, current treatment methods such as intensity-modulated radiation therapy or Volumetric Modulated Arc Therapy are not restricted in any way. Due to the support of different compensation modes, the system can be applied to any moving tumor, regardless of whether the motion is predictable (lung tumors) or not predictable (prostate tumors). 
By integrating suitable methods for determining the tumor position, the system can easily be extended to the compensation of other tumors.}, subject = {Robotik}, language = {en} } @phdthesis{Schmidt2011, author = {Schmidt, Marco}, title = {Ground Station Networks for Efficient Operation of Distributed Small Satellite Systems}, isbn = {978-3-923959-77-8}, doi = {10.25972/OPUS-4984}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64999}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The field of small satellite formations and constellations has attracted growing attention, based on recent advances in small satellite engineering. The utilization of distributed space systems allows the realization of innovative applications and will enable improved temporal and spatial resolution in observation scenarios. On the other hand, this new paradigm imposes a variety of research challenges. In this monograph new networking concepts for space missions are presented, using networks of ground stations. The developed approaches combine ground station resources in a coordinated way to achieve more robust and efficient communication links. Within this thesis, the following topics were elaborated to improve the performance of distributed space missions: Appropriate scheduling of contact windows in a distributed ground system is necessary to avoid low utilization of ground stations. The theoretical basis for the novel concept of redundant scheduling was elaborated in detail. In addition to the presented algorithm, a scheduling system was implemented, and its performance was tested extensively with real-world scheduling problems. In the scope of data management, a system was developed which autonomously synchronizes data frames in ground station networks and uses this information to detect and correct transmission errors. The system was validated with hardware-in-the-loop experiments, demonstrating the benefits of the developed approach.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Sauer2010, author = {Sauer, Markus}, title = {Mixed-Reality for Enhanced Robot Teleoperation}, isbn = {978-3-923959-67-9}, doi = {10.25972/OPUS-4666}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55083}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {In recent years, research in robotics has progressed to the point where the human-machine interface is increasingly becoming the most critical component for achieving high overall performance in systems for robot navigation and coordination. This dissertation investigates how mixed-reality technologies can be used in user interfaces to increase this overall performance. To this end, concepts and technologies are developed which, through evaluation with user tests, enable an optimized and user-centered design of mixed-reality user interfaces. Both the technical requirements and the human factors are thus taken into account for a consistent system design. After a detailed problem analysis and the creation of a system model that includes the human as a key component, the use of the novel 3D time-of-flight camera is first analyzed and optimized, both for robot navigation and for employment in mixed-reality interfaces. 
Furthermore, it is shown how the network traffic of the video stream, the most important information element of most user interfaces for the navigation task, can be optimized at the network application layer in typical multi-robot networks with dynamic topologies and load situations. This makes it possible to preserve the video stream and stabilize the frame rate in otherwise typical failure scenarios. These advanced technologies are then also employed in the developed concept of a generic 3D mixed-reality interface. This concept enables an integrated 3D presentation of the available information, so that spatial relations between pieces of information are maintained and the number of mental transformations required of the human operator is reduced. At the same time, this approach also supports immersive stereo display technologies, which further improve the spatial understanding of the remote situation. The approaches presented and evaluated in this dissertation also exploit the fact that local robot autonomy can nowadays be realized very robustly. This is used, for example, to realize an assistance system with variable autonomy. Here, the remote operator receives, via force feedback combined with an integrated augmented reality interface, an impression of the situation at the remote workspace as well as of the robot's current navigation intention. The conducted user tests confirm the significant increase in navigation performance achieved by the developed approach. The robust local autonomy also enables the concept of the predictive mixed-reality interface introduced in this dissertation. The control loop over the human, decoupled by this approach, makes it possible to significantly reduce the visibility of unavoidable system delays. In addition, this approach allows the two viewpoints helpful for navigation to be combined in one 3D user interface - the exocentric viewpoint and the egocentric viewpoint as an augmented reality view.}, subject = {Mobiler Roboter}, language = {en} } @phdthesis{Zeiger2010, author = {Zeiger, Florian}, title = {Internet Protocol based networking of mobile robots}, isbn = {978-3-923959-59-4}, doi = {10.25972/OPUS-4661}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-54776}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This work is composed of three main parts: remote control of mobile systems via the Internet, ad-hoc networks of mobile robots, and remote control of mobile robots via 3G telecommunication technologies. The first part gives a detailed state of the art and a discussion of the problems to be solved in order to teleoperate mobile robots via the Internet. The focus of the application to be realized is set on a distributed tele-laboratory with remote experiments on mobile robots which can be accessed world-wide via the Internet. Therefore, analyses of the communication link are used in order to realize a robust system. The developed and implemented architecture of this distributed tele-laboratory allows for smooth access even with variable or low link quality. The second part covers the application of ad-hoc networks for mobile robots. 
The networking of mobile robots via mobile ad-hoc networks is a very promising approach to realize integrated telematic systems without relying on pre-existing communication infrastructure. Relevant civilian application scenarios are, for example, in the area of search and rescue operations where first responders are supported by multi-robot systems. Here, mobile robots, humans, and also existing stationary sensors can be connected very quickly and efficiently. Therefore, this work investigates and analyses the performance of different ad-hoc routing protocols for IEEE 802.11 based wireless networks in relevant scenarios. The analysis of the different protocols allows for an optimization of the parameter settings in order to use these ad-hoc routing protocols for mobile robot teleoperation. Guidelines for the realization of such telematic systems are given, and traffic shaping mechanisms at the application layer are presented which allow for a more efficient use of the communication link. An additional application scenario, the integration of a small-size helicopter into an IP based ad-hoc network, is presented. The teleoperation of mobile robots via 3G telecommunication technologies is addressed in the third part of this work. The high availability, high mobility, and high bandwidth provide a very interesting opportunity to realize scenarios for the teleoperation of mobile robots or industrial remote maintenance. This work analyses important parameters of the UMTS communication link and also investigates the characteristics of different data streams. These analyses are used to derive guidelines which are necessary for the realization of industrial remote maintenance or mobile robot teleoperation scenarios. All the results and guidelines for the design of telematic systems in this work were derived from analyses and experiments with real hardware.}, subject = {Robotik}, language = {en} } @phdthesis{Saska2009, author = {Saska, Martin}, title = {Trajectory planning and optimal control for formations of autonomous robots}, isbn = {978-3-923959-56-3}, doi = {10.25972/OPUS-4622}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-53175}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {In this thesis, we present novel approaches for formation driving of nonholonomic robots and optimal trajectory planning to reach a target region. The methods consider a static known map of the environment as well as unknown and dynamic obstacles detected by sensors of the formation. The algorithms are based on leader-following techniques, where the formation of car-like robots is maintained in a shape determined by curvilinear coordinates. Beyond this, the general methods of formation driving are specialized and extended for an application of airport snow shoveling. Detailed descriptions of the algorithms, complemented by relevant stability and convergence studies, are provided in the following chapters. 
Furthermore, the applicability of the approaches is verified by various simulations in existing robotic environments and also by a hardware experiment.}, subject = {Autonomer Roboter}, language = {en} } @phdthesis{Hess2009, author = {Hess, Martin}, title = {Motion coordination and control in systems of nonholonomic autonomous vehicles}, isbn = {978-3-923959-55-6}, doi = {10.25972/OPUS-3794}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46442}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {This work focuses on coordination methods and the control of motion in groups of nonholonomic wheeled mobile robots, in particular of the car-like type. These kinds of vehicles are particularly restricted in their mobility. In the main part of this work the two problems of formation motion coordination and of rendezvous in distributed multi-vehicle systems are considered. We introduce several enhancements to an existing motion planning approach for formations of nonholonomic mobile robots. Compared to the original method, the extended approach is able to handle time-varying reference speeds as well as adjustments of the formation's shape during reference trajectory segments with continuously differentiable curvature. Additionally, undesired discontinuities in the speed and steering profiles of the vehicles are avoided. Further, the scenario of snow shoveling on an airfield by utilizing multiple formations of autonomous snowplows is discussed. We propose solutions to the subproblems of motion planning for the formations and tracking control for the individual vehicles. While all situations that might occur have been tested in a simulation environment, we also verified the developed tracking controller in real robot hardware experiments. The task of the rendezvous problem in groups of car-like robots is to drive all vehicles to a common position by means of decentralized control laws. Typically there exists no direct interaction link between all of the vehicles. In this work we present decentralized rendezvous control laws for vehicles with free and with bounded steering. The convergence properties of the approaches are analyzed by utilizing Lyapunov based techniques. Furthermore, they are evaluated within various simulation experiments, while the bounded steering case is also verified within laboratory hardware experiments. Finally we introduce a modification to the bounded steering system that increases the convergence speed at the expense of a longer traveled distance of the vehicles.}, subject = {Robotik}, language = {en} } @phdthesis{Driewer2008, author = {Driewer, Frauke}, title = {Teleoperation Interfaces in Human-Robot Teams}, isbn = {978-3-923959-57-0}, doi = {10.25972/OPUS-2955}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-36351}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {This work deals with the improvement of human-robot interaction in human-robot teams for teleoperation scenarios, such as robot-assisted firefighting missions. A concept and an architecture for a system supporting the teleoperation of human-robot teams are presented. The requirements for information exchange and processing, in particular for the rescue mission application, are elaborated. Furthermore, the design of the user interfaces for human-robot teams is presented and principles for teleoperation systems and user interfaces are derived. 
All studies and approaches are implemented in a prototype system and validated in various user tests. Possible extensions for the integration of 3D sensor data and the presentation on stereo visualization systems are shown.}, subject = {Robotik}, language = {en} }