@phdthesis{Pfitzner2019, author = {Pfitzner, Christian}, title = {Visual Human Body Weight Estimation with Focus on Clinical Applications}, isbn = {978-3-945459-27-0 (online)}, doi = {10.25972/OPUS-17484}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174842}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The aim of this thesis is to present a visual body weight estimation that is suitable for medical applications. A typical scenario where the estimation of the body weight is essential is the emergency treatment of stroke patients: In case of an ischemic stroke, the patient has to receive a drug dosed according to body weight in order to dissolve a blood clot in a vessel. The accuracy of the estimated weight directly influences the outcome of the therapy. However, the treatment has to start as early as possible after arrival in the trauma room to be effective. Weighing a patient takes time, and the patient has to be moved. Furthermore, patients are often unable to communicate their body weight due to their stroke symptoms. Therefore, it is state of the art that physicians guess the body weight. A patient receiving too low a dose has an increased risk that the blood clot does not dissolve and brain tissue is permanently damaged. Today, about one-third of patients receive an insufficient dose. In contrast, an overdose can cause bleeding and further complications. Physicians are aware of this issue, but a reliable alternative is missing. The thesis presents state-of-the-art principles and devices for the measurement and estimation of body weight in the context of medical applications. While scales are common and available at a hospital, the process of weighing takes too long and can hardly be integrated into the process of stroke treatment. Sensor systems and algorithms are presented in the section on related work and provide an overview of different approaches. The system presented here -- called Libra3D -- consists of a computer installed in a real trauma room, as well as visual sensors integrated into the ceiling. For the estimation of the body weight, the patient lies on a stretcher placed in the field of view of the sensors. The three sensors -- two RGB-D cameras and a thermal camera -- are calibrated intrinsically and extrinsically. In addition, algorithms for sensor fusion are presented to align the data from all sensors, which is the basis for a reliable segmentation of the patient. A combination of state-of-the-art image and point cloud algorithms is used to localize the patient on the stretcher. The challenge in the scenario with the patient on the bed is the dynamic environment, including other people or medical devices in the field of view. After successful segmentation, a set of hand-crafted features is extracted from the patient's point cloud. These features rely on geometric and statistical values and provide a robust input to a subsequent machine learning approach. The final estimation is done with a previously trained artificial neural network. The experiment section examines different configurations of the extracted feature vector. Additionally, the approach presented here is compared to state-of-the-art methods: the patient's own assessment, the physician's guess, and an anthropometric estimation. Apart from the patient's own estimate, Libra3D outperforms all state-of-the-art estimation methods: 95 percent of all patients are estimated with a relative error of less than 10 percent with respect to the ground truth body weight.
The measurement takes only a minimal amount of time, and the approach can easily be integrated into the treatment of stroke patients without hindering the physicians. Furthermore, the experiment section demonstrates two additional applications: The extracted features can also be used to estimate the body weight of people standing or even walking in front of a 3D camera. Also, it is possible to determine or classify the BMI of a subject on a stretcher. A potential application for this approach is the reduction of the radiation dose for patients exposed to X-rays during a CT examination. During the time of this thesis, several data sets were recorded. These data sets contain the ground truth body weight as well as the data from the sensors. They are available for collaboration in the field of body weight estimation for medical applications.}, subject = {Punktwolke}, language = {en} } @phdthesis{Koch2018, author = {Koch, Rainer}, title = {Sensor Fusion for Precise Mapping of Transparent and Specular Reflective Objects}, isbn = {978-3-945459-25-6}, doi = {10.25972/OPUS-16346}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-163462}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Almost once a week, broadcasts about earthquakes, hurricanes, tsunamis, or forest fires fill the news. While it is hard to watch such news, it is even harder for rescue troops to enter such areas. They need to get a quick overview of the devastated area and find victims. Time is ticking, since the chance of survival shrinks the longer it takes until help arrives. To coordinate the teams efficiently, all information needs to be collected at the command center. Therefore, teams search the destroyed houses and hollow spaces for victims. In doing so, they can never be sure that the building will not fully collapse while they are inside. Here, rescue robots are welcome helpers, as they are replaceable and make the work safer. Unfortunately, rescue robots are not yet usable off-the-shelf. There is no doubt that such a robot has to fulfil essential requirements to successfully accomplish a rescue mission. Apart from the mechanical requirements, it has to be able to build a 3D map of the environment. This is essential to navigate through rough terrain and to perform manipulation tasks (e.g. opening doors). To build a map and gather environmental information, robots are equipped with multiple sensors. Since laser scanners produce precise measurements and support a wide scanning range, they are common visual sensors utilized for mapping. Unfortunately, they produce erroneous measurements when scanning transparent (e.g. glass, transparent plastic) or specular reflective objects (e.g. mirrors, shiny metal). Such objects can be present anywhere, and manipulating the scene in advance to prevent their influence is impossible. Using additional sensors also bears risks. The problem is that these objects are only occasionally visible, depending on the incident angle of the laser beam, the surface, and the type of object. Hence, for transparent objects, measurements might result from the object surface or from objects behind it. For specular reflective objects, measurements might result from the object surface or from a mirrored object. These mirrored objects appear behind the surface, which is wrong. To obtain a precise map, the surfaces need to be recognised and mapped reliably. Otherwise, the robot navigates into them and crashes.
Furthermore, points behind the surface should be identified and treated based on the object type. Points behind a transparent surface should remain, as they represent real objects. In contrast, points behind a specular reflective surface should be erased. To do so, the object type needs to be classified. Unfortunately, none of the current approaches is capable of fulfilling these requirements. Therefore, this thesis addresses the problem of detecting transparent and specular reflective objects and identifying their influences. To give the reader an introduction, the first chapters describe the theoretical background concerning the propagation of light; sensor systems applied for range measurements; mapping approaches used in this work; and the state of the art concerning the detection and identification of transparent and specular reflective objects. Afterwards, the Reflection-Identification-Approach, which is the core of this thesis, is presented. It comprises a 2D and a 3D implementation to detect and classify such objects; both are available as ROS nodes. In the next chapter, various experiments demonstrate the applicability and reliability of these nodes. They prove that transparent and specular reflective objects can be detected and classified. For this, a Pre- and Post-Filter module is required in 2D. In 3D, classification is possible solely with the Pre-Filter, due to the higher number of measurements. An example shows that an updatable mapping module allows the robot navigation to rely on refined maps. Otherwise, two individual maps are built which require a fusion afterwards. Finally, the last chapter summarizes the results and proposes suggestions for future work.}, subject = {laserscanner}, language = {en} } @phdthesis{Krenzer2023, author = {Krenzer, Adrian}, title = {Machine learning to support physicians in endoscopic examinations with a focus on automatic polyp detection in images and videos}, doi = {10.25972/OPUS-31911}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319119}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Deep learning enables enormous progress in many computer vision-related tasks. Artificial Intelligence (AI) steadily yields new state-of-the-art results in the field of detection and classification, where AI performance equals or exceeds human performance. Those achievements have impacted many domains, including medical applications. One particular field of medical applications is gastroenterology. In gastroenterology, machine learning algorithms are used to assist examiners during interventions. One of the most critical concerns for gastroenterologists is the development of Colorectal Cancer (CRC), which is one of the leading causes of cancer-related deaths worldwide. Detecting polyps in screening colonoscopies is the essential procedure to prevent CRC: during a colonoscopy, the gastroenterologist uses an endoscope to screen the whole colon for polyps. Polyps are mucosal growths that can vary in severity. This thesis supports gastroenterologists in their examinations with automated detection and classification systems for polyps. The main contribution is a real-time polyp detection system. This system is ready to be installed in any gastroenterology practice worldwide using open-source software. The system achieves state-of-the-art detection results and is currently being evaluated in a clinical trial in four different centers in Germany.
The thesis presents two additional key contributions. The first is a polyp detection system with extended vision, tested in an animal trial. Polyps often hide behind folds or in uninvestigated areas. Therefore, the polyp detection system with extended vision uses an endoscope assisted by two additional cameras to see behind those folds. If a polyp is detected, the endoscopist receives a visual signal. While the detection system handles the two additional camera inputs, the endoscopist focuses on the main camera as usual. The second consists of two polyp classification models, one for classification based on shape (Paris classification) and the other based on surface and texture (NBI International Colorectal Endoscopic (NICE) classification). Both classifications help the endoscopist with the treatment of, and the decisions about, the detected polyp. The key algorithms of the thesis achieve state-of-the-art performance. Notably, the polyp detection system, tested on a highly demanding video data set, shows an F1 score of 90.25 \% while working in real time. The results exceed those of all real-time systems in the literature. Furthermore, the first preliminary results of the clinical trial of the polyp detection system suggest a high Adenoma Detection Rate (ADR). In the preliminary study, all polyps were detected by the polyp detection system, and the system achieved a high usability score of 96.3 (max 100). The Paris classification model achieved an F1 score of 89.35 \%, which is state-of-the-art. The NICE classification model achieved an F1 score of 81.13 \%. Furthermore, a large data set for polyp detection and classification was created during this thesis. For this purpose, a fast and robust annotation system called Fast Colonoscopy Annotation Tool (FastCAT) was developed. The system simplifies the annotation process for gastroenterologists: the gastroenterologists only annotate key parts of the endoscopic video. Afterward, those video parts are pre-labeled by a polyp detection AI to speed up the process. After the AI has pre-labeled the frames, non-experts correct and finish the annotation. This annotation process is fast and ensures high quality. FastCAT reduces the overall workload of the gastroenterologist on average by a factor of 20 compared to an open-source state-of-the-art annotation tool.}, subject = {Deep Learning}, language = {en} } @phdthesis{Wagner2023, author = {Wagner, Jan Cetric}, title = {Maximalnetzplan zur reaktiven Steuerung von Produktionsabl{\"a}ufen}, isbn = {978-3-945459-43-0}, doi = {10.25972/OPUS-30545}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305452}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {182}, year = {2023}, abstract = {In manufacturing companies, various approaches are used for the planning, monitoring, and control of production processes. One of these methods is known as the activity-on-node network planning technique. The individual production steps are defined as nodes and connected by arrows. The arrows represent the relationships between the respective activities and thus the production sequence. This technique gives users a comprehensive overview of the individual process relations. In addition, it can be used to determine activity times and product completion times, which enables detailed production planning. A drawback of this technique is that it represents only a single executable process sequence.
If a disruption occurs and an activity can no longer be carried out, the original process must be deviated from. As a consequence, replanning becomes necessary. Alternatives for the disrupted activity are needed so that the process can continue despite the disruption. This thesis therefore describes an extension of the activity-on-node network planning technique which allows alternative activities for individual activities to be specified in addition to the planned target process. This method is called the Maximalnetzplan. In the event of a disruption, the alternatives are evaluated automatically and presented to the user in prioritized order. By using the Maximalnetzplan, elaborate replanning can be avoided. An assembly process serves as an application example to demonstrate the usability of the method. Furthermore, a temporal analysis of randomly generated Maximalnetzpl{\"a}ne provides a justification for executing alternatives and thus demonstrates the benefit of the Maximalnetzplan. In addition, it should be noted that terms used in this thesis such as user, worker, or employee are written in the masculine form. This is done solely for simplicity and is not intended to discriminate against other genders; the wording is meant to address all genders, whether male, female, or diverse.}, subject = {Produktionsplanung}, language = {de} } @phdthesis{Sauer2023, author = {Sauer, Christian}, title = {Development, Simulation and Evaluation of Mobile Wireless Networks in Industrial Applications}, doi = {10.25972/OPUS-29923}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-299238}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Many industrial automation solutions use wireless communication and rely on the availability and quality of the wireless channel. At the same time, the wireless medium is highly congested, and guaranteeing the availability of wireless channels is becoming increasingly difficult. In this work we show that ad-hoc networking solutions can be used to provide new communication channels and improve the performance of mobile automation systems. These ad-hoc networking solutions describe different communication strategies, but avoid relying on network infrastructure by utilizing the Peer-to-Peer (P2P) channel between communicating entities. This work is a step towards effectively bringing low-range communication technologies (e.g. Visible Light Communication (VLC), radar communication, mmWave communication) to industrial applications. Implementing infrastructure networks with these technologies is unrealistic, since the low communication range would necessitate a high number of Access Points (APs) to yield full coverage. However, ad-hoc networks do not require any network infrastructure. In this work, different ad-hoc networking solutions for the industrial use case are presented, and tools and models for their examination are proposed. The main use case investigated in this work is Automated Guided Vehicles (AGVs) for industrial applications. These mobile devices drive throughout the factory, transporting crates, goods, or tools, or assisting workers. In most implementations they must exchange data with a Central Control Unit (CCU) and with one another.
Predicting whether a certain communication technology is suitable for an application is very challenging, since the applications and the resulting requirements are very heterogeneous. The proposed models and simulation tools enable the simulation of the complex interaction of mobile robotic clients and a wireless communication network. The goal is to predict the characteristics of a networked AGV fleet. The proposed tools were used to implement, test, and examine different ad-hoc networking solutions for industrial applications using AGVs. These communication solutions handle time-critical and delay-tolerant communication. Additionally, a control method for the AGVs is proposed, which optimizes the communication and in turn increases the transport performance of the AGV fleet. Therefore, this work provides not only tools for further research on industrial ad-hoc systems, but also first implementations of ad-hoc systems which address many of the most pressing issues in industrial applications.}, subject = {Industrie}, language = {en} }