@phdthesis{Koch2018, author = {Koch, Rainer}, title = {Sensor Fusion for Precise Mapping of Transparent and Specular Reflective Objects}, isbn = {978-3-945459-25-6}, doi = {10.25972/OPUS-16346}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-163462}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Almost every week, broadcasts about earthquakes, hurricanes, tsunamis, or forest fires fill the news. While it is hard to watch such news, it is even harder for rescue teams to enter the affected areas. They need to gain a quick overview of the devastated area and find victims. Time is critical, since the chance of survival shrinks the longer it takes until help arrives. To coordinate the teams efficiently, all information needs to be collected at the command center. Therefore, teams search the destroyed houses and hollow spaces for victims. In doing so, they can never be sure that the building will not collapse completely while they are inside. Here, rescue robots are welcome helpers, as they are replaceable and make the work safer. Unfortunately, rescue robots are not yet usable off the shelf. There is no doubt that such a robot has to fulfil essential requirements to successfully accomplish a rescue mission. Apart from the mechanical requirements, it has to be able to build a 3D map of the environment. This is essential for navigating through rough terrain and fulfilling manipulation tasks (e.g. opening doors). To build a map and gather environmental information, robots are equipped with multiple sensors. Since laser scanners produce precise measurements and support a wide scanning range, they are the visual sensors most commonly utilized for mapping. Unfortunately, they produce erroneous measurements when scanning transparent objects (e.g. glass, transparent plastic) or specular reflective objects (e.g. mirrors, shiny metal). Such objects can be anywhere, and manipulating the environment beforehand to prevent their influence is impossible. Using additional sensors also bears risks. The problem is that these objects are only occasionally visible, depending on the incident angle of the laser beam, the surface, and the type of object. Hence, for transparent objects, measurements might result from the object surface or from objects behind it. For specular reflective objects, measurements might result from the object surface or from a mirrored object. These mirrored objects appear behind the surface, which is wrong. To obtain a precise map, the surfaces need to be recognised and mapped reliably; otherwise, the robot navigates into them and crashes. Further, points behind the surface should be identified and treated according to the object type: points behind a transparent surface should remain, as they represent real objects, whereas points behind a specular reflective surface should be erased. To do so, the object type needs to be classified. Unfortunately, none of the current approaches is capable of fulfilling these requirements. Therefore, this thesis addresses the problem of detecting transparent and specular reflective objects and identifying their influences. To give the reader an introduction, the first chapters describe the theoretical background concerning the propagation of light, the sensor systems applied for range measurements, the mapping approaches used in this work, and the state of the art concerning the detection and identification of transparent and specular reflective objects. Afterwards, the Reflection-Identification-Approach, which is the core of this thesis, is presented.
It comprises a 2D and a 3D implementation to detect and classify such objects; both are available as ROS nodes. In the next chapter, various experiments demonstrate the applicability and reliability of these nodes and prove that transparent and specular reflective objects can be detected and classified. In 2D, a Pre- and Post-Filter module is required for this; in 3D, classification is possible with the Pre-Filter alone, owing to the higher number of measurements. An example shows that an updatable mapping module allows robot navigation to rely on refined maps. Otherwise, two individual maps are built, which require fusion afterwards. Finally, the last chapter summarizes the results and proposes suggestions for future work.}, subject = {laserscanner}, language = {en} }

@phdthesis{Pfitzner2019, author = {Pfitzner, Christian}, title = {Visual Human Body Weight Estimation with Focus on Clinical Applications}, isbn = {978-3-945459-27-0 (online)}, doi = {10.25972/OPUS-17484}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174842}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The aim of this thesis is to present a visual body weight estimation approach that is suitable for medical applications. A typical scenario where the estimation of body weight is essential is the emergency treatment of stroke patients: in the case of an ischemic stroke, the patient has to receive a drug dosed according to body weight in order to dissolve a blood clot in a vessel. The accuracy of the estimated weight directly influences the outcome of the therapy. Moreover, the treatment has to start as early as possible after arrival at the trauma room. Weighing a patient takes time, and the patient has to be moved. Furthermore, patients are often not able to communicate their body weight due to their stroke symptoms. Therefore, it is common practice for physicians to guess the body weight. A patient receiving too low a dose has an increased risk that the blood clot does not dissolve and brain tissue is permanently damaged; today, about one-third of patients receive an insufficient dosage. In contrast, an overdose can cause bleeding and further complications. Physicians are aware of this issue, but a reliable alternative is missing. The thesis presents state-of-the-art principles and devices for the measurement and estimation of body weight in the context of medical applications. While scales are common and available in hospitals, the process of weighing takes too long and can hardly be integrated into the workflow of stroke treatment. Sensor systems and algorithms are presented in the related-work section, providing an overview of different approaches. The system presented here -- called Libra3D -- consists of a computer installed in a real trauma room, as well as visual sensors integrated into the ceiling. For the estimation of the body weight, the patient lies on a stretcher placed in the field of view of the sensors. The three sensors -- two RGB-D cameras and a thermal camera -- are calibrated intrinsically and extrinsically. Algorithms for sensor fusion are presented to align the data from all sensors, which is the basis for a reliable segmentation of the patient. A combination of state-of-the-art image and point cloud algorithms is used to localize the patient on the stretcher. The challenge in this scenario is the dynamic environment, including other people or medical devices in the field of view.
After the successful segmentation, a set of hand-crafted features is extracted from the patient's point cloud. These features rely on geometric and statistical values and provide a robust input to a subsequent machine learning approach. The final estimation is done with a previously trained artificial neural network. The experiment section evaluates different configurations of the extracted feature vector. Additionally, the approach presented here is compared to state-of-the-art methods: the patient's own assessment, the physician's guess, and an anthropometric estimation. Apart from the patient's own estimation, Libra3D outperforms all state-of-the-art estimation methods: 95 percent of all patients are estimated with a relative error of less than 10 percent with respect to the ground-truth body weight. The measurement takes only a minimal amount of time, and the approach can easily be integrated into the treatment of stroke patients without hindering physicians. Furthermore, the experiment section demonstrates two additional applications: the extracted features can also be used to estimate the body weight of people standing, or even walking, in front of a 3D camera, and it is possible to determine or classify the BMI of a subject on a stretcher. A potential application of the latter is the reduction of the radiation dose for patients exposed to X-rays during a CT examination. During the course of this thesis, several data sets were recorded. These data sets contain the ground-truth body weight as well as the sensor data, and they are available for collaboration in the field of body weight estimation for medical applications.}, subject = {Punktwolke}, language = {en} }

@phdthesis{Eidel2020, author = {Eidel, Matthias T. A. M.}, title = {Training Effects of a Tactile Brain-Computer Interface System During Prolonged Use by Healthy and Motor-Impaired People}, doi = {10.25972/OPUS-20851}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-208511}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Background - Brain-computer interfaces (BCIs) enable their users to interact and communicate with the environment without requiring intact muscle control. To this end, brain activity is directly measured, digitized, and interpreted by the computer. Thus, BCIs may be a valuable tool to assist severely or even completely paralysed patients. Many BCIs, however, rely on neurophysiological potentials evoked by visual stimulation, which can result in usability issues among patients with impaired vision or gaze control. Because of this, several non-visual BCI paradigms have been developed. Most notably, a recent study revealed promising results from a tactile BCI for wheelchair control. In this multi-session approach, healthy participants used the BCI to navigate a simulated wheelchair through a virtual apartment, which revealed not only that the BCI could be operated highly efficiently, but also that performance could be trained over five sessions. The present thesis continues the research on this paradigm in order to (1) confirm its previously reported high performance levels and trainability, (2) reveal the underlying factors responsible for the observed performance increases, and (3) establish its feasibility among impaired potential end-users. Methods - To approach these goals, three studies were conducted with both healthy participants and patients with amyotrophic lateral sclerosis (ALS).
Brain activity during BCI operation was recorded via electroencephalography (EEG) and interpreted using a machine-learning-based linear classifier. Wheelchair navigation was executed according to the classification results and visualized on a monitor. For offline statistical analysis, neurophysiological features were extracted from the EEG data. Subjective data on usability were collected from all participants. Two specialized experiments were conducted to identify the factors underlying training. Results and Discussion - Healthy participants: Results revealed positive effects of training on BCI performance and the underlying neurophysiological potentials. The paradigm was confirmed to be feasible and (for a non-visual BCI) highly efficient for most participants. However, some had to be excluded from the analysis of training effects because they could not achieve meaningful BCI control. Increased somatosensory sensitivity was identified as a possible mediator of the training-related performance improvements. Participants with ALS: Out of seven patients at various stages of ALS, five could operate the BCI with accuracies significantly above chance level. Another ALS patient in a state of near-complete paralysis trained with the BCI for several months. Although no effects of training were observed, he was consistently able to operate the system above chance level. Subjective data regarding workload, satisfaction, and other parameters were reported. Significance - The tactile BCI was evaluated using the example of wheelchair control. In the future, it could help impaired patients regain some lost mobility and self-sufficiency. Further, it has the potential to be adapted to other purposes, including communication. When visual BCIs and other assistive technologies fail for patients with (progressive) motor impairments, vision-independent paradigms such as the tactile BCI may be among the last remaining alternatives for interacting with the environment. The present thesis has strongly confirmed the general feasibility of the tactile paradigm for healthy participants and provides first clues about the underlying factors of training. More importantly, the BCI was established among potential end-users with ALS, providing essential external validity.}, subject = {Myatrophische Lateralsklerose}, language = {en} }