@phdthesis{Bleier2023, author = {Bleier, Michael}, title = {Underwater Laser Scanning - Refractive Calibration, Self-calibration and Mapping for 3D Reconstruction}, isbn = {978-3-945459-45-4}, doi = {10.25972/OPUS-32269}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322693}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {There is great interest in affordable, precise and reliable metrology underwater: Archaeologists want to document artifacts in situ with high detail. In marine research, biologists require the tools to monitor coral growth and geologists need recordings to model sediment transport. Furthermore, for offshore construction projects, maintenance and inspection millimeter-accurate measurements of defects and offshore structures are essential. While the process of digitizing individual objects and complete sites on land is well understood and standard methods, such as Structure from Motion or terrestrial laser scanning, are regularly applied, precise underwater surveying with high resolution is still a complex and difficult task. Applying optical scanning techniques in water is challenging due to reduced visibility caused by turbidity and light absorption. However, optical underwater scanners provide significant advantages in terms of achievable resolution and accuracy compared to acoustic systems. This thesis proposes an underwater laser scanning system and the algorithms for creating dense and accurate 3D scans in water. It is based on laser triangulation and the main optical components are an underwater camera and a cross-line laser projector. The prototype is configured with a motorized yaw axis for capturing scans from a tripod. Alternatively, it is mounted to a moving platform for mobile mapping. The main focus lies on the refractive calibration of the underwater camera and laser projector, the image processing and 3D reconstruction. 
For highest accuracy, the refraction at the individual media interfaces must be taken into account. This is addressed by an optimization-based calibration framework using a physical-geometric camera model derived from an analytical formulation of a ray-tracing projection model. In addition to scanning underwater structures, this work presents the 3D acquisition of semi-submerged structures and the correction of refraction effects. As in-situ calibration in water is complex and time-consuming, the challenge of transferring an in-air scanner calibration to water without re-calibration is investigated, as well as self-calibration techniques for structured light. The system was successfully deployed in various configurations for both static scanning and mobile mapping. An evaluation of the calibration and 3D reconstruction using reference objects and a comparison of free-form surfaces in clear water demonstrate the high accuracy potential in the range of one millimeter to less than one centimeter, depending on the measurement distance. Mobile underwater mapping and motion compensation based on visual-inertial odometry is demonstrated using a new optical underwater scanner based on fringe projection. Continuous registration of individual scans allows the acquisition of 3D models from an underwater vehicle. RGB images captured in parallel are used to create 3D point clouds of underwater scenes in full color. 3D maps are useful to the operator during the remote control of underwater vehicles and provide the building blocks to enable offshore inspection and surveying tasks. 
The advancing automation of the measurement technology will allow non-experts to use it, significantly reduce acquisition time and increase accuracy, making underwater metrology more cost-effective.}, subject = {Selbstkalibrierung}, language = {en} } @phdthesis{Reinhard2023, author = {Reinhard, Sebastian}, title = {Improving Super-Resolution Microscopy Data Reconstruction and Evaluation by Developing Advanced Processing Algorithms and Artificial Neuronal Networks}, doi = {10.25972/OPUS-31695}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-316959}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The fusion of methods from several disciplines is a crucial component of scientific development. Artificial Neural Networks, based on the principle of biological neuronal networks, demonstrate how nature provides the best templates for technological advancement. These innovations can then be employed to solve the remaining mysteries of biology, including, in particular, processes that take place on microscopic scales and can only be studied with sophisticated techniques. For instance, direct Stochastic Optical Reconstruction Microscopy combines tools from chemistry, physics, and computer science to visualize biological processes at the molecular level. One of the key components is the computer-aided reconstruction of super-resolved images. Improving the corresponding algorithms increases the quality of the generated data, providing further insights into our biology. It is important, however, to ensure that the heavily processed images are still a reflection of reality and do not originate in random artefacts. Expansion microscopy is expanding the sample by embedding it in a swellable hydrogel. The method can be combined with other super-resolution techniques to gain additional resolution. 
We tested this approach on microtubules, a well-known filamentous reference structure, to evaluate the performance of different protocols and labelling techniques. We developed LineProfiler an objective tool for data collection. Instead of collecting perpendicular profiles in small areas, the software gathers line profiles from filamentous structures of the entire image. This improves data quantity, quality and prevents a biased choice of the evaluated regions. On the basis of the collected data, we deployed theoretical models of the expected intensity distribution across the filaments. This led to the conclusion that post-expansion labelling significantly reduces the labelling error and thus, improves the data quality. The software was further used to determine the expansion factor and arrangement of synaptonemal complex data. Automated Simple Elastix uses state-of-the-art image alignment to compare pre- and post-expansion images. It corrects linear distortions occurring under isotropic expansion, calculates a structural expansion factor and highlights structural mismatches in a distortion map. We used the software to evaluate expanded fungi and NK cells. We found that the expansion factor differs for the two structures and is lower than the overall expansion of the hydrogel. Assessing the fluorescence lifetime of emitters used for direct Stochastic Optical Reconstruction Microscopy can reveal additional information about the molecular environment or distinguish dyes emitting with a similar wavelength. The corresponding measurements require a confocal scanning of the sample in combination with the fluorescent switching of the underlying emitters. This leads to non-linear, interrupted Point Spread Functions. The software ReCSAI targets this problem by combining the classical algorithm of compressed sensing with modern methods of artificial intelligence. 
We evaluated several different approaches to combine these components and found, that unrolling compressed sensing into the network architecture yields the best performance in terms of reconstruction speed and accuracy. In addition to a deep insight into the functioning and learning of artificial intelligence in combination with classical algorithms, we were able to reconstruct the described non-linearities with significantly improved resolution, in comparison to other state-of-the-art architectures.}, subject = {Mikroskopie}, language = {en} } @phdthesis{Schmithausen2019, author = {Schmithausen, Patrick Alexander Gerhard}, title = {Three-dimensional fluorescence image analysis of megakaryocytes and vascular structures in intact bone}, doi = {10.25972/OPUS-17854}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178541}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The thesis provides insights in reconstruction and analysis pipelines for processing of three-dimensional cell and vessel images of megakaryopoiesis in intact murine bone. The images were captured in a Light Sheet Fluorescence Microscope. The work presented here is part of Collaborative Research Centre (CRC) 688 (project B07) of the University of W{\"u}rzburg, performed at the Rudolf-Virchow Center. Despite ongoing research within the field of megakaryopoiesis, its spatio-temporal pattern of megakaryopoiesis is largely unknown. Deeper insight to this field is highly desirable to promote development of new therapeutic strategies for conditions related to thrombocytopathy as well as thrombocytopenia. The current concept of megakaryopoiesis is largely based on data from cryosectioning or in vitro studies indicating the existence of spatial niches within the bone marrow where specific stages of megakaryopoiesis take place. 
Since classic imaging of bone sections is typically limited to selective two-dimensional views and prone to cutting artefacts, imaging of intact murine bone is highly desired. However, this has its own challenges to meet, particularly in image reconstruction. Here, I worked on processing pipelines to account for irregular specimen staining or attenuation as well as the extreme heterogeneity of megakaryocyte morphology. Specific challenges for imaging and image reconstruction are tackled and solution strategies as well as remaining limitations are presented and discussed. Fortunately, modern image processing and segmentation strongly benefits from continuous advances in hardware as well as software-development. This thesis exemplifies how a combined effort in biomedicine, computer vision, data processing and image technology leads to deeper understanding of megakaryopoiesis. Tailored imaging pipelines significantly helped elucidating that the large megakaryocytes are broadly distributed throughout the bone marrow facing a surprisingly dense vessel network. No evidence was found for spatial niches in the bone marrow, eventually resulting in a revised model of megakaryopoiesis.}, subject = {Megakaryozytopoese}, language = {en} } @phdthesis{Pfitzner2019, author = {Pfitzner, Christian}, title = {Visual Human Body Weight Estimation with Focus on Clinical Applications}, isbn = {978-3-945459-27-0 (online)}, doi = {10.25972/OPUS-17484}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174842}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {It is the aim of this thesis to present a visual body weight estimation, which is suitable for medical applications. A typical scenario where the estimation of the body weight is essential, is the emergency treatment of stroke patients: In case of an ischemic stroke, the patient has to receive a body weight adapted drug, to solve a blood clot in a vessel. 
The accuracy of the estimated weight influences the outcome of the therapy directly. However, the treatment has to start as early as possible after the arrival at a trauma room, to provide sufficient treatment. Weighing a patient takes time, and the patient has to be moved. Furthermore, patients are often not able to communicate a value for their body weight due to their stroke symptoms. Therefore, it is state of the art that physicians guess the body weight. A patient receiving a too low dose has an increased risk that the blood clot does not dissolve and brain tissue is permanently damaged. Today, about one-third gets an insufficient dosage. In contrast to that, an overdose can cause bleedings and further complications. Physicians are aware of this issue, but a reliable alternative is missing. The thesis presents state-of-the-art principles and devices for the measurement and estimation of body weight in the context of medical applications. While scales are common and available at a hospital, the process of weighing takes too long and can hardly be integrated into the process of stroke treatment. Sensor systems and algorithms are presented in the section for related work and provide an overview of different approaches. The here presented system -- called Libra3D -- consists of a computer installed in a real trauma room, as well as visual sensors integrated into the ceiling. For the estimation of the body weight, the patient is on a stretcher which is placed in the field of view of the sensors. The three sensors -- two RGB-D and a thermal camera -- are calibrated intrinsically and extrinsically. Also, algorithms for sensor fusion are presented to align the data from all sensors which is the base for a reliable segmentation of the patient. A combination of state-of-the-art image and point cloud algorithms is used to localize the patient on the stretcher. 
The challenges in the scenario with the patient on the bed is the dynamic environment, including other people or medical devices in the field of view. After the successful segmentation, a set of hand-crafted features is extracted from the patient's point cloud. These features rely on geometric and statistical values and provide a robust input to a subsequent machine learning approach. The final estimation is done with a previously trained artificial neural network. The experiment section offers different configurations of the previously extracted feature vector. Additionally, the here presented approach is compared to state-of-the-art methods; the patient's own assessment, the physician's guess, and an anthropometric estimation. Besides the patient's own estimation, Libra3D outperforms all state-of-the-art estimation methods: 95 percent of all patients are estimated with a relative error of less than 10 percent to ground truth body weight. It takes only a minimal amount of time for the measurement, and the approach can easily be integrated into the treatment of stroke patients, while physicians are not hindered. Furthermore, the section for experiments demonstrates two additional applications: The extracted features can also be used to estimate the body weight of people standing, or even walking in front of a 3D camera. Also, it is possible to determine or classify the BMI of a subject on a stretcher. A potential application for this approach is the reduction of the radiation dose of patients being exposed to X-rays during a CT examination. During the time of this thesis, several data sets were recorded. These data sets contain the ground truth body weight, as well as the data from the sensors. 
They are available for the collaboration in the field of body weight estimation for medical applications.}, subject = {Punktwolke}, language = {en} } @phdthesis{PradaSalcedo2018, author = {Prada Salcedo, Juan Pablo}, title = {Image Processing and other bioinformatic tools for Neurobiology}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-157721}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Neurobiology is widely supported by bioinformatics. Due to the big amount of data generated from the biological side a computational approach is required. This thesis presents four different cases of bioinformatic tools applied to the service of Neurobiology. The first two tools presented belong to the field of image processing. In the first case, we make use of an algorithm based on the wavelet transformation to assess calcium activity events in cultured neurons. We designed an open source tool to assist neurobiology researchers in the analysis of calcium imaging videos. Such analysis is usually done manually which is time consuming and highly subjective. Our tool speeds up the work and offers the possibility of an unbiased detection of the calcium events. Even more important is that our algorithm not only detects the neuron spiking activity but also local spontaneous activity which is normally discarded because it is considered irrelevant. We showed that this activity is determinant in the calcium dynamics in neurons and it is involved in important functions like signal modulation and memory and learning. The second project is a segmentation task. In our case we are interested in segmenting the neuron nuclei in electron microscopy images of c.elegans. Marking these structures is necessary in order to reconstruct the connectome of the organism. C.elegans is a great study case due to the simplicity of its nervous system (only 502 neurons). This worm, despite its simplicity has taught us a lot about neuronal mechanisms. 
There is still a lot of information we can extract from the c.elegans, therein lies the importance of reconstructing its connectome. There is a current version of the c.elegans connectome but it was done by hand and on a single subject which leaves a big room for errors. By automatizing the segmentation of the electron microscopy images we guarantee an unbiased approach and we will be able to verify the connectome on several subjects. For the third project we moved from image processing applications to biological modeling. Because of the high complexity of even small biological systems it is necessary to analyze them with the help of computational tools. The term in silico was coined to refer to such computational models of biological systems. We designed an in silico model of the TNF (Tumor necrosis factor) ligand and its two principal receptors. This biological system is of high relevance because it is involved in the inflammation process. Inflammation is of most importance as protection mechanism but it can also lead to complicated diseases (e.g. cancer). Chronic inflammation processes can be particularly dangerous in the brain. In order to better understand the dynamics that govern the TNF system we created a model using the BioNetGen language. This is a rule based language that allows one to simulate systems where multiple agents are governed by a single rule. Using our model we characterized the TNF system and hypothesized about the relation of the ligand with each of the two receptors. Our hypotheses can be later used to define drug targets in the system or possible treatments for chronic inflammation or lack of the inflammatory response. The final project deals with the protein folding problem. In our organism proteins are folded all the time, because only in their folded conformation are proteins capable of doing their job (with some very few exceptions). 
This folding process presents a great challenge for science because it has been shown to be an NP problem. NP means non deterministic Polynomial time problem. This basically means that this kind of problems cannot be efficiently solved. Nevertheless, somehow the body is capable of folding a protein in just milliseconds. This phenomenon puzzles not only biologists but also mathematicians. In mathematics NP problems have been studied for a long time and it is known that given the solution to one NP problem we could solve many of them (i.e. NP-complete problems). If we manage to understand how nature solves the protein folding problem then we might be able to apply this solution to many other problems. Our research intends to contribute to this discussion. Unfortunately, not to explain how nature solves the protein folding problem, but to explain that it does not solve the problem at all. This seems contradictory since I just mentioned that the body folds proteins all the time, but our hypothesis is that the organisms have learned to solve a simplified version of the NP problem. Nature does not solve the protein folding problem in its full complexity. It simply solves a small instance of the problem. An instance which is as simple as a convex optimization problem. We formulate the protein folding problem as an optimization problem to illustrate our claim and present some toy examples to illustrate the formulation. If our hypothesis is true, it means that protein folding is a simple problem. So we just need to understand and model the conditions of the vicinity inside the cell at the moment the folding process occurs. Once we understand this starting conformation and its influence in the folding process we will be able to design treatments for amyloid diseases such as Alzheimer's and Parkinson's. In summary this thesis project contributes to the neurobiology research field from four different fronts. 
Two are practical contributions with immediate benefits, such as the calcium imaging video analysis tool and the TNF in silico model. The neuron nuclei segmentation is a contribution for the near future. A step towards the full annotation of the c.elegans connectome and later for the reconstruction of the connectome of other species. And finally, the protein folding project is a first impulse to change the way we conceive the protein folding process in nature. We try to point future research in a novel direction, where the amino code is not the most relevant characteristic of the process but the conditions within the cell.}, subject = {Bildverarbeitung}, language = {en} } @phdthesis{Tzschichholz2014, author = {Tzschichholz, Tristan}, title = {Relative pose estimation of known rigid objects using a novel approach to high-level PMD-/CCD- sensor data fusion with regard to applications in space}, isbn = {978-3-923959-95-2}, issn = {1868-7474}, doi = {10.25972/OPUS-10391}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-103918}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {In this work, a novel method for estimating the relative pose of a known object is presented, which relies on an application-specific data fusion process. A PMD-sensor in conjunction with a CCD-sensor is used to perform the pose estimation. Furthermore, the work provides a method for extending the measurement range of the PMD sensor along with the necessary calibration methodology. 
Finally, extensive measurements on a very accurate Rendezvous and Docking testbed are made to evaluate the performance, which includes a detailed discussion of lighting conditions.}, subject = {Bildverarbeitung}, language = {en} } @phdthesis{Schmid2010, author = {Schmid, Benjamin}, title = {Computational tools for the segmentation and registration of confocal brain images of {Drosophila} melanogaster}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51490}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {Neuroanatomical data in fly brain research are mostly available as spatial gene expression patterns of genetically distinct fly strains. The Drosophila standard brain, which was developed in the past to provide a reference coordinate system, can be used to integrate these data. Working with the standard brain requires advanced image processing methods, including visualisation, segmentation and registration. The previously published VIB Protocol addressed the problem of image registration. Unfortunately, its usage was severely limited by the necessity of manually labelling a predefined set of neuropils in the brain images at hand. In this work I present novel tools to facilitate the work with the Drosophila standard brain. These tools are integrated in a well-known open-source image processing framework which can potentially serve as a common platform for image analysis in the neuroanatomical research community: ImageJ. In particular, a hardware-accelerated 3D visualisation framework was developed for ImageJ which extends its limited 3D visualisation capabilities. It is used for the development of a novel semi-automatic segmentation method, which implements automatic surface growing based on user-provided seed points. Template surfaces, incorporated with a modified variant of an active surface model, complement the segmentation. 
An automatic nonrigid warping algorithm is applied, based on point correspondences established through the extracted surfaces. Finally, I show how the individual steps can be fully automated, and demonstrate its application for the successful registration of fly brain images. The new tools are freely available as ImageJ plugins. I compare the results obtained by the introduced methods with the output of the VIB Protocol and conclude that our methods reduce the required effort five to ten fold. Furthermore, reproducibility and accuracy are enhanced using the proposed tools.}, subject = {Taufliege}, language = {en} } @phdthesis{Wawrowsky2007, author = {Wawrowsky, Kolja Alexander}, title = {Analysis and Visualization in Multidimensional Microscopy}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-23867}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {The life sciences currently undergo a paradigm shift to computer aided discoveries. Discoveries in the life sciences were historically made by either direct observation or as a result of chemical assays. Today we see a growing shift toward computer aided analysis and visualization. This gradual process happens in microscopy. Multidimensional laser scanning microscopy can acquire very complex multichannel data from fixed or live specimen. New probes such as visible fluorescent proteins let us observe the expression of genes and track protein localization. Ion sensitive dyes change intensity with the concentration of ions in the cell. The laser scanning confocal allows us to record these processes in three dimensions over time. This work demonstrates the application of software analysis to multidimensional microscopy data. We introduce methods for volume investigation, ion flux analysis and molecular modeling. The visualization methods are based on a multidimensional data model to accommodate complex datasets. 
The software uses vector processing and multiple processors to accelerate volume rendering and achieve interactive rendering. The algorithms are based on human visual perception and allow the observer a wide range of mixed render modes. The software was used to reconstruct the pituitary development in zebrafish and observe the degeneration of neurons after injury in a mouse model. Calcium indicator dyes have long been used to study calcium fluxes. We optimized the imaging method to minimize impact on the cell. Live cells were imaged continuously for 45 minutes and subjected to increasing doses of a drug. We correlated the amplitude of calcium oscillations to increasing doses of a drug and obtain single cell dose response curves. Because this method is very sensitive and measures single cell responses it has potential in drug discovery and characterization. Microtubules form a dynamic cytoskeleton, which is responsible for cell shape, intracellular transport and has an integral role in mitosis. A hallmark of microtubule organization is lateral interactions. Microtubules are bundled by proteins into dense structures. To estimate the contribution of this bundling process, we created a fractal model of microtubule organization. This model demonstrates that morphology of complex microtubule arrays can be explained by bundling alone. In summary we showed that advances in software for visualization, data analysis and modeling lead to new discoveries.}, subject = {Konfokale Mikroskopie}, language = {en} } @phdthesis{Schindelin2005, author = {Schindelin, Johannes}, title = {The standard brain of {Drosophila} melanogaster and its automatic segmentation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-15518}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {In this thesis, I introduce the Virtual Brain Protocol, which facilitates applications of the Standard Brain of Drosophila melanogaster. 
By providing reliable and extensible tools for the handling of neuroanatomical data, this protocol simplifies and organizes the recurring tasks involved in these applications. It is demonstrated that this protocol can also be used to generate average brains, i.e. to combine recordings of several brains with the same features such that the common features are emphasized. One of the most important steps of the Virtual Insect Protocol is the aligning of newly recorded data sets with the Standard Brain. After presenting methods commonly applied in a biological or medical context to align two different recordings, it is evaluated to what extent this alignment can be automated. To that end, existing Image Processing techniques are assessed. I demonstrate that these techniques do not satisfy the requirements needed to guarantee sensible alignments between two brains. Then, I analyze what needs to be taken into account in order to formulate an algorithm which satisfies the needs of the protocol. In the last chapter, I derive such an algorithm using methods from Information Theory, which bases the technique on a solid mathematical foundation. I show how Bayesian Inference can be applied to enhance the results further. It is demonstrated that this approach yields good results on very noisy images, detecting apparent boundaries between structures. The same approach can be extended to take additional knowledge into account, e.g. the relative position of the anatomical structures and their shape. It is shown how this extension can be utilized to segment a newly recorded brain automatically.}, subject = {Taufliege}, language = {en} }