% Bibliography export (OPUS/nbn-resolving repository records).
% Conventions in this file: classic-BibTeX LaTeX escapes for non-ASCII
% ({\"u}, {\"a}); bare DOIs; acronyms in titles brace-protected so
% sentence-casing styles keep their capitalisation; page ranges use "--".

@article{GramGenslerAlbertovaetal.2022,
  author   = {Gram, Maximilian and Gensler, Daniel and Albertova, Petra and Gutjahr, Fabian Tobias and Lau, Kolja and Arias-Loza, Paula-Anahi and Jakob, Peter Michael and Nordbeck, Peter},
  title    = {Quantification correction for free-breathing myocardial {T1ρ} mapping in mice using a recursively derived description of a {T\(_{1p}\)\(^{*}\)} relaxation pathway},
  series   = {Journal of Cardiovascular Magnetic Resonance},
  volume   = {24},
  journal  = {Journal of Cardiovascular Magnetic Resonance},
  number   = {1},
  doi      = {10.1186/s12968-022-00864-2},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300491},
  year     = {2022},
  abstract = {Background Fast and accurate T1ρ mapping in myocardium is still a major challenge, particularly in small animal models. The complex sequence design owing to electrocardiogram and respiratory gating leads to quantification errors in in vivo experiments, due to variations of the T\(_{1p}\) relaxation pathway. In this study, we present an improved quantification method for T\(_{1p}\) using a newly derived formalism of a T\(_{1p}\)\(^{*}\) relaxation pathway. Methods The new signal equation was derived by solving a recursion problem for spin-lock prepared fast gradient echo readouts. Based on Bloch simulations, we compared quantification errors using the common monoexponential model and our corrected model. The method was validated in phantom experiments and tested in vivo for myocardial T\(_{1p}\) mapping in mice. Here, the impact of the breath dependent spin recovery time T\(_{rec}\) on the quantification results was examined in detail. Results Simulations indicate that a correction is necessary, since systematically underestimated values are measured under in vivo conditions. In the phantom study, the mean quantification error could be reduced from - 7.4\% to - 0.97\%. In vivo, a correlation of uncorrected T\(_{1p}\) with the respiratory cycle was observed. Using the newly derived correction method, this correlation was significantly reduced from r = 0.708 (p < 0.001) to r = 0.204 and the standard deviation of left ventricular T\(_{1p}\) values in different animals was reduced by at least 39\%. Conclusion The suggested quantification formalism enables fast and precise myocardial T\(_{1p}\) quantification for small animals during free breathing and can improve the comparability of study results. Our new technique offers a reasonable tool for assessing myocardial diseases, since pathologies that cause a change in heart or breathing rates do not lead to systematic misinterpretations. Besides, the derived signal equation can be used for sequence optimization or for subsequent correction of prior study results.},
  language = {en}
}

@article{AndelovicWinterKampfetal.2021,
  author   = {Andelovic, Kristina and Winter, Patrick and Kampf, Thomas and Xu, Anton and Jakob, Peter Michael and Herold, Volker and Bauer, Wolfgang Rudolf and Zernecke, Alma},
  title    = {{2D} Projection Maps of {WSS} and {OSI} Reveal Distinct Spatiotemporal Changes in Hemodynamics in the Murine Aorta during Ageing and Atherosclerosis},
  series   = {Biomedicines},
  volume   = {9},
  journal  = {Biomedicines},
  number   = {12},
  issn     = {2227-9059},
  doi      = {10.3390/biomedicines9121856},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-252164},
  year     = {2021},
  abstract = {Growth, ageing and atherosclerotic plaque development alter the biomechanical forces acting on the vessel wall. However, monitoring the detailed local changes in wall shear stress (WSS) at distinct sites of the murine aortic arch over time has been challenging. Here, we studied the temporal and spatial changes in flow, WSS, oscillatory shear index (OSI) and elastic properties of healthy wildtype (WT, n = 5) and atherosclerotic apolipoprotein E-deficient (Apoe\(^{-/-}\), n = 6) mice during ageing and atherosclerosis using high-resolution 4D flow magnetic resonance imaging (MRI). Spatially resolved 2D projection maps of WSS and OSI of the complete aortic arch were generated, allowing the pixel-wise statistical analysis of inter- and intragroup hemodynamic changes over time and local correlations between WSS, pulse wave velocity (PWV), plaque and vessel wall characteristics. The study revealed converse differences of local hemodynamic profiles in healthy WT and atherosclerotic Apoe\(^{-/-}\) mice, and we identified the circumferential WSS as potential marker of plaque size and composition in advanced atherosclerosis and the radial strain as a potential marker for vascular elasticity. Two-dimensional (2D) projection maps of WSS and OSI, including statistical analysis provide a powerful tool to monitor local aortic hemodynamics during ageing and atherosclerosis. The correlation of spatially resolved hemodynamics and plaque characteristics could significantly improve our understanding of the impact of hemodynamics on atherosclerosis, which may be key to understand plaque progression towards vulnerability.},
  language = {en}
}

@article{DuLauterbachLietal.2020,
  author   = {Du, Shitong and Lauterbach, Helge A. and Li, Xuyou and Demisse, Girum G. and Borrmann, Dorit and N{\"u}chter, Andreas},
  title    = {Curvefusion --- A Method for Combining Estimated Trajectories with Applications to {SLAM} and Time-Calibration},
  series   = {Sensors},
  volume   = {20},
  journal  = {Sensors},
  number   = {23},
  issn     = {1424-8220},
  doi      = {10.3390/s20236918},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219988},
  year     = {2020},
  abstract = {Mapping and localization of mobile robots in an unknown environment are essential for most high-level operations like autonomous navigation or exploration. This paper presents a novel approach for combining estimated trajectories, namely curvefusion. The robot used in the experiments is equipped with a horizontally mounted 2D profiler, a constantly spinning 3D laser scanner and a GPS module. The proposed algorithm first combines trajectories from different sensors to optimize poses of the planar three degrees of freedom (DoF) trajectory, which is then fed into continuous-time simultaneous localization and mapping (SLAM) to further improve the trajectory. While state-of-the-art multi-sensor fusion methods mainly focus on probabilistic methods, our approach instead adopts a deformation-based method to optimize poses. To this end, a similarity metric for curved shapes is introduced into the robotics community to fuse the estimated trajectories. Additionally, a shape-based point correspondence estimation method is applied to the multi-sensor time calibration. Experiments show that the proposed fusion method can achieve relatively better accuracy, even if the error of the trajectory before fusion is large, which demonstrates that our method can still maintain a certain degree of accuracy in an environment where typical pose estimation methods have poor performance. In addition, the proposed time-calibration method also achieves high accuracy in estimating point correspondences.},
  language = {en}
}

@phdthesis{Koch2018,
  author   = {Koch, Rainer},
  title    = {Sensor Fusion for Precise Mapping of Transparent and Specular Reflective Objects},
  isbn     = {978-3-945459-25-6},
  doi      = {10.25972/OPUS-16346},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-163462},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2018},
  abstract = {Almost once a week broadcasts about earthquakes, hurricanes, tsunamis, or forest fires are filling the news. While oneself feels it is hard to watch such news, it is even harder for rescue troops to enter such areas. They need some skills to get a quick overview of the devastated area and find victims. Time is ticking, since the chance for survival shrinks the longer it takes till help is available. To coordinate the teams efficiently, all information needs to be collected at the command center. Therefore, teams investigate the destroyed houses and hollow spaces for victims. Doing so, they never can be sure that the building will not fully collapse while they are inside. Here, rescue robots are welcome helpers, as they are replaceable and make work more secure. Unfortunately, rescue robots are not usable off-the-shelf, yet. There is no doubt, that such a robot has to fulfil essential requirements to successfully accomplish a rescue mission. Apart from the mechanical requirements it has to be able to build a 3D map of the environment. This is essential to navigate through rough terrain and fulfil manipulation tasks (e.g. open doors). To build a map and gather environmental information, robots are equipped with multiple sensors. Since laser scanners produce precise measurements and support a wide scanning range, they are common visual sensors utilized for mapping. Unfortunately, they produce erroneous measurements when scanning transparent (e.g. glass, transparent plastic) or specular reflective objects (e.g. mirror, shiny metal). It is understood that such objects can be everywhere and a pre-manipulation to prevent their influences is impossible. Using additional sensors also bear risks. The problem is that these objects are occasionally visible, based on the incident angle of the laser beam, the surface, and the type of object. Hence, for transparent objects, measurements might result from the object surface or objects behind it. For specular reflective objects, measurements might result from the object surface or a mirrored object. These mirrored objects are illustrated behind the surface which is wrong. To obtain a precise map, the surfaces need to be recognised and mapped reliably. Otherwise, the robot navigates into it and crashes. Further, points behind the surface should be identified and treated based on the object type. Points behind a transparent surface should remain as they represent real objects. In contrast, points behind a specular reflective surface should be erased. To do so, the object type needs to be classified. Unfortunately, none of the current approaches is capable to fulfil these requirements. Therefore, the following thesis addresses this problem to detect transparent and specular reflective objects and to identify their influences. To give the reader a start up, the first chapters describe: the theoretical background concerning propagation of light; sensor systems applied for range measurements; mapping approaches used in this work; and the state-of-the-art concerning detection and identification of transparent and specular reflective objects. Afterwards, the Reflection-Identification-Approach, which is the core of subject thesis is presented. It describes 2D and a 3D implementation to detect and classify such objects. Both are available as ROS-nodes. In the next chapter, various experiments demonstrate the applicability and reliability of these nodes. It proves that transparent and specular reflective objects can be detected and classified. Therefore, a Pre- and Post-Filter module is required in 2D. In 3D, classification is possible solely with the Pre-Filter. This is due to the higher amount of measurements. An example shows that an updatable mapping module allows the robot navigation to rely on refined maps. Otherwise, two individual maps are build which require a fusion afterwards. Finally, the last chapter summarizes the results and proposes suggestions for future work.},
  subject  = {laserscanner},
  language = {en}
}

@article{AsareKyeiForkuorVenus2015,
  author   = {Asare-Kyei, Daniel and Forkuor, Gerald and Venus, Valentijn},
  title    = {Modeling Flood Hazard Zones at the Sub-District Level with the Rational Model Integrated with {GIS} and Remote Sensing Approaches},
  series   = {Water},
  volume   = {7},
  journal  = {Water},
  doi      = {10.3390/w7073531},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-151581},
  pages    = {3531--3564},
  year     = {2015},
  abstract = {Robust risk assessment requires accurate flood intensity area mapping to allow for the identification of populations and elements at risk. However, available flood maps in West Africa lack spatial variability while global datasets have resolutions too coarse to be relevant for local scale risk assessment. Consequently, local disaster managers are forced to use traditional methods such as watermarks on buildings and media reports to identify flood hazard areas. In this study, remote sensing and Geographic Information System (GIS) techniques were combined with hydrological and statistical models to delineate the spatial limits of flood hazard zones in selected communities in Ghana, Burkina Faso and Benin. The approach involves estimating peak runoff concentrations at different elevations and then applying statistical methods to develop a Flood Hazard Index (FHI). Results show that about half of the study areas fall into high intensity flood zones. Empirical validation using statistical confusion matrix and the principles of Participatory GIS show that flood hazard areas could be mapped at an accuracy ranging from 77\% to 81\%. This was supported with local expert knowledge which accurately classified 79\% of communities deemed to be highly susceptible to flood hazard. The results will assist disaster managers to reduce the risk to flood disasters at the community level where risk outcomes are first materialized.},
  language = {en}
}

@article{ElsebergBorrmannNuechter2013,
  author   = {Elseberg, Jan and Borrmann, Dorit and N{\"u}chter, Andreas},
  title    = {Algorithmic Solutions for Computing Precise Maximum Likelihood {3D} Point Clouds from Mobile Laser Scanning Platforms},
  series   = {Remote Sensing},
  volume   = {5},
  journal  = {Remote Sensing},
  number   = {11},
  doi      = {10.3390/rs5115871},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-130478},
  pages    = {5871--5906},
  year     = {2013},
  abstract = {Mobile laser scanning puts high requirements on the accuracy of the positioning systems and the calibration of the measurement system. We present a novel algorithmic approach for calibration with the goal of improving the measurement accuracy of mobile laser scanners. We describe a general framework for calibrating mobile sensor platforms that estimates all configuration parameters for any arrangement of positioning sensors, including odometry. In addition, we present a novel semi-rigid Simultaneous Localization and Mapping (SLAM) algorithm that corrects the vehicle position at every point in time along its trajectory, while simultaneously improving the quality and precision of the entire acquired point cloud. Using this algorithm, the temporary failure of accurate external positioning systems or the lack thereof can be compensated for. We demonstrate the capabilities of the two newly proposed algorithms on a wide variety of datasets.},
  language = {en}
}