@article{LauterbachBorrmannHessetal.2015,
  author   = {Lauterbach, Helge A. and Borrmann, Dorit and Heß, Robin and Eck, Daniel and Schilling, Klaus and N{\"u}chter, Andreas},
  title    = {Evaluation of a Backpack-Mounted 3D Mobile Scanning System},
  series   = {Remote Sensing},
  volume   = {7},
  journal  = {Remote Sensing},
  number   = {10},
  doi      = {10.3390/rs71013753},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-126247},
  pages    = {13753--13781},
  year     = {2015},
  abstract = {Recently, several backpack-mounted systems, also known as personal laser scanning systems, have been developed. They consist of laser scanners or cameras that are carried by a human operator to acquire measurements of the environment while walking. These systems were first designed to overcome the challenges of mapping indoor environments with doors and stairs. While the human operator inherently has the ability to open doors and to climb stairs, the flexible movements introduce irregularities of the trajectory to the system. To compete with other mapping systems, the accuracy of these systems has to be evaluated. In this paper, we present an extensive evaluation of our backpack mobile mapping system in indoor environments. It is shown that the system can deal with the normal human walking motion, but has problems with irregular jittering. Moreover, we demonstrate the applicability of the backpack in a suitable urban scenario.},
  language = {en}
}

@article{ElsebergBorrmannNuechter2013,
  author   = {Elseberg, Jan and Borrmann, Dorit and N{\"u}chter, Andreas},
  title    = {Algorithmic Solutions for Computing Precise Maximum Likelihood 3D Point Clouds from Mobile Laser Scanning Platforms},
  series   = {Remote Sensing},
  volume   = {5},
  journal  = {Remote Sensing},
  number   = {11},
  doi      = {10.3390/rs5115871},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-130478},
  pages    = {5871--5906},
  year     = {2013},
  abstract = {Mobile laser scanning puts high requirements on the accuracy of the positioning systems and the calibration of the measurement system. We present a novel algorithmic approach for calibration with the goal of improving the measurement accuracy of mobile laser scanners. We describe a general framework for calibrating mobile sensor platforms that estimates all configuration parameters for any arrangement of positioning sensors, including odometry. In addition, we present a novel semi-rigid Simultaneous Localization and Mapping (SLAM) algorithm that corrects the vehicle position at every point in time along its trajectory, while simultaneously improving the quality and precision of the entire acquired point cloud. Using this algorithm, the temporary failure of accurate external positioning systems or the lack thereof can be compensated for. We demonstrate the capabilities of the two newly proposed algorithms on a wide variety of datasets.},
  language = {en}
}

@article{DuLauterbachLietal.2020,
  author   = {Du, Shitong and Lauterbach, Helge A. and Li, Xuyou and Demisse, Girum G. and Borrmann, Dorit and N{\"u}chter, Andreas},
  title    = {Curvefusion — A Method for Combining Estimated Trajectories with Applications to SLAM and Time-Calibration},
  series   = {Sensors},
  volume   = {20},
  journal  = {Sensors},
  number   = {23},
  issn     = {1424-8220},
  doi      = {10.3390/s20236918},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219988},
  year     = {2020},
  abstract = {Mapping and localization of mobile robots in an unknown environment are essential for most high-level operations like autonomous navigation or exploration. This paper presents a novel approach for combining estimated trajectories, namely curvefusion. The robot used in the experiments is equipped with a horizontally mounted 2D profiler, a constantly spinning 3D laser scanner and a GPS module. The proposed algorithm first combines trajectories from different sensors to optimize poses of the planar three degrees of freedom (DoF) trajectory, which is then fed into continuous-time simultaneous localization and mapping (SLAM) to further improve the trajectory. While state-of-the-art multi-sensor fusion methods mainly focus on probabilistic methods, our approach instead adopts a deformation-based method to optimize poses. To this end, a similarity metric for curved shapes is introduced into the robotics community to fuse the estimated trajectories. Additionally, a shape-based point correspondence estimation method is applied to the multi-sensor time calibration. Experiments show that the proposed fusion method can achieve relatively better accuracy, even if the error of the trajectory before fusion is large, which demonstrates that our method can still maintain a certain degree of accuracy in an environment where typical pose estimation methods have poor performance. In addition, the proposed time-calibration method also achieves high accuracy in estimating point correspondences.},
  language = {en}
}

@article{YuanBorrmannHouetal.2021,
  author   = {Yuan, Yijun and Borrmann, Dorit and Hou, Jiawei and Ma, Yuexin and N{\"u}chter, Andreas and Schwertfeger, S{\"o}ren},
  title    = {Self-Supervised Point Set Local Descriptors for Point Cloud Registration},
  series   = {Sensors},
  volume   = {21},
  journal  = {Sensors},
  number   = {2},
  issn     = {1424-8220},
  doi      = {10.3390/s21020486},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-223000},
  year     = {2021},
  abstract = {Descriptors play an important role in point cloud registration. The current state-of-the-art resorts to the high regression capability of deep learning. However, recent deep learning-based descriptors require different levels of annotation and selection of patches, which make the model hard to migrate to new scenarios. In this work, we learn local registration descriptors for point clouds in a self-supervised manner. In each iteration of the training, the input of the network is merely one unlabeled point cloud. Thus, the whole training requires no manual annotation and manual selection of patches. In addition, we propose to involve keypoint sampling into the pipeline, which further improves the performance of our model. Our experiments demonstrate the capability of our self-supervised local descriptor to achieve even better performance than the supervised model, while being easier to train and requiring no data labeling.},
  language = {en}
}