@phdthesis{Schindelin2005,
  author   = {Schindelin, Johannes},
  title    = {The standard brain of {Drosophila melanogaster} and its automatic segmentation},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-15518},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2005},
  abstract = {In this thesis, I introduce the Virtual Brain Protocol, which facilitates applications of the Standard Brain of Drosophila melanogaster. By providing reliable and extensible tools for the handling of neuroanatomical data, this protocol simplifies and organizes the recurring tasks involved in these applications. It is demonstrated that this protocol can also be used to generate average brains, i.e. to combine recordings of several brains with the same features such that the common features are emphasized. One of the most important steps of the Virtual Insect Protocol is the aligning of newly recorded data sets with the Standard Brain. After presenting methods commonly applied in a biological or medical context to align two different recordings, it is evaluated to what extent this alignment can be automated. To that end, existing Image Processing techniques are assessed. I demonstrate that these techniques do not satisfy the requirements needed to guarantee sensible alignments between two brains. Then, I analyze what needs to be taken into account in order to formulate an algorithm which satisfies the needs of the protocol. In the last chapter, I derive such an algorithm using methods from Information Theory, which bases the technique on a solid mathematical foundation. I show how Bayesian Inference can be applied to enhance the results further. It is demonstrated that this approach yields good results on very noisy images, detecting apparent boundaries between structures. The same approach can be extended to take additional knowledge into account, e.g. the relative position of the anatomical structures and their shape. It is shown how this extension can be utilized to segment a newly recorded brain automatically.},
  subject  = {Taufliege},
  language = {en}
}

@article{ConradFritschZeidleretal.2010,
  author        = {Conrad, Christopher and Fritsch, Sebastian and Zeidler, Julian and R{\"u}cker, Gerd and Dech, Stefan},
  title         = {Per-Field Irrigated Crop Classification in Arid {Central Asia} Using {SPOT} and {ASTER} Data},
  url           = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-68630},
  year          = {2010},
  abstract      = {The overarching goal of this research was to explore accurate methods of mapping irrigated crops, where digital cadastre information is unavailable: (a) Boundary separation by object-oriented image segmentation using very high spatial resolution (2.5-5 m) data was followed by (b) identification of crops and crop rotations by means of phenology, tasselled cap, and rule-based classification using high resolution (15-30 m) bi-temporal data. The extensive irrigated cotton production system of the Khorezm province in Uzbekistan, Central Asia, was selected as a study region. Image segmentation was carried out on pan-sharpened SPOT data. Varying combinations of segmentation parameters (shape, compactness, and color) were tested for optimized boundary separation. The resulting geometry was validated against polygons digitized from the data and cadastre maps, analysing similarity (size, shape) and congruence. The parameters shape and compactness were decisive for segmentation accuracy. Differences between crop phenologies were analyzed at field level using bi-temporal ASTER data. A rule set based on the tasselled cap indices greenness and brightness allowed for classifying crop rotations of cotton, winter-wheat and rice, resulting in an overall accuracy of 80 \%. The proposed field-based crop classification method can be an important tool for use in water demand estimations, crop yield simulations, or economic models in agricultural systems similar to Khorezm.},
  subject       = {Geologie},
  language      = {en},
  internal-note = {Required journal field missing in source record for this @article entry; verify against original publication}
}

@article{BischlerKopfVoss2014,
  author   = {Bischler, Thorsten and Kopf, Matthias and Voss, Bjoern},
  title    = {Transcript mapping based on {dRNA-seq} data},
  series   = {BMC Bioinformatics},
  volume   = {15},
  journal  = {BMC Bioinformatics},
  number   = {122},
  issn     = {1471-2105},
  doi      = {10.1186/1471-2105-15-122},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-116663},
  year     = {2014},
  abstract = {Background: RNA-seq and its variant differential RNA-seq (dRNA-seq) are today routine methods for transcriptome analysis in bacteria. While expression profiling and transcriptional start site prediction are standard tasks today, the problem of identifying transcriptional units in a genome-wide fashion is still not solved for prokaryotic systems. Results: We present RNASEG, an algorithm for the prediction of transcriptional units based on dRNA-seq data. A key feature of the algorithm is that, based on the data, it distinguishes between transcribed and un-transcribed genomic segments. Furthermore, the program provides many different predictions in a single run, which can be used to infer the significance of transcriptional units in a consensus procedure. We show the performance of our method based on a well-studied dRNA-seq data set for Helicobacter pylori. Conclusions: With our algorithm it is possible to identify operons and 5'- and 3'-UTRs in an automated fashion. This alleviates the need for labour intensive manual inspection and enables large-scale studies in the area of comparative transcriptomics.},
  language = {en}
}

@article{vanToorNewmanTakekawaetal.2016,
  author   = {van Toor, Mari{\"e}lle L. and Newman, Scott H. and Takekawa, John Y. and Wegmann, Martin and Safi, Kamran},
  title    = {Temporal segmentation of animal trajectories informed by habitat use},
  series   = {Ecosphere},
  volume   = {7},
  journal  = {Ecosphere},
  number   = {10},
  doi      = {10.1002/ecs2.1498},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-164970},
  pages    = {e01498},
  year     = {2016},
  abstract = {Most animals live in seasonal environments and experience very different conditions throughout the year. Behavioral strategies like migration, hibernation, and a life cycle adapted to the local seasonality help to cope with fluctuations in environmental conditions. Thus, how an individual utilizes the environment depends both on the current availability of habitat and the behavioral prerequisites of the individual at that time. While the increasing availability and richness of animal movement data has facilitated the development of algorithms that classify behavior by movement geometry, changes in the environmental correlates of animal movement have so far not been exploited for a behavioral annotation. Here, we suggest a method that uses these changes in individual-environment associations to divide animal location data into segments of higher ecological coherence, which we term niche segmentation. We use time series of random forest models to evaluate the transferability of habitat use over time to cluster observational data accordingly. We show that our method is able to identify relevant changes in habitat use corresponding to both changes in the availability of habitat and how it was used using simulated data, and apply our method to a tracking data set of common teal (Anas crecca). The niche segmentation proved to be robust, and segmented habitat suitability outperformed models neglecting the temporal dynamics of habitat use. Overall, we show that it is possible to classify animal trajectories based on changes of habitat use similar to geometric segmentation algorithms. We conclude that such an environmentally informed classification of animal trajectories can provide new insights into an individuals' behavior and enables us to make sensible predictions of how suitable areas might be connected by movement in space and time.},
  language = {en}
}

@article{PfitznerMayNuechter2018,
  author   = {Pfitzner, Christian and May, Stefan and N{\"u}chter, Andreas},
  title    = {Body weight estimation for dose-finding and health monitoring of lying, standing and walking patients based on {RGB-D} data},
  series   = {Sensors},
  volume   = {18},
  journal  = {Sensors},
  number   = {5},
  doi      = {10.3390/s18051311},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176642},
  pages    = {1311},
  year     = {2018},
  abstract = {This paper describes the estimation of the body weight of a person in front of an RGB-D camera. A survey of different methods for body weight estimation based on depth sensors is given. First, an estimation of people standing in front of a camera is presented. Second, an approach based on a stream of depth images is used to obtain the body weight of a person walking towards a sensor. The algorithm first extracts features from a point cloud and forwards them to an artificial neural network (ANN) to obtain an estimation of body weight. Besides the algorithm for the estimation, this paper further presents an open-access dataset based on measurements from a trauma room in a hospital as well as data from visitors of a public event. In total, the dataset contains 439 measurements. The article illustrates the efficiency of the approach with experiments with persons lying down in a hospital, standing persons, and walking persons. Applicable scenarios for the presented algorithm are body weight-related dosing of emergency patients.},
  language = {en}
}

@article{AnkenbrandLohrSchloetelburgetal.2021,
  author   = {Ankenbrand, Markus Johannes and Lohr, David and Schl{\"o}telburg, Wiebke and Reiter, Theresa and Wech, Tobias and Schreiber, Laura Maria},
  title    = {Deep learning-based cardiac cine segmentation: Transfer learning application to {7T} ultrahigh-field {MRI}},
  series   = {Magnetic Resonance in Medicine},
  volume   = {86},
  journal  = {Magnetic Resonance in Medicine},
  number   = {4},
  doi      = {10.1002/mrm.28822},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-257604},
  pages    = {2179--2191},
  year     = {2021},
  abstract = {Purpose Artificial neural networks show promising performance in automatic segmentation of cardiac MRI. However, training requires large amounts of annotated data and generalization to different vendors, field strengths, sequence parameters, and pathologies is limited. Transfer learning addresses this challenge, but specific recommendations regarding type and amount of data required is lacking. In this study, we assess data requirements for transfer learning to experimental cardiac MRI at 7T where the segmentation task can be challenging. In addition, we provide guidelines, tools, and annotated data to enable transfer learning approaches by other researchers and clinicians. Methods A publicly available segmentation model was used to annotate a publicly available data set. This labeled data set was subsequently used to train a neural network for segmentation of left ventricle and myocardium in cardiac cine MRI. The network is used as starting point for transfer learning to 7T cine data of healthy volunteers (n = 22; 7873 images) by updating the pre-trained weights. Structured and random data subsets of different sizes were used to systematically assess data requirements for successful transfer learning. Results Inconsistencies in the publically available data set were corrected, labels created, and a neural network trained. On 7T cardiac cine images the model pre-trained on public imaging data, acquired at 1.5T and 3T, achieved DICE\(_{LV}\) = 0.835 and DICE\(_{MY}\) = 0.670. Transfer learning using 7T cine data and ImageNet weight initialization improved model performance to DICE\(_{LV}\) = 0.900 and DICE\(_{MY}\) = 0.791. Using only end-systolic and end-diastolic images reduced training data by 90\%, with no negative impact on segmentation performance (DICE\(_{LV}\) = 0.908, DICE\(_{MY}\) = 0.805). Conclusions This work demonstrates and quantifies the benefits of transfer learning for cardiac cine image segmentation. We provide practical guidelines for researchers planning transfer learning projects in cardiac MRI and make data, models, and code publicly available.},
  language = {en}
}

@article{AnkenbrandShainbergHocketal.2021,
  author   = {Ankenbrand, Markus J. and Shainberg, Liliia and Hock, Michael and Lohr, David and Schreiber, Laura M.},
  title    = {Sensitivity analysis for interpretation of machine learning based segmentation models in cardiac {MRI}},
  series   = {BMC Medical Imaging},
  volume   = {21},
  journal  = {BMC Medical Imaging},
  number   = {1},
  doi      = {10.1186/s12880-021-00551-1},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259169},
  pages    = {27},
  year     = {2021},
  abstract = {Background Image segmentation is a common task in medical imaging e.g., for volumetry analysis in cardiac MRI. Artificial neural networks are used to automate this task with performance similar to manual operators. However, this performance is only achieved in the narrow tasks networks are trained on. Performance drops dramatically when data characteristics differ from the training set properties. Moreover, neural networks are commonly considered black boxes, because it is hard to understand how they make decisions and why they fail. Therefore, it is also hard to predict whether they will generalize and work well with new data. Here we present a generic method for segmentation model interpretation. Sensitivity analysis is an approach where model input is modified in a controlled manner and the effect of these modifications on the model output is evaluated. This method yields insights into the sensitivity of the model to these alterations and therefore to the importance of certain features on segmentation performance. Results We present an open-source Python library (misas), that facilitates the use of sensitivity analysis with arbitrary data and models. We show that this method is a suitable approach to answer practical questions regarding use and functionality of segmentation models. We demonstrate this in two case studies on cardiac magnetic resonance imaging. The first case study explores the suitability of a published network for use on a public dataset the network has not been trained on. The second case study demonstrates how sensitivity analysis can be used to evaluate the robustness of a newly trained model. Conclusions Sensitivity analysis is a useful tool for deep learning developers as well as users such as clinicians. It extends their toolbox, enabling and improving interpretability of segmentation models. Enhancing our understanding of neural networks through sensitivity analysis also assists in decision making. Although demonstrated only on cardiac magnetic resonance images this approach and software are much more broadly applicable.},
  language = {en}
}