@phdthesis{Jenett2007,
  author = {Jenett, Arnim},
  title = {The Virtual Insect Brain Protocol: development and application of software for the standardization of neuroanatomy},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-22297},
  school = {Universit{\"a}t W{\"u}rzburg},
  year = {2007},
  abstract = {Since the fruit fly Drosophila melanogaster entered the laboratories as a model organism, new genetic, physiological, molecular and behavioral techniques for the functional analysis of the brain have rapidly accumulated. Nowadays this concerted assault obtains its main thrust from Gal4 expression patterns that can be visualized and provide the means for manipulating, in unrestrained animals, groups of neurons of the brain. To take advantage of these patterns one needs to know their anatomy. This thesis describes the Virtual Insect Brain (VIB) protocol, a software package for the quantitative assessment, comparison, and presentation of neuroanatomical data. It is based on the 3D-reconstruction and visualization software Amira (Mercury Inc.). Its main part is a standardization procedure which aligns individual 3D images (series of virtual sections obtained by confocal microscopy) to a common coordinate system and computes average intensities for each voxel (volume pixel). The VIB protocol facilitates direct comparison of gene expression patterns and describes their interindividual variability. It provides volumetry of brain regions and helps to characterize the phenotypes of brain structure mutants. Using the VIB protocol does not require any programming skills, since all operations are carried out via a (nearly) self-explanatory graphical user interface. Although the VIB protocol has been developed for the standardization of Drosophila neuroanatomy, the program structure can be used for the standardization of other 3D structures as well. Standardizing brains and gene expression patterns is a new approach to biological shape and its variability. Consequently, using the VIB protocol may help to integrate knowledge on the correlation of form and function of the insect brain. The VIB protocol provides a first set of tools supporting this endeavor in Drosophila. The software is freely available at http://www.neurofly.de.},
  subject = {Taufliege},
  language = {en}
}

@article{KarulinKaracsonyZhangetal.2015,
  author = {Karulin, Alexey Y. and Karacsony, Kinga and Zhang, Wenji and Targoni, Oleg S. and Moldova, Ioana and Dittrich, Marcus and Sundararaman, Srividya and Lehmann, Paul V.},
  title = {ELISPOTs produced by CD8 and CD4 cells follow Log Normal size distribution permitting objective counting},
  series = {Cells},
  volume = {4},
  journal = {Cells},
  number = {1},
  doi = {10.3390/cells4010056},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-149648},
  pages = {56-70},
  year = {2015},
  abstract = {Each positive well in ELISPOT assays contains spots of variable sizes that can range from tens of micrometers up to a millimeter in diameter. Therefore, when it comes to counting these spots, the decision on setting the lower and the upper spot size thresholds to discriminate between non-specific background noise, spots produced by individual T cells, and spots formed by T cell clusters is critical. If the spot sizes follow a known statistical distribution, precise predictions on the minimal and maximal spot sizes belonging to a given T cell population can be made.
We studied the size distributional properties of IFN-γ, IL-2, IL-4, IL-5 and IL-17 spots elicited in ELISPOT assays with PBMC from 172 healthy donors, upon stimulation with 32 individual viral peptides representing defined HLA Class I-restricted epitopes for CD8 cells, and with protein antigens of CMV and EBV activating CD4 cells. A total of 334 CD8 and 80 CD4 positive T cell responses were analyzed. In 99.7\% of the test cases, spot size distributions followed a Log Normal function. These data formally demonstrate that it is possible to establish objective, statistically validated parameters for counting T cell ELISPOTs.},
  language = {en}
}

@article{AhmedZeeshanDandekar2016,
  author = {Ahmed, Zeeshan and Zeeshan, Saman and Dandekar, Thomas},
  title = {Mining biomedical images towards valuable information retrieval in biomedical and life sciences},
  series = {Database - The Journal of Biological Databases and Curation},
  volume = {2016},
  journal = {Database - The Journal of Biological Databases and Curation},
  doi = {10.1093/database/baw118},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-162697},
  pages = {baw118},
  year = {2016},
  abstract = {Biomedical images are helpful sources for scientists and practitioners when drawing significant hypotheses, exemplifying approaches and describing experimental results in the published biomedical literature. In recent decades, there has been an enormous increase in the production and publication of heterogeneous biomedical images, which creates a need for bioimaging platforms that extract and analyze the text and content of biomedical images in order to implement effective information retrieval systems. In this review, we summarize technologies related to data mining of figures. We describe and compare the potential of different approaches in terms of their developmental aspects, the methodologies used, the results produced, the accuracies achieved and their limitations. Our comparative conclusions include current challenges for bioimaging software with selective image mining, embedded text extraction and processing of complex natural language queries.},
  language = {en}
}

@article{AlZabenMedyukhinaDietrichetal.2019,
  author = {Al-Zaben, Naim and Medyukhina, Anna and Dietrich, Stefanie and Marolda, Alessandra and H{\"u}nniger, Kerstin and Kurzai, Oliver and Figge, Marc Thilo},
  title = {Automated tracking of label-free cells with enhanced recognition of whole tracks},
  series = {Scientific Reports},
  volume = {9},
  journal = {Scientific Reports},
  doi = {10.1038/s41598-019-39725-x},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-221093},
  year = {2019},
  abstract = {Migration and interactions of immune cells are routinely studied by time-lapse microscopy of in vitro migration and confrontation assays. To objectively quantify the dynamic behavior of cells, software tools for automated cell tracking can be applied. However, many existing tracking algorithms recognize only rather short fragments of a whole cell track and rely on cell staining to enhance cell segmentation. While our previously developed segmentation approach enables tracking of label-free cells, it still suffers from frequently recognizing only short track fragments. In this study, we identify sources of track fragmentation and provide solutions to obtain longer cell tracks.
This is achieved by improving the detection of low-contrast cells and by optimizing the value of the gap size parameter, which defines the number of missing cell positions between track fragments that is still accepted for connecting them into one track. We find that the enhanced track recognition increases the average length of cell tracks up to 2.2-fold. Recognizing cell tracks as a whole will enable studying and quantifying more complex patterns of cell behavior, e.g. switches in migration mode or the dependence of phagocytosis efficiency on the number and type of preceding interactions. Such quantitative analyses will improve our understanding of how immune cells interact and function in health and disease.},
  language = {en}
}

@article{HartrampfHeinrichSeitzetal.2020,
  author = {Hartrampf, Philipp E. and Heinrich, Marieke and Seitz, Anna Katharina and Brumberg, Joachim and Sokolakis, Ioannis and Kalogirou, Charis and Schirbel, Andreas and K{\"u}bler, Hubert and Buck, Andreas K. and Lapa, Constantin and Krebs, Markus},
  title = {Metabolic Tumour Volume from PSMA PET/CT Scans of Prostate Cancer Patients during Chemotherapy — Do Different Software Solutions Deliver Comparable Results?},
  series = {Journal of Clinical Medicine},
  volume = {9},
  journal = {Journal of Clinical Medicine},
  number = {5},
  issn = {2077-0383},
  doi = {10.3390/jcm9051390},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205893},
  year = {2020},
  abstract = {(1) Background: Prostate-specific membrane antigen (PSMA)-derived tumour volume (PSMA-TV) and total lesion PSMA (TL-PSMA) from PSMA PET/CT scans are promising biomarkers for assessing treatment response in prostate cancer (PCa). Currently, it is unclear whether different software tools for assessing PSMA-TV and TL-PSMA produce comparable results. (2) Methods: \(^{68}\)Ga-PSMA PET/CT scans from n = 21 patients with castration-resistant PCa (CRPC) receiving chemotherapy were identified from our single-centre database. PSMA-TV and TL-PSMA were calculated with Syngo.via (Siemens) as well as the freely available Beth Israel plugin for FIJI (Fiji Is Just ImageJ) before and after chemotherapy. While statistical comparability was illustrated and quantified via Bland-Altman diagrams, the clinical agreement was estimated by matching PSMA-TV, TL-PSMA and relative changes of both variables during chemotherapy with changes in serum PSA (ΔPSA) and PERCIST (Positron Emission Response Criteria in Solid Tumors). (3) Results: Comparing absolute PSMA-TV and TL-PSMA as well as Bland-Altman plotting revealed a good statistical comparability of both software algorithms. For clinical agreement, classifying therapy response did not differ between PSMA-TV and TL-PSMA for both software solutions and showed highly positive correlations with BR. (4) Conclusions: Due to the high levels of statistical and clinical agreement in our CRPC patient cohort undergoing taxane chemotherapy, comparing PSMA-TV and TL-PSMA determined by Syngo.via and FIJI appears feasible.},
  language = {en}
}
@article{StebaniBlaimerZableretal.2023,
  author = {Stebani, Jannik and Blaimer, Martin and Zabler, Simon and Neun, Tilmann and Pelt, Dani{\"e}l M. and Rak, Kristen},
  title = {Towards fully automated inner ear analysis with deep-learning-based joint segmentation and landmark detection framework},
  series = {Scientific Reports},
  volume = {13},
  journal = {Scientific Reports},
  doi = {10.1038/s41598-023-45466-9},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357411},
  year = {2023},
  abstract = {Automated analysis of the inner ear anatomy in radiological data instead of time-consuming manual assessment is a worthwhile goal that could facilitate preoperative planning and clinical research. We propose a framework encompassing joint semantic segmentation of the inner ear and anatomical landmark detection of the helicotrema and the oval and round windows. A fully automated pipeline with a single, dual-headed volumetric 3D U-Net was implemented, trained and evaluated using manually labeled in-house datasets from cadaveric specimens (N = 43) and clinical practice (N = 9). The model robustness was further evaluated on three independent open-source datasets (N = 23 + 7 + 17 scans) consisting of cadaveric specimen scans. For the in-house datasets, Dice scores of 0.97 and 0.94, intersection-over-union scores of 0.94 and 0.89 and average Hausdorff distances of 0.065 and 0.14 voxel units were achieved. The landmark localization task was performed automatically with average localization errors of 3.3 and 5.2 voxel units. A robust, albeit reduced, performance could be attained on the catalogue of three open-source datasets. Results of the ablation studies with 43 mono-parametric variations of the basal architecture and training protocol provided task-optimal parameters for both categories. Ablation studies against single-task variants of the basal architecture showed a clear performance benefit of coupling landmark localization with segmentation and a dataset-dependent performance impact on segmentation ability.},
  language = {en}
}

@article{GriebelSegebarthSteinetal.2023,
  author = {Griebel, Matthias and Segebarth, Dennis and Stein, Nikolai and Schukraft, Nina and Tovote, Philip and Blum, Robert and Flath, Christoph M.},
  title = {Deep learning-enabled segmentation of ambiguous bioimages with deepflash2},
  series = {Nature Communications},
  volume = {14},
  journal = {Nature Communications},
  doi = {10.1038/s41467-023-36960-9},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357286},
  year = {2023},
  abstract = {Bioimages frequently exhibit low signal-to-noise ratios due to experimental conditions, specimen characteristics, and imaging trade-offs. Reliable segmentation of such ambiguous images is difficult and laborious. Here we introduce deepflash2, a deep learning-enabled segmentation tool for bioimage analysis. The tool addresses typical challenges that may arise during the training, evaluation, and application of deep learning models on ambiguous data. The tool's training and evaluation pipeline uses multiple expert annotations and deep model ensembles to achieve accurate results. The application pipeline supports various use cases for expert annotations and includes a quality assurance mechanism in the form of uncertainty measures. Benchmarked against other tools, deepflash2 offers both high predictive accuracy and efficient computational resource usage. The tool is built upon established deep learning libraries and enables sharing of trained model ensembles with the research community. deepflash2 aims to simplify the integration of deep learning into bioimage analysis projects while improving accuracy and reliability.},
  language = {en}
}