@article{KarulinKaracsonyZhangetal.2015,
  author   = {Karulin, Alexey Y. and Karacsony, Kinga and Zhang, Wenji and Targoni, Oleg S. and Moldovan, Ioana and Dittrich, Marcus and Sundararaman, Srividya and Lehmann, Paul V.},
  title    = {ELISPOTs produced by CD8 and CD4 cells follow Log Normal size distribution permitting objective counting},
  journal  = {Cells},
  volume   = {4},
  number   = {1},
  pages    = {56--70},
  year     = {2015},
  doi      = {10.3390/cells4010056},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-149648},
  abstract = {Each positive well in ELISPOT assays contains spots of variable sizes that can range from tens of micrometers up to a millimeter in diameter. Therefore, when it comes to counting these spots, the decision on setting the lower and upper spot size thresholds to discriminate between non-specific background noise, spots produced by individual T cells, and spots formed by T cell clusters is critical. If the spot sizes follow a known statistical distribution, precise predictions on the minimal and maximal spot sizes belonging to a given T cell population can be made. We studied the size distributional properties of IFN-γ, IL-2, IL-4, IL-5 and IL-17 spots elicited in ELISPOT assays with PBMC from 172 healthy donors, upon stimulation with 32 individual viral peptides representing defined HLA Class I-restricted epitopes for CD8 cells, and with protein antigens of CMV and EBV activating CD4 cells. A total of 334 CD8 and 80 CD4 positive T cell responses were analyzed. In 99.7\% of the test cases, spot size distributions followed a Log Normal function. These data formally demonstrate that it is possible to establish objective, statistically validated parameters for counting T cell ELISPOTs.},
  language = {en}
}

@article{AhmedZeeshanDandekar2016,
  author   = {Ahmed, Zeeshan and Zeeshan, Saman and Dandekar, Thomas},
  title    = {Mining biomedical images towards valuable information retrieval in biomedical and life sciences},
  journal  = {Database - The Journal of Biological Databases and Curation},
  volume   = {2016},
  pages    = {baw118},
  year     = {2016},
  doi      = {10.1093/database/baw118},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-162697},
  abstract = {Biomedical images are helpful sources for scientists and practitioners in drawing significant hypotheses, exemplifying approaches and describing experimental results in the published biomedical literature. Recent decades have seen an enormous increase in the production and publication of heterogeneous biomedical images, creating a need for bioimaging platforms that can extract and analyze both the text and the content of biomedical images as the basis for effective information retrieval systems. In this review, we summarize technologies related to data mining of figures. We describe and compare the potential of different approaches in terms of their developmental aspects, methodologies, results, achieved accuracies and limitations. Our comparative conclusions include current challenges for bioimaging software with selective image mining, embedded text extraction and processing of complex natural language queries.},
  language = {en}
}
@article{HartrampfHeinrichSeitzetal.2020,
  author   = {Hartrampf, Philipp E. and Heinrich, Marieke and Seitz, Anna Katharina and Brumberg, Joachim and Sokolakis, Ioannis and Kalogirou, Charis and Schirbel, Andreas and K{\"u}bler, Hubert and Buck, Andreas K. and Lapa, Constantin and Krebs, Markus},
  title    = {Metabolic Tumour Volume from PSMA PET/CT Scans of Prostate Cancer Patients during Chemotherapy — Do Different Software Solutions Deliver Comparable Results?},
  journal  = {Journal of Clinical Medicine},
  volume   = {9},
  number   = {5},
  issn     = {2077-0383},
  year     = {2020},
  doi      = {10.3390/jcm9051390},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205893},
  abstract = {(1) Background: Prostate-specific membrane antigen (PSMA)-derived tumour volume (PSMA-TV) and total lesion PSMA (TL-PSMA) from PSMA PET/CT scans are promising biomarkers for assessing treatment response in prostate cancer (PCa). Currently, it is unclear whether different software tools for assessing PSMA-TV and TL-PSMA produce comparable results. (2) Methods: \(^{68}\)Ga-PSMA PET/CT scans from n = 21 patients with castration-resistant PCa (CRPC) receiving chemotherapy were identified from our single-centre database. PSMA-TV and TL-PSMA were calculated with Syngo.via (Siemens) as well as with the freely available Beth Israel plugin for FIJI (Fiji Is Just ImageJ) before and after chemotherapy. While statistical comparability was illustrated and quantified via Bland-Altman diagrams, clinical agreement was estimated by matching PSMA-TV, TL-PSMA and the relative changes of both variables during chemotherapy with changes in serum PSA (ΔPSA) and with PERCIST (PET Response Criteria in Solid Tumors). (3) Results: Comparing absolute PSMA-TV and TL-PSMA values as well as Bland-Altman plotting revealed good statistical comparability of both software algorithms. Regarding clinical agreement, therapy response classification did not differ between PSMA-TV and TL-PSMA for either software solution and showed highly positive correlations with biochemical response (BR). (4) Conclusions: Given the high levels of statistical and clinical agreement in our CRPC patient cohort undergoing taxane chemotherapy, comparing PSMA-TV and TL-PSMA determined by Syngo.via and FIJI appears feasible.},
  language = {en}
}

@article{StebaniBlaimerZableretal.2023,
  author   = {Stebani, Jannik and Blaimer, Martin and Zabler, Simon and Neun, Tilmann and Pelt, Dani{\"e}l M. and Rak, Kristen},
  title    = {Towards fully automated inner ear analysis with deep-learning-based joint segmentation and landmark detection framework},
  journal  = {Scientific Reports},
  volume   = {13},
  year     = {2023},
  doi      = {10.1038/s41598-023-45466-9},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357411},
  abstract = {Automated analysis of the inner ear anatomy in radiological data, instead of time-consuming manual assessment, is a worthwhile goal that could facilitate preoperative planning and clinical research. We propose a framework encompassing joint semantic segmentation of the inner ear and anatomical landmark detection of the helicotrema, oval window and round window. A fully automated pipeline with a single, dual-headed volumetric 3D U-Net was implemented, trained and evaluated using manually labeled in-house datasets from cadaveric specimens (N = 43) and clinical practice (N = 9). Model robustness was further evaluated on three independent open-source datasets (N = 23 + 7 + 17 scans) consisting of cadaveric specimen scans. For the in-house datasets, Dice scores of 0.97 and 0.94, intersection-over-union scores of 0.94 and 0.89, and average Hausdorff distances of 0.065 and 0.14 voxel units were achieved. The landmark localization task was performed automatically with average localization errors of 3.3 and 5.2 voxel units. A robust, albeit reduced, performance could be attained for the catalogue of three open-source datasets. Ablation studies with 43 mono-parametric variations of the basal architecture and training protocol provided task-optimal parameters for both categories. Ablation studies against single-task variants of the basal architecture showed a clear performance benefit of coupling landmark localization with segmentation and a dataset-dependent performance impact on segmentation ability.},
  language = {en}
}
@article{GriebelSegebarthSteinetal.2023,
  author   = {Griebel, Matthias and Segebarth, Dennis and Stein, Nikolai and Schukraft, Nina and Tovote, Philip and Blum, Robert and Flath, Christoph M.},
  title    = {Deep learning-enabled segmentation of ambiguous bioimages with deepflash2},
  journal  = {Nature Communications},
  volume   = {14},
  year     = {2023},
  doi      = {10.1038/s41467-023-36960-9},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357286},
  abstract = {Bioimages frequently exhibit low signal-to-noise ratios due to experimental conditions, specimen characteristics, and imaging trade-offs. Reliable segmentation of such ambiguous images is difficult and laborious. Here we introduce deepflash2, a deep learning-enabled segmentation tool for bioimage analysis. The tool addresses typical challenges that may arise during the training, evaluation, and application of deep learning models on ambiguous data. The tool's training and evaluation pipeline uses multiple expert annotations and deep model ensembles to achieve accurate results. The application pipeline supports various use cases for expert annotations and includes a quality assurance mechanism in the form of uncertainty measures. Benchmarked against other tools, deepflash2 offers both high predictive accuracy and efficient computational resource usage. The tool is built upon established deep learning libraries and enables sharing of trained model ensembles with the research community. deepflash2 aims to simplify the integration of deep learning into bioimage analysis projects while improving accuracy and reliability.},
  language = {en}
}