% Cleaned repository (OPUS) export. Changes: page ranges use "--"; the
% redundant `series` field (verbatim duplicate of `journal`, an export
% artifact) removed; acronyms in titles brace-protected against style
% recasing; one field per line. Citation keys, DOIs, URLs, and abstract
% text preserved (two obvious extraction typos fixed: "DSC =0.64" spacing,
% "coefficent" spelling). Text outside @entries is ignored by BibTeX.

@article{WechAnkenbrandBleyetal.2022,
  author   = {Wech, Tobias and Ankenbrand, Markus Johannes and Bley, Thorsten Alexander and Heidenreich, Julius Frederik},
  title    = {A data-driven semantic segmentation model for direct cardiac functional analysis based on undersampled radial {MR} cine series},
  journal  = {Magnetic Resonance in Medicine},
  volume   = {87},
  number   = {2},
  pages    = {972--983},
  year     = {2022},
  doi      = {10.1002/mrm.29017},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-257616},
  language = {en},
  abstract = {Purpose Image acquisition and subsequent manual analysis of cardiac cine MRI is time-consuming. The purpose of this study was to train and evaluate a 3D artificial neural network for semantic segmentation of radially undersampled cardiac MRI to accelerate both scan time and postprocessing. Methods A database of Cartesian short-axis MR images of the heart (148,500 images, 484 examinations) was assembled from an openly accessible database and radial undersampling was simulated. A 3D U-Net architecture was pretrained for segmentation of undersampled spatiotemporal cine MRI. Transfer learning was then performed using samples from a second database, comprising 108 non-Cartesian radial cine series of the midventricular myocardium to optimize the performance for authentic data. The performance was evaluated for different levels of undersampling by the Dice similarity coefficient (DSC) with respect to reference labels, as well as by deriving ventricular volumes and myocardial masses. Results Without transfer learning, the pretrained model performed moderately on true radial data [maximum number of projections tested, P = 196; DSC = 0.87 (left ventricle), DSC = 0.76 (myocardium), and DSC = 0.64 (right ventricle)]. After transfer learning with authentic data, the predictions achieved human level even for high undersampling rates (P = 33, DSC = 0.95, 0.87, and 0.93) without significant difference compared with segmentations derived from fully sampled data. Conclusion A 3D U-Net architecture can be used for semantic segmentation of radially undersampled cine acquisitions, achieving a performance comparable with human experts in fully sampled data. This approach can jointly accelerate time-consuming cine image acquisition and cumbersome manual image analysis.},
}

@article{AnkenbrandLohrSchloetelburgetal.2021,
  author   = {Ankenbrand, Markus Johannes and Lohr, David and Schl{\"o}telburg, Wiebke and Reiter, Theresa and Wech, Tobias and Schreiber, Laura Maria},
  title    = {Deep learning-based cardiac cine segmentation: Transfer learning application to {7T} ultrahigh-field {MRI}},
  journal  = {Magnetic Resonance in Medicine},
  volume   = {86},
  number   = {4},
  pages    = {2179--2191},
  year     = {2021},
  doi      = {10.1002/mrm.28822},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-257604},
  language = {en},
  abstract = {Purpose Artificial neural networks show promising performance in automatic segmentation of cardiac MRI. However, training requires large amounts of annotated data and generalization to different vendors, field strengths, sequence parameters, and pathologies is limited. Transfer learning addresses this challenge, but specific recommendations regarding type and amount of data required is lacking. In this study, we assess data requirements for transfer learning to experimental cardiac MRI at 7T where the segmentation task can be challenging. In addition, we provide guidelines, tools, and annotated data to enable transfer learning approaches by other researchers and clinicians. Methods A publicly available segmentation model was used to annotate a publicly available data set. This labeled data set was subsequently used to train a neural network for segmentation of left ventricle and myocardium in cardiac cine MRI. The network is used as starting point for transfer learning to 7T cine data of healthy volunteers (n = 22; 7873 images) by updating the pre-trained weights. Structured and random data subsets of different sizes were used to systematically assess data requirements for successful transfer learning. Results Inconsistencies in the publically available data set were corrected, labels created, and a neural network trained. On 7T cardiac cine images the model pre-trained on public imaging data, acquired at 1.5T and 3T, achieved DICE\(_{LV}\) = 0.835 and DICE\(_{MY}\) = 0.670. Transfer learning using 7T cine data and ImageNet weight initialization improved model performance to DICE\(_{LV}\) = 0.900 and DICE\(_{MY}\) = 0.791. Using only end-systolic and end-diastolic images reduced training data by 90\%, with no negative impact on segmentation performance (DICE\(_{LV}\) = 0.908, DICE\(_{MY}\) = 0.805). Conclusions This work demonstrates and quantifies the benefits of transfer learning for cardiac cine image segmentation. We provide practical guidelines for researchers planning transfer learning projects in cardiac MRI and make data, models, and code publicly available.},
}

@article{WengHeidenreichMetzetal.2021,
  author   = {Weng, Andreas M. and Heidenreich, Julius F. and Metz, Corona and Veldhoen, Simon and Bley, Thorsten A. and Wech, Tobias},
  title    = {Deep learning-based segmentation of the lung in {MR}-images acquired by a stack-of-spirals trajectory at ultra-short echo-times},
  journal  = {BMC Medical Imaging},
  volume   = {21},
  year     = {2021},
  doi      = {10.1186/s12880-021-00608-1},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260520},
  language = {en},
  abstract = {Background Functional lung MRI techniques are usually associated with time-consuming post-processing, where manual lung segmentation represents the most cumbersome part. The aim of this study was to investigate whether deep learning-based segmentation of lung images which were scanned by a fast UTE sequence exploiting the stack-of-spirals trajectory can provide sufficiently good accuracy for the calculation of functional parameters. Methods In this study, lung images were acquired in 20 patients suffering from cystic fibrosis (CF) and 33 healthy volunteers, by a fast UTE sequence with a stack-of-spirals trajectory and a minimum echo-time of 0.05 ms. A convolutional neural network was then trained for semantic lung segmentation using 17,713 2D coronal slices, each paired with a label obtained from manual segmentation. Subsequently, the network was applied to 4920 independent 2D test images and results were compared to a manual segmentation using the S{\o}rensen-Dice similarity coefficient (DSC) and the Hausdorff distance (HD). Obtained lung volumes and fractional ventilation values calculated from both segmentations were compared using Pearson's correlation coefficient and Bland Altman analysis. To investigate generalizability to patients outside the CF collective, in particular to those exhibiting larger consolidations inside the lung, the network was additionally applied to UTE images from four patients with pneumonia and one with lung cancer. Results The overall DSC for lung tissue was 0.967 ± 0.076 (mean ± standard deviation) and HD was 4.1 ± 4.4 mm. Lung volumes derived from manual and deep learning based segmentations as well as values for fractional ventilation exhibited a high overall correlation (Pearson's correlation coefficient = 0.99 and 1.00). For the additional cohort with unseen pathologies / consolidations, mean DSC was 0.930 ± 0.083, HD = 12.9 ± 16.2 mm and the mean difference in lung volume was 0.032 ± 0.048 L. Conclusions Deep learning-based image segmentation in stack-of-spirals based lung MRI allows for accurate estimation of lung volumes and fractional ventilation values and promises to replace the time-consuming step of manual image segmentation in the future.},
}