@article{SteiningerAbelZiegleretal.2023,
  author   = {Steininger, Michael and Abel, Daniel and Ziegler, Katrin and Krause, Anna and Paeth, Heiko and Hotho, Andreas},
  title    = {ConvMOS: climate model output statistics with deep learning},
  journal  = {Data Mining and Knowledge Discovery},
  volume   = {37},
  number   = {1},
  issn     = {1384-5810},
  doi      = {10.1007/s10618-022-00877-6},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324213},
  pages    = {136--166},
  year     = {2023},
  abstract = {Climate models are the tool of choice for scientists researching climate change. Like all models, they suffer from errors, particularly systematic and location-specific representation errors. One way to reduce these errors is model output statistics (MOS), where the model output is fitted to observational data with machine learning. In this work, we assess the use of convolutional deep learning climate MOS approaches and present the ConvMOS architecture, which is specifically designed based on the observation that there are systematic and location-specific errors in the precipitation estimates of climate models. We apply ConvMOS models to the simulated precipitation of the regional climate model REMO, showing that a combination of per-location model parameters for reducing location-specific errors and global model parameters for reducing systematic errors is indeed beneficial for MOS performance. We find that ConvMOS models can reduce errors considerably and perform significantly better than three commonly used MOS approaches and plain ResNet and U-Net models in most cases. Our results show that non-linear MOS models underestimate the number of extreme precipitation events, which we alleviate by training models specialized towards extreme precipitation events with the imbalanced regression method DenseLoss. While we consider climate MOS, we argue that aspects of ConvMOS may also be beneficial in other domains with geospatial data, such as air pollution modeling or weather forecasts.},
  subject  = {Climate},
  language = {en}
}

@article{MuellerLeppichGeissetal.2023,
  author   = {M{\"u}ller, Konstantin and Leppich, Robert and Gei{\ss}, Christian and Borst, Vanessa and Pelizari, Patrick Aravena and Kounev, Samuel and Taubenb{\"o}ck, Hannes},
  title    = {Deep neural network regression for normalized digital surface model generation with Sentinel-2 imagery},
  journal  = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
  volume   = {16},
  issn     = {1939-1404},
  doi      = {10.1109/JSTARS.2023.3297710},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349424},
  pages    = {8508--8519},
  year     = {2023},
  abstract = {In recent history, normalized digital surface models (nDSMs) have been constantly gaining importance as a means to solve large-scale geographic problems. High-resolution surface models are precious, as they can provide detailed information for a specific area. However, measurements with a high resolution are time consuming and costly. Only a few approaches exist to create high-resolution nDSMs for extensive areas. This article explores approaches to extract high-resolution nDSMs from low-resolution Sentinel-2 data, allowing us to derive large-scale models. We thereby utilize the advantages of Sentinel-2 being open access, having global coverage, and providing steady updates through a high repetition rate. Several deep learning models are trained to overcome the gap in producing high-resolution surface maps from low-resolution input data. With U-Net as a base architecture, we extend the capabilities of our model by integrating tailored multiscale encoders with differently sized kernels in the convolution as well as conformed self-attention inside the skip connection gates. Using pixelwise regression, our U-Net base models can achieve a mean height error of approximately 2 m. Moreover, through our enhancements to the model architecture, we reduce the model error by more than 7\%.},
  language = {en}
}

@article{LeubeGustafssonLassmannetal.2022,
  author   = {Leube, Julian and Gustafsson, Johan and Lassmann, Michael and Salas-Ramirez, Maikol and Tran-Gia, Johannes},
  title    = {Analysis of a deep learning-based method for generation of SPECT projections based on a large Monte Carlo simulated dataset},
  journal  = {EJNMMI Physics},
  volume   = {9},
  issn     = {2197-7364},
  doi      = {10.1186/s40658-022-00476-w},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300697},
  year     = {2022},
  abstract = {Background: In recent years, a lot of effort has been put into the enhancement of medical imaging using artificial intelligence. However, limited patient data in combination with the unavailability of a ground truth often pose a challenge to a systematic validation of such methodologies. The goal of this work was to investigate a recently proposed method for an artificial intelligence-based generation of synthetic SPECT projections, for acceleration of the image acquisition process, based on a large dataset of realistic SPECT simulations. Methods: A database of 10,000 SPECT projection datasets of heterogeneous activity distributions of randomly placed random shapes was simulated for a clinical SPECT/CT system using the SIMIND Monte Carlo program. Synthetic projections at fixed angular increments were generated from a set of input projections at evenly distributed angles by different u-shaped convolutional neural networks (u-nets). These u-nets differed in the noise realization used for the training data, the number of input projections, the projection angle increment, and the number of training/validation datasets. Synthetic projections were generated for 500 test projection datasets for each u-net, and a quantitative analysis was performed using statistical hypothesis tests based on the structural similarity index measure and the normalized root-mean-squared error. Additional simulations with varying detector orbits were performed on a subset of the dataset to study the effect of the detector orbit on the performance of the methodology. For verification of the results, the u-nets were applied to Jaszczak and NEMA physical phantom data obtained on a clinical SPECT/CT system. Results: No statistically significant differences were observed between u-nets trained with different noise realizations. In contrast, a statistically significant deterioration was found for training with a small subset (400 datasets) of the 10,000 simulated projection datasets in comparison with using a large subset (9,500 datasets) for training. A good agreement between synthetic (i.e., u-net generated) and simulated projections before adding noise demonstrates a denoising effect. Finally, the physical phantom measurements show that our findings also apply to projections measured on a clinical SPECT/CT system. Conclusion: Our study shows the large potential of u-nets for accelerating SPECT/CT imaging. In addition, our analysis numerically reveals a denoising effect when generating synthetic projections with a u-net. Of clinical interest, the methodology has proven robust against camera orbit deviations in a clinically realistic range. Lastly, we found that a small number of training samples (e.g., ~400 datasets) may not be sufficient for reliable generalization of the u-net.},
  language = {en}
}