@article{MuellerLeppichGeissetal.2023, author = {M{\"u}ller, Konstantin and Leppich, Robert and Gei{\ss}, Christian and Borst, Vanessa and Pelizari, Patrick Aravena and Kounev, Samuel and Taubenb{\"o}ck, Hannes}, title = {Deep neural network regression for normalized digital surface model generation with {Sentinel-2} imagery}, series = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, volume = {16}, journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, issn = {1939-1404}, doi = {10.1109/JSTARS.2023.3297710}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349424}, pages = {8508--8519}, year = {2023}, abstract = {In recent history, normalized digital surface models (nDSMs) have been constantly gaining importance as a means to solve large-scale geographic problems. High-resolution surface models are precious, as they can provide detailed information for a specific area. However, measurements with a high resolution are time consuming and costly. Only a few approaches exist to create high-resolution nDSMs for extensive areas. This article explores approaches to extract high-resolution nDSMs from low-resolution Sentinel-2 data, allowing us to derive large-scale models. We thereby utilize the advantages of Sentinel 2 being open access, having global coverage, and providing steady updates through a high repetition rate. Several deep learning models are trained to overcome the gap in producing high-resolution surface maps from low-resolution input data. With U-Net as a base architecture, we extend the capabilities of our model by integrating tailored multiscale encoders with differently sized kernels in the convolution as well as conformed self-attention inside the skip connection gates. Using pixelwise regression, our U-Net base models can achieve a mean height error of approximately 2 m. 
Moreover, through our enhancements to the model architecture, we reduce the model error by more than 7\%.}, language = {en} } @article{SteiningerAbelZiegleretal.2023, author = {Steininger, Michael and Abel, Daniel and Ziegler, Katrin and Krause, Anna and Paeth, Heiko and Hotho, Andreas}, title = {{ConvMOS}: climate model output statistics with deep learning}, series = {Data Mining and Knowledge Discovery}, volume = {37}, journal = {Data Mining and Knowledge Discovery}, number = {1}, issn = {1384-5810}, doi = {10.1007/s10618-022-00877-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324213}, pages = {136--166}, year = {2023}, abstract = {Climate models are the tool of choice for scientists researching climate change. Like all models they suffer from errors, particularly systematic and location-specific representation errors. One way to reduce these errors is model output statistics (MOS) where the model output is fitted to observational data with machine learning. In this work, we assess the use of convolutional Deep Learning climate MOS approaches and present the ConvMOS architecture which is specifically designed based on the observation that there are systematic and location-specific errors in the precipitation estimates of climate models. We apply ConvMOS models to the simulated precipitation of the regional climate model REMO, showing that a combination of per-location model parameters for reducing location-specific errors and global model parameters for reducing systematic errors is indeed beneficial for MOS performance. We find that ConvMOS models can reduce errors considerably and perform significantly better than three commonly used MOS approaches and plain ResNet and U-Net models in most cases. Our results show that non-linear MOS models underestimate the number of extreme precipitation events, which we alleviate by training models specialized towards extreme precipitation events with the imbalanced regression method DenseLoss. 
While we consider climate MOS, we argue that aspects of ConvMOS may also be beneficial in other domains with geospatial data, such as air pollution modeling or weather forecasts.}, subject = {Klima}, language = {en} } @phdthesis{Philipp2023, author = {Philipp, Marius Balthasar}, title = {Quantifying the Effects of Permafrost Degradation in Arctic Coastal Environments via Satellite Earth Observation}, doi = {10.25972/OPUS-34563}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-345634}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Permafrost degradation is observed all over the world as a consequence of climate change and the associated Arctic amplification, which has severe implications for the environment. Landslides, increased rates of surface deformation, rising likelihood of infrastructure damage, amplified coastal erosion rates, and the potential turnover of permafrost from a carbon sink to a carbon source are thereby exemplary implications linked to the thawing of frozen ground material. In this context, satellite earth observation is a potent tool for the identification and continuous monitoring of relevant processes and features on a cheap, long-term, spatially explicit, and operational basis as well as up to a circumpolar scale. A total of 325 articles published in 30 different international journals during the past two decades were investigated on the basis of studied environmental foci, remote sensing platforms, sensor combinations, applied spatio-temporal resolutions, and study locations in an extensive review on past achievements, current trends, as well as future potentials and challenges of satellite earth observation for permafrost related analyses. The development of analysed environmental subjects, utilized sensors and platforms, and the number of annually published articles over time are addressed in detail. 
Studies linked to atmospheric features and processes, such as the release of greenhouse gas emissions, appear to be strongly under-represented. Investigations on the spatial distribution of study locations revealed distinct study clusters across the Arctic. At the same time, large sections of the continuous permafrost domain are only poorly covered and remain to be investigated in detail. A general trend towards increasing attention in satellite earth observation of permafrost and related processes and features was observed. The overall amount of published articles hereby more than doubled since the year 2015. New sources of satellite data, such as the Sentinel satellites and the Methane Remote Sensing LiDAR Mission (Merlin), as well as novel methodological approaches, such as data fusion and deep learning, will thereby likely improve our understanding of the thermal state and distribution of permafrost, and the effects of its degradation. Furthermore, cloud-based big data processing platforms (e.g. Google Earth Engine (GEE)) will further enable sophisticated and long-term analyses on increasingly larger scales and at high spatial resolutions. In this thesis, a specific focus was put on Arctic permafrost coasts, which feature increasing vulnerability to environmental parameters, such as the thawing of frozen ground, and are therefore associated with amplified erosion rates. In particular, a novel monitoring framework for quantifying Arctic coastal erosion rates within the permafrost domain at high spatial resolution and on a circum-Arctic scale is presented within this thesis. Challenging illumination conditions and frequent cloud cover restrict the applicability of optical satellite imagery in Arctic regions. In order to overcome these limitations, Synthetic Aperture RADAR (SAR) data derived from Sentinel-1 (S1), which is largely independent from sun illumination and weather conditions, was utilized. 
Annual SAR composites covering the months June-September were combined with a Deep Learning (DL) framework and a Change Vector Analysis (CVA) approach to generate both a high-quality and circum-Arctic coastline product as well as a coastal change product that highlights areas of erosion and build-up. Annual composites in the form of standard deviation (sd) and median backscatter were computed and used as inputs for both the DL framework and the CVA coastal change quantification. The final DL-based coastline product covered a total of 161,600 km of Arctic coastline and featured a median accuracy of ±6.3 m to the manually digitized reference data. Annual coastal change quantification between 2017-2021 indicated erosion rates of up to 67 m per year for some areas based on 400 m coastal segments. In total, 12.24\% of the investigated coastline featured an average erosion rate of 3.8 m per year, which corresponds to 17.83 km2 of annually eroded land area. Multiple quality layers associated to both products, the generated DL-coastline and the coastal change rates, are provided on a pixel basis to further assess the accuracy and applicability of the proposed data, methods, and products. Lastly, the extracted circum-Arctic erosion rates were utilized as a basis in an experimental framework for estimating the amount of permafrost and carbon loss as a result of eroding permafrost coastlines. Information on permafrost fraction, Active Layer Thickness (ALT), soil carbon content, and surface elevation were thereby combined with the aforementioned erosion rates. While the proposed experimental framework provides a valuable outline for quantifying the volume loss of frozen ground and carbon release, extensive validation of the utilized environmental products and resulting volume loss numbers based on 200 m segments are necessary. 
Furthermore, data of higher spatial resolution and information of carbon content for deeper soil depths are required for more accurate estimates.}, subject = {Dauerfrostboden}, language = {en} } @phdthesis{Steininger2023, author = {Steininger, Michael}, title = {Deep Learning for Geospatial Environmental Regression}, doi = {10.25972/OPUS-31312}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313121}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Environmental issues have emerged especially since humans burned fossil fuels, which led to air pollution and climate change that harm the environment. These issues' substantial consequences evoked strong efforts towards assessing the state of our environment. Various environmental machine learning (ML) tasks aid these efforts. These tasks concern environmental data but are common ML tasks otherwise, i.e., datasets are split (training, validation, test), hyperparameters are optimized on validation data, and test set metrics measure a model's generalizability. This work focuses on the following environmental ML tasks: Regarding air pollution, land use regression (LUR) estimates air pollutant concentrations at locations where no measurements are available based on measured locations and each location's land use (e.g., industry, streets). For LUR, this work uses data from London (modeled) and Zurich (measured). Concerning climate change, a common ML task is model output statistics (MOS), where a climate model's output for a study area is altered to better fit Earth observations and provide more accurate climate data. This work uses the regional climate model (RCM) REMO and Earth observations from the E-OBS dataset for MOS. Another task regarding climate is grain size distribution interpolation where soil properties at locations without measurements are estimated based on the few measured locations. This can provide climate models with soil information, which is important for hydrology. 
For this task, data from Lower Franconia is used. Such environmental ML tasks commonly have a number of properties: (i) geospatiality, i.e., their data refers to locations relative to the Earth's surface. (ii) The environmental variables to estimate or predict are usually continuous. (iii) Data can be imbalanced due to relatively rare extreme events (e.g., extreme precipitation). (iv) Multiple related potential target variables can be available per location, since measurement devices often contain different sensors. (v) Labels are spatially often only sparsely available since conducting measurements at all locations of interest is usually infeasible. These properties present challenges but also opportunities when designing ML methods for such tasks. In the past, environmental ML tasks have been tackled with conventional ML methods, such as linear regression or random forests (RFs). However, the field of ML has made tremendous leaps beyond these classic models through deep learning (DL). In DL, models use multiple layers of neurons, producing increasingly higher-level feature representations with growing layer depth. DL has made previously infeasible ML tasks feasible, improved the performance for many tasks in comparison to existing ML models significantly, and eliminated the need for manual feature engineering in some domains due to its ability to learn features from raw data. To harness these advantages for environmental domains it is promising to develop novel DL methods for environmental ML tasks. This thesis presents methods for dealing with special challenges and exploiting opportunities inherent to environmental ML tasks in conjunction with DL. To this end, the proposed methods explore the following techniques: (i) Convolutions as in convolutional neural networks (CNNs) to exploit reoccurring spatial patterns in geospatial data. (ii) Posing the problems as regression tasks to estimate the continuous variables. 
(iii) Density-based weighting to improve estimation performance for rare and extreme events. (iv) Multi-task learning to make use of multiple related target variables. (v) Semi-supervised learning to cope with label sparsity. Using these techniques, this thesis considers four research questions: (i) Can air pollution be estimated without manual feature engineering? This is answered positively by the introduction of the CNN-based LUR model MapLUR as well as the off-the-shelf LUR solution OpenLUR. (ii) Can colocated pollution data improve spatial air pollution models? Multi-task learning for LUR is developed for this, showing potential for improvements with colocated data. (iii) Can DL models improve the quality of climate model outputs? The proposed DL climate MOS architecture ConvMOS demonstrates this. Additionally, semi-supervised training of multilayer perceptrons (MLPs) for grain size distribution interpolation is presented, which can provide improved input data. (iv) Can DL models be taught to better estimate climate extremes? To this end, density-based weighting for imbalanced regression (DenseLoss) is proposed and applied to the DL architecture ConvMOS, improving climate extremes estimation. These methods show how especially DL techniques can be developed for environmental ML tasks with their special characteristics in mind. 
This allows for better models than previously possible with conventional ML, leading to more accurate assessment and better understanding of the state of our environment.}, subject = {Deep learning}, language = {en} } @phdthesis{Somody2023, author = {Somody, Joseph Christian Campbell}, title = {Leveraging deep learning for identification and structural determination of novel protein complexes from \emph{in situ} electron cryotomography of \emph{Mycoplasma pneumoniae}}, doi = {10.25972/OPUS-31344}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313447}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The holy grail of structural biology is to study a protein in situ, and this goal has been fast approaching since the resolution revolution and the achievement of atomic resolution. A cell's interior is not a dilute environment, and proteins have evolved to fold and function as needed in that environment; as such, an investigation of a cellular component should ideally include the full complexity of the cellular environment. Imaging whole cells in three dimensions using electron cryotomography is the best method to accomplish this goal, but it comes with a limitation on sample thickness and produces noisy data unamenable to direct analysis. This thesis establishes a novel workflow to systematically analyse whole-cell electron cryotomography data in three dimensions and to find and identify instances of protein complexes in the data to set up a determination of their structure and identity for success. Mycoplasma pneumoniae is a very small parasitic bacterium with fewer than 700 protein-coding genes, is thin enough and small enough to be imaged in large quantities by electron cryotomography, and can grow directly on the grids used for imaging, making it ideal for exploratory studies in structural proteomics. As part of the workflow, a methodology for training deep-learning-based particle-picking models is established. 
As a proof of principle, a dataset of whole-cell Mycoplasma pneumoniae tomograms is used with this workflow to characterize a novel membrane-associated complex observed in the data. Ultimately, 25431 such particles are picked from 353 tomograms and refined to a density map with a resolution of 11 {\AA}. Making good use of orthogonal datasets to filter search space and verify results, structures were predicted for candidate proteins and checked for suitable fit in the density map. In the end, with this approach, nine proteins were found to be part of the complex, which appears to be associated with chaperone activity and interact with translocon machinery. Visual proteomics refers to the ultimate potential of in situ electron cryotomography: the comprehensive interpretation of tomograms. The workflow presented here is demonstrated to help in reaching that potential.}, subject = {Kryoelektronenmikroskopie}, language = {en} }