@article{WoznickiLaquaAlHajetal.2023, author = {Woznicki, Piotr and Laqua, Fabian Christopher and Al-Haj, Adam and Bley, Thorsten and Baeßler, Bettina}, title = {Addressing challenges in radiomics research: systematic review and repository of open-access cancer imaging datasets}, series = {Insights into Imaging}, volume = {14}, journal = {Insights into Imaging}, issn = {1869-4101}, doi = {10.1186/s13244-023-01556-w}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357936}, year = {2023}, abstract = {Objectives Open-access cancer imaging datasets have become integral for evaluating novel AI approaches in radiology. However, their use in quantitative analysis with radiomics features presents unique challenges, such as incomplete documentation, low visibility, non-uniform data formats, data inhomogeneity, and complex preprocessing. These issues may cause problems with reproducibility and standardization in radiomics studies. Methods We systematically reviewed imaging datasets with public copyright licenses, published up to March 2023 across four large online cancer imaging archives. We included only datasets with tomographic images (CT, MRI, or PET), segmentations, and clinical annotations, specifically identifying those suitable for radiomics research. Reproducible preprocessing and feature extraction were performed for each dataset to enable their easy reuse. Results We discovered 29 datasets with corresponding segmentations and labels in the form of health outcomes, tumor pathology, staging, imaging-based scores, genetic markers, or repeated imaging. We compiled a repository encompassing 10,354 patients and 49,515 scans. Of the 29 datasets, 15 were licensed under Creative Commons licenses, allowing both non-commercial and commercial usage and redistribution, while others featured custom or restricted licenses. Studies spanned from the early 1990s to 2021, with the majority concluding after 2013. Seven different formats were used for the imaging data. Preprocessing and feature extraction were successfully performed for each dataset. Conclusion RadiomicsHub is a comprehensive public repository with radiomics features derived from a systematic review of public cancer imaging datasets. By converting all datasets to a standardized format and ensuring reproducible and traceable processing, RadiomicsHub addresses key reproducibility and standardization challenges in radiomics. Critical relevance statement This study critically addresses the challenges associated with locating, preprocessing, and extracting quantitative features from open-access datasets, to facilitate more robust and reliable evaluations of radiomics models. Key points - Through a systematic review, we identified 29 cancer imaging datasets suitable for radiomics research. - A public repository with collection overview and radiomics features, encompassing 10,354 patients and 49,515 scans, was compiled. - Most datasets can be shared, used, and built upon freely under a Creative Commons license. 
- All 29 identified datasets have been converted into a common format to enable reproducible radiomics feature extraction.}, language = {en} } @article{RosalesAlvarezRettkowskiHermanetal.2023, author = {Rosales-Alvarez, Reyna Edith and Rettkowski, Jasmin and Herman, Josip Stefan and Dumbović, Gabrijela and Cabezas-Wallscheid, Nina and Gr{\"u}n, Dominic}, title = {VarID2 quantifies gene expression noise dynamics and unveils functional heterogeneity of ageing hematopoietic stem cells}, series = {Genome Biology}, volume = {24}, journal = {Genome Biology}, doi = {10.1186/s13059-023-02974-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-358042}, year = {2023}, abstract = {Variability of gene expression due to stochasticity of transcription or variation of extrinsic signals, termed biological noise, is a potential driving force of cellular differentiation. Utilizing single-cell RNA-sequencing, we develop VarID2 for the quantification of biological noise at single-cell resolution. VarID2 reveals enhanced nuclear versus cytoplasmic noise, and distinct regulatory modes stratified by correlation between noise, expression, and chromatin accessibility. Noise levels are minimal in murine hematopoietic stem cells (HSCs) and increase during differentiation and ageing. Differential noise identifies myeloid-biased Dlk1+ long-term HSCs in aged mice with enhanced quiescence and self-renewal capacity. VarID2 reveals noise dynamics invisible to conventional single-cell transcriptome analysis.}, language = {en} } @article{WehrheimFaskowitzSpornsetal.2023, author = {Wehrheim, Maren H. and Faskowitz, Joshua and Sporns, Olaf and Fiebach, Christian J. and Kaschube, Matthias and Hilger, Kirsten}, title = {Few temporally distributed brain connectivity states predict human cognitive abilities}, series = {NeuroImage}, volume = {277}, journal = {NeuroImage}, doi = {10.1016/j.neuroimage.2023.120246}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349874}, year = {2023}, abstract = {Highlights • Brain connectivity states identified by cofluctuation strength. • CMEP as a new method to robustly predict human traits from brain imaging data. • Network-identifying connectivity 'events' are not predictive of cognitive ability. • Sixteen temporally independent fMRI time frames allow for significant prediction. • Neuroimaging-based assessment of cognitive ability requires sufficient scan lengths. Abstract Human functional brain connectivity can be temporally decomposed into states of high and low cofluctuation, defined as coactivation of brain regions over time. Rare states of particularly high cofluctuation have been shown to reflect fundamentals of intrinsic functional network architecture and to be highly subject-specific. However, it is unclear whether such network-defining states also contribute to individual variations in cognitive abilities - which strongly rely on the interactions among distributed brain regions. By introducing CMEP, a new eigenvector-based prediction framework, we show that as few as 16 temporally separated time frames (< 1.5\% of 10 min resting-state fMRI) can significantly predict individual differences in intelligence (N = 263, p < .001). Against previous expectations, individuals' network-defining time frames of particularly high cofluctuation do not predict intelligence. Multiple functional brain networks contribute to the prediction, and all results replicate in an independent sample (N = 831).
Our results suggest that although fundamentals of person-specific functional connectomes can be derived from a few time frames of highest connectivity, temporally distributed information is necessary to extract information about cognitive abilities. This information is not restricted to specific connectivity states, like network-defining high-cofluctuation states, but is rather reflected across the entire length of the brain connectivity time series.}, language = {en} } @article{KrenzerHeilFittingetal2023, author = {Krenzer, Adrian and Heil, Stefan and Fitting, Daniel and Matti, Safa and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Automated classification of polyps using deep learning architectures and few-shot learning}, series = {BMC Medical Imaging}, volume = {23}, journal = {BMC Medical Imaging}, doi = {10.1186/s12880-023-01007-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357465}, year = {2023}, abstract = {Background Colorectal cancer (CRC) is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is a colonoscopy. However, not all colon polyps are at risk of becoming cancerous. Therefore, polyps are classified using different classification systems, and further treatment and procedures are based on the polyp's classification. Nevertheless, classification is not easy. Therefore, we propose two novel automated classification systems assisting gastroenterologists in classifying polyps based on the NICE and Paris classifications. Methods We build two classification systems. One classifies polyps based on their shape (Paris). The other classifies polyps based on their texture and surface patterns (NICE). A two-step process for the Paris classification is introduced: first, detecting and cropping the polyp in the image, and second, classifying the polyp based on the cropped area with a transformer network. For the NICE classification, we design a few-shot learning algorithm based on the Deep Metric Learning approach. The algorithm creates an embedding space for polyps, which allows classification from a few examples to account for the scarcity of NICE-annotated images in our database. Results For the Paris classification, we achieve an accuracy of 89.35 \%, surpassing all previous results in the literature and establishing a new state-of-the-art and baseline accuracy for other publications on a public data set. For the NICE classification, we achieve a competitive accuracy of 81.13 \%, thereby demonstrating the viability of the few-shot learning paradigm for polyp classification in data-scarce environments. Additionally, we present ablations of the algorithms. Finally, we further elaborate on the explainability of the system by showing heat maps that visualize the neural activations. Conclusion Overall, we introduce two polyp classification systems to assist gastroenterologists.
We achieve state-of-the-art performance in the Paris classification and demonstrate the viability of the few-shot learning paradigm in the NICE classification, addressing the prevalent data scarcity issues faced in medical machine learning.}, language = {en} } @article{BeierlePryssAizawa2023, author = {Beierle, Felix and Pryss, R{\"u}diger and Aizawa, Akiko}, title = {Sentiments about mental health on Twitter — before and during the COVID-19 pandemic}, series = {Healthcare}, volume = {11}, journal = {Healthcare}, number = {21}, issn = {2227-9032}, doi = {10.3390/healthcare11212893}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-355192}, year = {2023}, abstract = {During the COVID-19 pandemic, the novel coronavirus had an impact not only on public health but also on the mental health of the population. Public sentiment on mental health and depression is often captured only in small, survey-based studies, while work based on Twitter data often only looks at the period during the pandemic and does not make comparisons with the pre-pandemic situation. We collected tweets that included the hashtags \#MentalHealth and \#Depression from before and during the pandemic (8.5 months each). We used LDA (Latent Dirichlet Allocation) for topic modeling and LIWC, VADER, and NRC for sentiment analysis. We used three machine-learning classifiers to seek evidence regarding an automatically detectable change in tweets before vs. during the pandemic: (1) based on TF-IDF values, (2) based on the values from the sentiment libraries, (3) based on tweet content (deep-learning BERT classifier). Topic modeling revealed that Twitter users who explicitly used the hashtags \#Depression and especially \#MentalHealth did so to raise awareness. We observed an overall positive sentiment, and in tough times such as during the COVID-19 pandemic, tweets with \#MentalHealth were often associated with gratitude. Among the three classification approaches, the BERT classifier showed the best performance, with an accuracy of 81\% for \#MentalHealth and 79\% for \#Depression. Although the data may have come from users familiar with mental health, these findings can help gauge public sentiment on the topic. The combination of (1) sentiment analysis, (2) topic modeling, and (3) tweet classification with machine learning proved useful in gaining comprehensive insight into public sentiment and could be applied to other data sources and topics.}, language = {en} } @article{GriebelSegebarthSteinetal.2023, author = {Griebel, Matthias and Segebarth, Dennis and Stein, Nikolai and Schukraft, Nina and Tovote, Philip and Blum, Robert and Flath, Christoph M.}, title = {Deep learning-enabled segmentation of ambiguous bioimages with deepflash2}, series = {Nature Communications}, volume = {14}, journal = {Nature Communications}, doi = {10.1038/s41467-023-36960-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357286}, year = {2023}, abstract = {Bioimages frequently exhibit low signal-to-noise ratios due to experimental conditions, specimen characteristics, and imaging trade-offs. Reliable segmentation of such ambiguous images is difficult and laborious. Here we introduce deepflash2, a deep learning-enabled segmentation tool for bioimage analysis. The tool addresses typical challenges that may arise during the training, evaluation, and application of deep learning models on ambiguous data. The tool's training and evaluation pipeline uses multiple expert annotations and deep model ensembles to achieve accurate results. 
The application pipeline supports various use-cases for expert annotations and includes a quality assurance mechanism in the form of uncertainty measures. Benchmarked against other tools, deepflash2 offers both high predictive accuracy and efficient computational resource usage. The tool is built upon established deep learning libraries and enables sharing of trained model ensembles with the research community. deepflash2 aims to simplify the integration of deep learning into bioimage analysis projects while improving accuracy and reliability.}, language = {en} } @article{VollmerNaglerHoerneretal.2023, author = {Vollmer, Andreas and Nagler, Simon and H{\"o}rner, Marius and Hartmann, Stefan and Brands, Roman C. and Breitenb{\"u}cher, Niko and Straub, Anton and K{\"u}bler, Alexander and Vollmer, Michael and Gubik, Sebastian and Lang, Gernot and Wollborn, Jakob and Saravi, Babak}, title = {Performance of artificial intelligence-based algorithms to predict prolonged length of stay after head and neck cancer surgery}, series = {Heliyon}, volume = {9}, journal = {Heliyon}, number = {11}, issn = {2405-8440}, doi = {10.1016/j.heliyon.2023.e20752}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-350416}, year = {2023}, abstract = {Background Medical resource management can be improved by assessing the likelihood of prolonged length of stay (LOS) for head and neck cancer surgery patients. The objective of this study was to develop predictive models that could be used to determine whether a patient's LOS after cancer surgery falls within the normal range of the cohort. Methods We conducted a retrospective analysis of a dataset consisting of 300 consecutive patients who underwent head and neck cancer surgery between 2017 and 2022 at a single university medical center. Prolonged LOS was defined as LOS exceeding the 75th percentile of the cohort. Feature importance analysis was performed to evaluate the most important predictors for prolonged LOS. We then constructed 7 machine learning and deep learning algorithms for the prediction modeling of prolonged LOS. Results The algorithms reached accuracy values ranging from 75.40 (radial basis function neural network) to 97.92 (Random Trees) for the training set and from 64.90 (multilayer perceptron neural network) to 84.14 (Random Trees) for the testing set. The leading parameters predicting prolonged LOS were operation time, ischemia time, the graft used, the ASA score, the intensive care stay, and the pathological stages. The results revealed that patients who had a higher number of harvested lymph nodes (LN) had a lower probability of recurrence but also a greater LOS. However, patients with prolonged LOS were also at greater risk of recurrence, particularly when fewer LN were extracted. Further, LOS was more strongly correlated with the overall number of extracted lymph nodes than with the number of positive lymph nodes or the ratio of positive to overall extracted lymph nodes, indicating that unnecessary lymph node extraction in particular might be associated with prolonged LOS. Conclusions The results emphasize the need for a closer follow-up of patients who experience prolonged LOS.
Prospective trials are warranted to validate the present results.}, language = {en} } @article{CaliskanCaliskanRasbachetal.2023, author = {Caliskan, Aylin and Caliskan, Deniz and Rasbach, Lauritz and Yu, Weimeng and Dandekar, Thomas and Breitenbach, Tim}, title = {Optimized cell type signatures revealed from single-cell data by combining principal feature analysis, mutual information, and machine learning}, series = {Computational and Structural Biotechnology Journal}, volume = {21}, journal = {Computational and Structural Biotechnology Journal}, issn = {2001-0370}, doi = {10.1016/j.csbj.2023.06.002}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349989}, pages = {3293-3314}, year = {2023}, abstract = {Machine learning techniques are excellent tools for analyzing expression data from single cells. These techniques impact fields ranging from cell annotation and clustering to signature identification. The presented framework evaluates how well gene selection sets separate defined phenotypes or cell groups. This innovation overcomes the current limitation of objectively and correctly identifying a small gene set of high information content for separating phenotypes; corresponding code scripts are provided. The small but meaningful subset of the original genes (or feature space) facilitates human interpretation of the differences between the phenotypes, including those found by machine learning, and may even turn correlations between genes and phenotypes into a causal explanation. For the feature selection task, principal feature analysis (PFA) is utilized, which reduces redundant information while selecting genes that carry the information for separating the phenotypes. In this context, the presented framework demonstrates the explainability of unsupervised learning, as it reveals cell-type-specific signatures. Apart from a Seurat preprocessing tool and the PFA script, the pipeline uses mutual information to balance accuracy and size of the gene set if desired. A validation part to evaluate the gene selection for its information content regarding the separation of the phenotypes is provided as well; binary and multiclass classification of 3 or 4 groups are studied. Results from different single-cell datasets are presented. In each, only about ten out of more than 30,000 genes are identified as carrying the relevant information. The code is provided in a GitHub repository at https://github.com/AC-PHD/Seurat_PFA_pipeline.}, language = {en} } @article{DresiaKurudzijaDeekenetal.2023, author = {Dresia, Kai and Kurudzija, Eldin and Deeken, Jan and Waxenegger-Wilfing, G{\"u}nther}, title = {Improved wall temperature prediction for the LUMEN rocket combustion chamber with neural networks}, series = {Aerospace}, volume = {10}, journal = {Aerospace}, number = {5}, issn = {2226-4310}, doi = {10.3390/aerospace10050450}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319169}, year = {2023}, abstract = {Accurate calculations of the heat transfer and the resulting maximum wall temperature are essential for the optimal design of reliable and efficient regenerative cooling systems. However, predicting the heat transfer of supercritical methane flowing in cooling channels of a regeneratively cooled rocket combustor presents a significant challenge. High-fidelity CFD calculations provide sufficient accuracy but are computationally too expensive to be used within elaborate design optimization routines.
In previous work, it was shown that a surrogate model based on neural networks is able to predict the maximum wall temperature along straight cooling channels with convincing precision when trained with data from CFD simulations for simple cooling channel segments. In this paper, the methodology is extended to cooling channels with curvature. The predictions of the extended model are tested against CFD simulations with different boundary conditions for the representative LUMEN combustor contour with varying geometries and heat flux densities. The high accuracy of the extended model's predictions suggests that it will be a valuable tool for designing and analyzing regenerative cooling systems with greater efficiency and effectiveness.}, language = {en} } @phdthesis{Weigand2024, author = {Weigand, Matthias Johann}, title = {Fernerkundung und maschinelles Lernen zur Erfassung von urbanem Gr{\"u}n - Eine Analyse am Beispiel der Verteilungsgerechtigkeit in Deutschland}, doi = {10.25972/OPUS-34961}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349610}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Green spaces are among the most important environmental factors in people's residential environment. On the one hand, they have a positive effect on people's physical and mental health; on the other hand, green spaces can also mitigate the negative effects of other factors, such as the heat events that are becoming more frequent in the course of climate change. Nevertheless, green spaces are not equally accessible to the entire population. Existing research in the context of environmental justice (EJ) has already shown that different socio-economic and demographic groups of the German population have differing access to green spaces. Existing analyses of environmental factors in the EJ context have been criticized for evaluating geographic data at an overly aggregated level, so that locally specific exposures are no longer accurately represented; this applies in particular to large-scale studies. Important spatial information is thus lost. Yet modern Earth observation and geodata are more detailed than ever, and machine learning methods enable their efficient processing to derive higher-value information. The overarching goal of this thesis is to demonstrate and carry out, using the example of green spaces in Germany, the methodological steps for systematically transforming comprehensive geodata into relevant geoinformation for the large-scale, high-resolution analysis of environmental characteristics. At the interface of the disciplines of remote sensing, geoinformatics, social geography, and environmental justice research, the potential of modern methods for improving the spatial and semantic resolution of geoinformation is explored. To this end, machine learning methods are employed to map land cover and land use at the national level. These developments are intended to help close existing data gaps and shed light on the distributional equity of green spaces. This dissertation is structured into three conceptual parts.
In the first part of the study, Earth observation data from the Sentinel-2 satellites are used for the Germany-wide classification of land cover information. A machine learning model is trained in combination with point-based reference data on land cover and land use from the Europe-wide Land Use and Coverage Area Frame Survey (LUCAS). In this context, different preprocessing steps of the LUCAS data and their influence on the classification accuracy are examined. The classification approach is able to derive land cover information with high accuracy even in complex urban areas. One result of this part of the study is a Germany-wide land cover classification with an overall accuracy of 93.07 \%, which is used later in the thesis to spatially quantify green land cover (GLC). The second conceptual part of the thesis focuses on a differentiated view of green spaces, using the example of public green spaces (PGS), which are a frequent subject of EJ research. However, a frequently used source of spatial data on public green spaces, the European Urban Atlas (EUA), has so far not been compiled for all of Germany. This part of the study follows a data-driven approach to determine the availability of public green space at the spatial level of neighborhoods for the whole of Germany, using areas already covered by the EUA as a reference. Combining Earth observation data with information from the OpenStreetMap project, a deep learning-based fusion network is built that quantifies the available area of public green space. The result of this step is a model that is used to estimate the amount of public green space in a neighborhood ($R^2 = 0.952$). The third part of the study takes up the results of the first two parts and examines the distribution of green spaces in Germany, adding georeferenced population data. This exemplary analysis distinguishes between two types of green space: GLC and PGS. First, descriptive statistics are used to illuminate the general distribution of green space across the German population. The distributional equity is then quantified using common equity metrics. Finally, the relationships between the demographic composition of a neighborhood and the available amount of green space are examined for three exemplary sociodemographic groups. The analysis shows strong differences in the availability of PGS between urban and rural areas. A higher percentage of the urban population has access to the minimum amount of PGS recommended by the World Health Organization. The results also reveal a clear difference in distributional equity between GLC and PGS, underlining the relevance of distinguishing between types of green space for such studies. The concluding examination of different population groups works out differences at the sociodemographic level.
Taken together, this thesis demonstrates how modern geodata and machine learning methods can be used to overcome previous limitations of spatial datasets. Using the example of green spaces in the residential environment of the German population, it shows that nationwide environmental justice analyses can be enriched by high-resolution, locally fine-grained geographic information. This thesis illustrates how methods from Earth observation and geoinformatics can make an important contribution to identifying inequalities in people's residential environments and, ultimately, to supporting and monitoring sustainable settlement development with objective information.}, subject = {Geografie}, language = {de} }