@article{HoeserBachoferKuenzer2020,
  author   = {Hoeser, Thorsten and Bachofer, Felix and Kuenzer, Claudia},
  title    = {Object Detection and Image Segmentation with Deep Learning on {Earth} Observation Data: A Review --- Part {II}: Applications},
  journal  = {Remote Sensing},
  volume   = {12},
  number   = {18},
  year     = {2020},
  issn     = {2072-4292},
  doi      = {10.3390/rs12183053},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-213152},
  abstract = {In Earth observation (EO), large-scale land-surface dynamics are traditionally analyzed by investigating aggregated classes. The increase in data with a very high spatial resolution enables investigations on a fine-grained feature level which can help us to better understand the dynamics of land surfaces by taking object dynamics into account. To extract fine-grained features and objects, the most popular deep-learning model for image analysis is commonly used: the convolutional neural network (CNN). In this review, we provide a comprehensive overview of the impact of deep learning on EO applications by reviewing 429 studies on image segmentation and object detection with CNNs. We extensively examine the spatial distribution of study sites, employed sensors, used datasets and CNN architectures, and give a thorough overview of applications in EO which used CNNs. Our main finding is that CNNs are in an advanced transition phase from computer vision to EO. Upon this, we argue that in the near future, investigations which analyze object dynamics with CNNs will have a significant impact on EO research. With a focus on EO applications in this Part II, we complete the methodological review provided in Part I.},
  language = {en},
}

@article{HoeserKuenzer2020,
  author   = {Hoeser, Thorsten and Kuenzer, Claudia},
  title    = {Object Detection and Image Segmentation with Deep Learning on {Earth} Observation Data: A Review --- Part {I}: Evolution and Recent Trends},
  journal  = {Remote Sensing},
  volume   = {12},
  number   = {10},
  year     = {2020},
  issn     = {2072-4292},
  doi      = {10.3390/rs12101667},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205918},
  abstract = {Deep learning (DL) has great influence on large parts of science and increasingly established itself as an adaptive method for new challenges in the field of Earth observation (EO). Nevertheless, the entry barriers for EO researchers are high due to the dense and rapidly developing field mainly driven by advances in computer vision (CV). To lower the barriers for researchers in EO, this review gives an overview of the evolution of DL with a focus on image segmentation and object detection in convolutional neural networks (CNN). The survey starts in 2012, when a CNN set new standards in image recognition, and lasts until late 2019. Thereby, we highlight the connections between the most important CNN architectures and cornerstones coming from CV in order to alleviate the evaluation of modern DL models. Furthermore, we briefly outline the evolution of the most popular DL frameworks and provide a summary of datasets in EO. By discussing well performing DL architectures on these datasets as well as reflecting on advances made in CV and their impact on future research in EO, we narrow the gap between the reviewed, theoretical concepts from CV and practical application in EO.},
  language = {en},
}

@article{JordanJovicGilbertetal.2020,
  author   = {Jordan, Martin C. and Jovic, Sebastian and Gilbert, Fabian and Kunz, Andreas and Ertl, Maximilian and Strobl, Ute and Jakubietz, Rafael G. and Jakubietz, Michael G. and Meffert, Rainer H. and Fuchs, Konrad F.},
  title    = {{Qualit{\"a}tssteigerung} der {Abrechnungspr{\"u}fung} durch {Smartphone}-basierte {Fotodokumentation} in der {Unfall}-, {Hand}-, und {Plastischen} {Chirurgie}},
  journal  = {Der Unfallchirurg},
  volume   = {124},
  pages    = {366--372},
  year     = {2020},
  issn     = {0177-5537},
  doi      = {10.1007/s00113-020-00866-8},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-232415},
  abstract = {Hintergrund Die Fotodokumentation von offenen Frakturen, Wunden, Dekubitalulzera, Tumoren oder Infektionen ist ein wichtiger Bestandteil der digitalen Patientenakte. Bisher ist unklar, welchen Stellenwert diese Fotodokumentation bei der Abrechnungspr{\"u}fung durch den Medizinischen Dienst der Krankenkassen (MDK) hat. Fragestellung Kann eine Smartphone-basierte Fotodokumentation die Verteidigung von erl{\"o}srelevanten Diagnosen und Prozeduren sowie der Verweildauer verbessern? Material und Methoden Ausstattung der Mitarbeiter mit digitalen Endger{\"a}ten (Smartphone/Tablet) in den Bereichen Notaufnahme, Schockraum, OP, Sprechstunden sowie auf den Stationen. Retrospektive Auswertung der Abrechnungspr{\"u}fung im Jahr 2019 und Identifikation aller Fallbesprechungen, in denen die Fotodokumentation eine Erl{\"o}sver{\"a}nderung bewirkt hat. Ergebnisse Von insgesamt 372 Fallbesprechungen half die Fotodokumentation in 27 F{\"a}llen (7,2 \%) zur Best{\"a}tigung eines Operationen- und Prozedurenschl{\"u}ssels (OPS) (n = 5; 1,3 \%), einer Hauptdiagnose (n = 10; 2,7 \%), einer Nebendiagnose (n = 3; 0,8 \%) oder der Krankenhausverweildauer (n = 9; 2,4 \%). Pro oben genanntem Fall mit Fotodokumentation ergab sich eine durchschnittliche Erl{\"o}ssteigerung von 2119 €. Inklusive Aufwandpauschale f{\"u}r die Verhandlungen wurde somit ein Gesamtbetrag von 65.328 € verteidigt. Diskussion Der Einsatz einer Smartphone-basierten Fotodokumentation kann die Qualit{\"a}t der Dokumentation verbessern und Erl{\"o}seinbußen bei der Abrechnungspr{\"u}fung verhindern. Die Implementierung digitaler Endger{\"a}te mit entsprechender Software ist ein wichtiger Teil des digitalen Strukturwandels in Kliniken.},
  language = {de},
}

@article{DavidsonDuekingZinneretal.2020,
  author   = {Davidson, Padraig and D{\"u}king, Peter and Zinner, Christoph and Sperlich, Billy and Hotho, Andreas},
  title    = {Smartwatch-Derived Data and Machine Learning Algorithms Estimate Classes of Ratings of Perceived Exertion in Runners: A Pilot Study},
  journal  = {Sensors},
  volume   = {20},
  number   = {9},
  year     = {2020},
  issn     = {1424-8220},
  doi      = {10.3390/s20092637},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205686},
  abstract = {The rating of perceived exertion (RPE) is a subjective load marker and may assist in individualizing training prescription, particularly by adjusting running intensity. Unfortunately, RPE has shortcomings (e.g., underreporting) and cannot be monitored continuously and automatically throughout a training session. In this pilot study, we aimed to predict two classes of RPE (≤15 "Somewhat hard to hard" on Borg's 6-20 scale vs. RPE >15) in runners by analyzing data recorded by a commercially-available smartwatch with machine learning algorithms. Twelve trained and untrained runners performed long-continuous runs at a constant self-selected pace to volitional exhaustion. Untrained runners reported their RPE each kilometer, whereas trained runners reported every five kilometers. The kinetics of heart rate, step cadence, and running velocity were recorded continuously (1 Hz) with a commercially-available smartwatch (Polar V800). We trained different machine learning algorithms to estimate the two classes of RPE based on the time series sensor data derived from the smartwatch. Predictions were analyzed in different settings: accuracy overall and per runner type; i.e., accuracy for trained and untrained runners independently. We achieved top accuracies of 84.8 \% for the whole dataset, 81.8 \% for the trained runners, and 86.1 \% for the untrained runners. We predict two classes of RPE with high accuracy using machine learning and smartwatch data. This approach might aid in individualizing training prescriptions.},
  language = {en},
}