@article{DavidsonDuekingZinneretal.2020, author = {Davidson, Padraig and D{\"u}king, Peter and Zinner, Christoph and Sperlich, Billy and Hotho, Andreas}, title = {Smartwatch-Derived Data and Machine Learning Algorithms Estimate Classes of Ratings of Perceived Exertion in Runners: A Pilot Study}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {9}, issn = {1424-8220}, doi = {10.3390/s20092637}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205686}, year = {2020}, abstract = {The rating of perceived exertion (RPE) is a subjective load marker and may assist in individualizing training prescription, particularly by adjusting running intensity. Unfortunately, RPE has shortcomings (e.g., underreporting) and cannot be monitored continuously and automatically throughout a training session. In this pilot study, we aimed to predict two classes of RPE (≤15 "Somewhat hard to hard" on Borg's 6-20 scale vs. RPE >15) in runners by analyzing data recorded by a commercially available smartwatch with machine learning algorithms. Twelve trained and untrained runners performed long continuous runs at a constant self-selected pace to volitional exhaustion. Untrained runners reported their RPE each kilometer, whereas trained runners reported every five kilometers. The kinetics of heart rate, step cadence, and running velocity were recorded continuously (1 Hz) with a commercially available smartwatch (Polar V800). We trained different machine learning algorithms to estimate the two classes of RPE based on the time series sensor data derived from the smartwatch. Predictions were analyzed in different settings: accuracy overall and per runner type, i.e., accuracy for trained and untrained runners independently. We achieved top accuracies of 84.8\% for the whole dataset, 81.8\% for the trained runners, and 86.1\% for the untrained runners. We predict two classes of RPE with high accuracy using machine learning and smartwatch data. This approach might aid in individualizing training prescriptions.}, language = {en} } @inproceedings{DaviesDewellHarvey2021, author = {Davies, Richard and Dewell, Nathan and Harvey, Carlo}, title = {A framework for interactive, autonomous and semantic dialogue generation in games}, series = {Proceedings of the 1st Games Technology Summit}, booktitle = {Proceedings of the 1st Games Technology Summit}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-246023}, pages = {16-28}, year = {2021}, abstract = {Immersive virtual environments provide users with the opportunity to escape from the real world, but scripted dialogues can disrupt the user's presence within the world they are trying to escape into. Both Non-Playable Character (NPC) to Player and NPC to NPC dialogue can be unnatural, and the reliance on responding with pre-defined dialogue does not always meet the player's emotional expectations or provide responses appropriate to the given context or world states. This paper investigates the application of Artificial Intelligence (AI) and Natural Language Processing to generate dynamic human-like responses within a themed virtual world. 
Each theme has been analysed against human-generated responses for the same seed and demonstrates invariance of rating across a range of model sizes, but shows an effect of theme and the size of the corpus used for fine-tuning the context for the game world.}, language = {en} } @phdthesis{Griebel2022, author = {Griebel, Matthias}, title = {Applied Deep Learning: from Data to Deployment}, doi = {10.25972/OPUS-27765}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-277650}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Novel deep learning (DL) architectures, better data availability, and a significant increase in computing power have enabled scientists to solve problems that were considered unassailable for many years. A case in point is the "protein folding problem", a 50-year-old grand challenge in biology that was recently solved by the DL-system AlphaFold. Other examples comprise the development of large DL-based language models that, for instance, generate newspaper articles that hardly differ from those written by humans. However, developing unbiased, reliable, and accurate DL models for various practical applications remains a major challenge - and many promising DL projects get stuck in the piloting stage, never to be completed. In light of these observations, this thesis investigates the practical challenges encountered throughout the life cycle of DL projects and proposes solutions to develop and deploy rigorous DL models. The first part of the thesis is concerned with prototyping DL solutions in different domains. First, we conceptualize guidelines for applied image recognition and showcase their application in a biomedical research project. Next, we illustrate the bottom-up development of a DL backend for an augmented intelligence system in the manufacturing sector. We then turn to the fashion domain and present an artificial curation system for individual fashion outfit recommendations that leverages DL techniques and unstructured data from social media and fashion blogs. After that, we showcase how DL solutions can assist fashion designers in the creative process. Finally, we present our award-winning DL solution for the segmentation of glomeruli in human kidney tissue images that was developed for the Kaggle data science competition HuBMAP - Hacking the Kidney. The second part continues the development path of the biomedical research project beyond the prototyping stage. Using data from five laboratories, we show that ground truth estimation from multiple human annotators and training of DL model ensembles help to establish objectivity, reliability, and validity in DL-based bioimage analyses. In the third part, we present deepflash2, a DL solution that addresses the typical challenges encountered during training, evaluation, and application of DL models in bioimaging. The tool facilitates the objective and reliable segmentation of ambiguous bioimages through multi-expert annotations and integrated quality assurance. It is embedded in an easy-to-use graphical user interface and offers best-in-class predictive performance for semantic and instance segmentation under economical usage of computational resources.}, language = {en} } @article{HenckertMalorgioSchweigeretal.2023, author = {Henckert, David and Malorgio, Amos and Schweiger, Giovanna and Raimann, Florian J. and Piekarski, Florian and Zacharowski, Kai and Hottenrott, Sebastian and Meybohm, Patrick and Tscholl, David W. and Spahn, Donat R. 
and Roche, Tadzio R.}, title = {Attitudes of anesthesiologists toward artificial intelligence in anesthesia: a multicenter, mixed qualitative-quantitative study}, series = {Journal of Clinical Medicine}, volume = {12}, journal = {Journal of Clinical Medicine}, number = {6}, issn = {2077-0383}, doi = {10.3390/jcm12062096}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-311189}, year = {2023}, abstract = {Artificial intelligence (AI) is predicted to play an increasingly important role in perioperative medicine in the very near future. However, little is known about what anesthesiologists know and think about AI in this context. This is important because the successful introduction of new technologies depends on the understanding and cooperation of end users. We sought to investigate how much anesthesiologists know about AI and what they think about the introduction of AI-based technologies into the clinical setting. In order to better understand what anesthesiologists think of AI, we recruited 21 anesthesiologists from 2 university hospitals for face-to-face structured interviews. The interview transcripts were subdivided sentence-by-sentence into discrete statements, and statements were then grouped into key themes. Subsequently, a survey of closed questions based on these themes was sent to 70 anesthesiologists from 3 university hospitals for rating. In the interviews, the base level of knowledge of AI was good at 86 of 90 statements (96\%), although awareness of the potential applications of AI in anesthesia was poor at only 7 of 42 statements (17\%). Regarding the implementation of AI in anesthesia, statements were split roughly evenly between pros (46 of 105, 44\%) and cons (59 of 105, 56\%). Interviewees considered that AI could usefully be used in diverse tasks such as risk stratification, the prediction of vital sign changes, or as a treatment guide. The validity of these themes was probed in a follow-up survey of 70 anesthesiologists with a response rate of 70\%, which confirmed an overall positive view of AI in this group. Anesthesiologists hold a range of opinions, both positive and negative, regarding the application of AI in their field of work. Survey-based studies do not always uncover the full breadth of nuance of opinion amongst clinicians. Engagement with specific concerns, both technical and ethical, will prove important as this technology moves from research to the clinic.}, language = {en} } @article{HermSteinbachWanneretal.2022, author = {Herm, Lukas-Valentin and Steinbach, Theresa and Wanner, Jonas and Janiesch, Christian}, title = {A nascent design theory for explainable intelligent systems}, series = {Electronic Markets}, volume = {32}, journal = {Electronic Markets}, number = {4}, issn = {1019-6781}, doi = {10.1007/s12525-022-00606-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323809}, pages = {2185-2205}, year = {2022}, abstract = {Due to computational advances in the past decades, so-called intelligent systems can learn from increasingly complex data, analyze situations, and support users in their decision-making to address them. However, in practice, the complexity of these intelligent systems renders the user hardly able to comprehend the inherent decision logic of the underlying machine learning model. As a result, the adoption of this technology, especially for high-stake scenarios, is hampered. In this context, explainable artificial intelligence offers numerous starting points for making the inherent logic explainable to people. 
While research manifests the necessity for incorporating explainable artificial intelligence into intelligent systems, there is still a lack of knowledge about how to socio-technically design these systems to address acceptance barriers among different user groups. In response, we have derived and evaluated a nascent design theory for explainable intelligent systems based on a structured literature review, two qualitative expert studies, a real-world use case application, and quantitative research. Our design theory includes design requirements, design principles, and design features covering the topics of global explainability, local explainability, personalized interface design, as well as psychological/emotional factors.}, language = {en} } @article{HoeserBachoferKuenzer2020, author = {Hoeser, Thorsten and Bachofer, Felix and Kuenzer, Claudia}, title = {Object detection and image segmentation with deep learning on Earth Observation data: a review — part II: applications}, series = {Remote Sensing}, volume = {12}, journal = {Remote Sensing}, number = {18}, issn = {2072-4292}, doi = {10.3390/rs12183053}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-213152}, year = {2020}, abstract = {In Earth observation (EO), large-scale land-surface dynamics are traditionally analyzed by investigating aggregated classes. The increase in data with a very high spatial resolution enables investigations on a fine-grained feature level which can help us to better understand the dynamics of land surfaces by taking object dynamics into account. To extract fine-grained features and objects, the most popular deep-learning model for image analysis is commonly used: the convolutional neural network (CNN). In this review, we provide a comprehensive overview of the impact of deep learning on EO applications by reviewing 429 studies on image segmentation and object detection with CNNs. We extensively examine the spatial distribution of study sites, employed sensors, used datasets and CNN architectures, and give a thorough overview of applications in EO which used CNNs. Our main finding is that CNNs are in an advanced transition phase from computer vision to EO. Upon this, we argue that in the near future, investigations which analyze object dynamics with CNNs will have a significant impact on EO research. With a focus on EO applications in this Part II, we complete the methodological review provided in Part I.}, language = {en} } @article{HoeserKuenzer2020, author = {Hoeser, Thorsten and Kuenzer, Claudia}, title = {Object detection and image segmentation with deep learning on Earth observation data: a review-part I: evolution and recent trends}, series = {Remote Sensing}, volume = {12}, journal = {Remote Sensing}, number = {10}, issn = {2072-4292}, doi = {10.3390/rs12101667}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205918}, year = {2020}, abstract = {Deep learning (DL) has great influence on large parts of science and increasingly established itself as an adaptive method for new challenges in the field of Earth observation (EO). Nevertheless, the entry barriers for EO researchers are high due to the dense and rapidly developing field mainly driven by advances in computer vision (CV). To lower the barriers for researchers in EO, this review gives an overview of the evolution of DL with a focus on image segmentation and object detection in convolutional neural networks (CNN). The survey starts in 2012, when a CNN set new standards in image recognition, and lasts until late 2019. 
Thereby, we highlight the connections between the most important CNN architectures and cornerstones coming from CV in order to alleviate the evaluation of modern DL models. Furthermore, we briefly outline the evolution of the most popular DL frameworks and provide a summary of datasets in EO. By discussing well performing DL architectures on these datasets as well as reflecting on advances made in CV and their impact on future research in EO, we narrow the gap between the reviewed, theoretical concepts from CV and practical application in EO.}, language = {en} } @phdthesis{Hoeser2022, author = {H{\"o}ser, Thorsten}, title = {Global Dynamics of the Offshore Wind Energy Sector Derived from Earth Observation Data - Deep Learning Based Object Detection Optimised with Synthetic Training Data for Offshore Wind Energy Infrastructure Extraction from Sentinel-1 Imagery}, doi = {10.25972/OPUS-29285}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-292857}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {The expansion of renewable energies is being driven by the gradual phaseout of fossil fuels in order to reduce greenhouse gas emissions, the steadily increasing demand for energy and, more recently, by geopolitical events. The offshore wind energy sector is on the verge of a massive expansion in Europe, the United Kingdom, China, but also in the USA, South Korea and Vietnam. Accordingly, the largest marine infrastructure projects to date will be carried out in the upcoming decades, with thousands of offshore wind turbines being installed. In order to accompany this process globally and to provide a database for research, development and monitoring, this dissertation presents a deep learning-based approach for object detection that enables the derivation of spatiotemporal developments of offshore wind energy infrastructures from satellite-based radar data of the Sentinel-1 mission. For training the deep learning models for offshore wind energy infrastructure detection, an approach is presented that makes it possible to synthetically generate remote sensing data and the necessary annotation for the supervised deep learning process. In this synthetic data generation process, expert knowledge about image content and sensor acquisition techniques is made machine-readable. Finally, extensive and highly variable training data sets are generated from this knowledge representation, with which deep learning models can learn to detect objects in real-world satellite data. The method for the synthetic generation of training data based on expert knowledge offers great potential for deep learning in Earth observation. Applications of deep learning based methods can be developed and tested faster with this procedure. Furthermore, the synthetically generated and thus controllable training data offer the possibility to interpret the learning process of the optimised deep learning models. The method developed in this dissertation to create synthetic remote sensing training data was finally used to optimise deep learning models for the global detection of offshore wind energy infrastructure. For this purpose, images of the entire global coastline from ESA's Sentinel-1 radar mission were evaluated. The derived data set includes over 9,941 objects, which distinguish offshore wind turbines, transformer stations and offshore wind energy infrastructures under construction from each other. 
In addition to this spatial detection, a quarterly time series from July 2016 to June 2021 was derived for all objects. This time series reveals the start of construction, the construction phase and the time of completion with subsequent operation for each object. The derived offshore wind energy infrastructure data set provides the basis for an analysis of the development of the offshore wind energy sector from July 2016 to June 2021. For this analysis, further attributes of the detected offshore wind turbines were derived. The most important of these are the height and installed capacity of a turbine. The turbine height was calculated by a radargrammetric analysis of the previously detected Sentinel-1 signal and then used to statistically model the installed capacity. The results show that in June 2021, 8,885 offshore wind turbines with a total capacity of 40.6 GW were installed worldwide. The largest installed capacities are in the EU (15.2 GW), China (14.1 GW) and the United Kingdom (10.7 GW). From July 2016 to June 2021, China expanded its offshore wind energy infrastructure by 13 GW. The EU installed 8 GW and the UK 5.8 GW of offshore wind energy infrastructure in the same period. This temporal analysis shows that China was the main driver of the expansion of the offshore wind energy sector in the period under investigation. The derived data set for the description of the offshore wind energy sector was made publicly available. It is thus freely accessible to all decision-makers and stakeholders involved in the development of offshore wind energy projects. Especially in the scientific context, it serves as a database that enables a wide range of investigations. Research questions regarding offshore wind turbines themselves as well as the influence of the expansion in the coming decades can be investigated. This supports the imminent and urgently needed expansion of offshore wind energy and helps to ensure that it proceeds sustainably alongside the expansion targets that have been set.}, language = {en} } @article{JanieschZschechHeinrich2021, author = {Janiesch, Christian and Zschech, Patrick and Heinrich, Kai}, title = {Machine learning and deep learning}, series = {Electronic Markets}, volume = {31}, journal = {Electronic Markets}, number = {3}, issn = {1422-8890}, doi = {10.1007/s12525-021-00475-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-270155}, pages = {685-695}, year = {2021}, abstract = {Today, intelligent systems that offer artificial intelligence capabilities often rely on machine learning. Machine learning describes the capacity of systems to learn from problem-specific training data to automate the process of analytical model building and solve associated tasks. Deep learning is a machine learning concept based on artificial neural networks. For many applications, deep learning models outperform shallow machine learning models and traditional data analysis approaches. In this article, we summarize the fundamentals of machine learning and deep learning to generate a broader understanding of the methodical underpinning of current intelligent systems. In particular, we provide a conceptual distinction between relevant terms and concepts, explain the process of automated analytical model building through machine learning and deep learning, and discuss the challenges that arise when implementing such intelligent systems in the field of electronic markets and networked business. 
These naturally go beyond technological aspects and highlight issues in human-machine interaction and artificial intelligence servitization.}, language = {en} } @article{KazuhinoWernerToriumietal.2018, author = {Koshino, Kazuhiro and Werner, Rudolf A. and Toriumi, Fujio and Javadi, Mehrbod S. and Pomper, Martin G. and Solnes, Lilja B. and Verde, Franco and Higuchi, Takahiro and Rowe, Steven P.}, title = {Generative Adversarial Networks for the Creation of Realistic Artificial Brain Magnetic Resonance Images}, series = {Tomography}, volume = {4}, journal = {Tomography}, number = {4}, doi = {10.18383/j.tom.2018.00042}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-172185}, pages = {159-163}, year = {2018}, abstract = {Even as medical data sets become more publicly accessible, most are restricted to specific medical conditions. Thus, data collection for machine learning approaches remains challenging, and synthetic data augmentation, such as generative adversarial networks (GAN), may overcome this hurdle. In the present quality control study, deep convolutional GAN (DCGAN)-based human brain magnetic resonance (MR) images were validated by blinded radiologists. In total, 96 T1-weighted brain images from 30 healthy individuals and 33 patients with cerebrovascular accident were included. A training data set was generated from the T1-weighted images and DCGAN was applied to generate additional artificial brain images. The likelihood that images were DCGAN-created versus acquired was evaluated by 5 radiologists (2 neuroradiologists [NRs] vs 3 non-neuroradiologists [NNRs]) in a binary fashion to identify real vs created images. Images were selected randomly from the data set (variation of created images, 40\%-60\%). None of the investigated images was rated as unknown. Of the created images, the NRs rated 45\% and 71\% as real magnetic resonance imaging images (NNRs, 24\%, 40\%, and 44\%). In contradistinction, 44\% and 70\% of the real images were rated as generated images by NRs (NNRs, 10\%, 17\%, and 27\%). The accuracy for the NRs was 0.55 and 0.30 (NNRs, 0.83, 0.72, and 0.64). DCGAN-created brain MR images are similar enough to acquired MR images so as to be indistinguishable in some cases. Such an artificial intelligence algorithm may contribute to synthetic data augmentation for "data-hungry" technologies, such as supervised machine learning approaches, in various clinical applications.}, subject = {Magnetresonanztomografie}, language = {en} }