@phdthesis{Reinhard2023, author = {Reinhard, Sebastian}, title = {Improving Super-Resolution Microscopy Data Reconstruction and Evaluation by Developing Advanced Processing Algorithms and Artificial Neuronal Networks}, doi = {10.25972/OPUS-31695}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-316959}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The fusion of methods from several disciplines is a crucial component of scientific development. Artificial Neural Networks, based on the principle of biological neuronal networks, demonstrate how nature provides the best templates for technological advancement. These innovations can then be employed to solve the remaining mysteries of biology, including, in particular, processes that take place on microscopic scales and can only be studied with sophisticated techniques. For instance, direct Stochastic Optical Reconstruction Microscopy combines tools from chemistry, physics, and computer science to visualize biological processes at the molecular level. One of the key components is the computer-aided reconstruction of super-resolved images. Improving the corresponding algorithms increases the quality of the generated data, providing further insights into our biology. It is important, however, to ensure that the heavily processed images still reflect reality and do not originate from random artefacts. Expansion microscopy expands the sample by embedding it in a swellable hydrogel. The method can be combined with other super-resolution techniques to gain additional resolution. We tested this approach on microtubules, a well-known filamentous reference structure, to evaluate the performance of different protocols and labelling techniques. We developed LineProfiler, an objective tool for data collection. Instead of collecting perpendicular profiles in small areas, the software gathers line profiles from filamentous structures of the entire image. This improves data quantity and quality and prevents a biased choice of the evaluated regions. On the basis of the collected data, we deployed theoretical models of the expected intensity distribution across the filaments. This led to the conclusion that post-expansion labelling significantly reduces the labelling error and thus improves the data quality. The software was further used to determine the expansion factor and arrangement of synaptonemal complex data. Automated Simple Elastix uses state-of-the-art image alignment to compare pre- and post-expansion images. It corrects linear distortions occurring under isotropic expansion, calculates a structural expansion factor and highlights structural mismatches in a distortion map. We used the software to evaluate expanded fungi and NK cells. We found that the expansion factor differs for the two structures and is lower than the overall expansion of the hydrogel. Assessing the fluorescence lifetime of emitters used for direct Stochastic Optical Reconstruction Microscopy can reveal additional information about the molecular environment or distinguish dyes emitting at similar wavelengths. The corresponding measurements require a confocal scanning of the sample in combination with the fluorescent switching of the underlying emitters. This leads to non-linear, interrupted Point Spread Functions. The software ReCSAI targets this problem by combining the classical algorithm of compressed sensing with modern methods of artificial intelligence.
We evaluated several different approaches to combine these components and found that unrolling compressed sensing into the network architecture yields the best performance in terms of reconstruction speed and accuracy. In addition to a deep insight into the functioning and learning of artificial intelligence in combination with classical algorithms, we were able to reconstruct the described non-linearities with significantly improved resolution compared to other state-of-the-art architectures.}, subject = {Mikroskopie}, language = {en} }

@phdthesis{Grundke2023, author = {Grundke, Andrea}, title = {Head and Heart: On the Acceptability of Sophisticated Robots Based on an Enhancement of the Mind Perception Dichotomy and the Uncanny Valley of Mind}, doi = {10.25972/OPUS-33015}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-330152}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {With the continuous development of artificial intelligence, there is an effort to let the minds expressed by robots resemble human minds ever more closely. However, just as the human-like appearance of robots can lead to feelings of aversion towards such robots, recent research has shown that the apparent mind expressed by machines can also be responsible for their negative evaluation. This work strives to explore facets of aversion evoked by machines with a human-like mind (uncanny valley of mind) in three empirical projects from a psychological point of view, covering different contexts and the resulting consequences. In Manuscript \#1, the perspective of previous work in the research area is reversed, showing that humans feel eeriness in response to robots that can read human minds, a capability unknown from human-human interaction. In Manuscript \#2, it is explored whether empathy for a robot being harmed by a human is a way to alleviate the uncanny valley of mind. A result of this work worth highlighting is that aversion in this study did not arise from the manipulation of the robot's mental capabilities but from its attributed incompetence and failure. The results of Manuscript \#3 highlight that status threat is revealed if humans perform worse than machines in a work-relevant task requiring human-like mental capabilities, while higher status threat is linked with a higher willingness to interact, due to the machine's perceived usefulness. In sum, if explanatory variables and concrete scenarios are considered, people will react fairly positively to machines with human-like mental capabilities.
As long as the machine's usefulness is palpable to people, but machines are not fully autonomous, people seem willing to interact with them, accepting aversion in favor of the expected benefits.}, subject = {Humanoider Roboter}, language = {en} }

@incollection{MurielCiceri2023, author = {Muriel Ciceri, Jos{\'e} Hern{\'a}n}, title = {Auf dem Weg zur Regelung der k{\"u}nstlichen Intelligenz in Lateinamerika}, series = {Digitalization as a challenge for justice and administration = La digitalizacion como reto para la justicia y la administracion = Digitalisierung als Herausforderung f{\"u}r Justiz und Verwaltung}, booktitle = {Digitalization as a challenge for justice and administration = La digitalizacion como reto para la justicia y la administracion = Digitalisierung als Herausforderung f{\"u}r Justiz und Verwaltung}, editor = {Ludwigs, Markus and Muriel Ciceri, Jos{\'e} Hern{\'a}n and Velling, Annika}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, doi = {10.25972/978-3-95826-201-0-55}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-306262}, pages = {55-69}, year = {2023}, abstract = {No abstract available.}, language = {de} }

@phdthesis{Herm2023, author = {Herm, Lukas-Valentin}, title = {Algorithmic Decision-Making Facilities: Perception and Design of Explainable AI-based Decision Support Systems}, doi = {10.25972/OPUS-32294}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322948}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Recent computing advances are driving the integration of artificial intelligence (AI)-based systems into nearly every facet of our daily lives. To this end, AI is becoming a frontier for enabling algorithmic decision-making by mimicking or even surpassing human intelligence. Thereupon, these AI-based systems can function as decision support systems (DSSs) that assist experts in high-stakes use cases where human lives are at risk. However, all that glitters is not gold, due to the accompanying complexity of the underlying machine learning (ML) models, which apply mathematical and statistical algorithms to autonomously derive nonlinear decision knowledge. One particular subclass of ML models, called deep learning models, achieves unsurpassed performance, with the drawback that these models are no longer explainable to humans. This divergence may result in an end-user's unwillingness to utilize this type of AI-based DSS, thus diminishing the end-user's system acceptance. Hence, the explainable AI (XAI) research stream has gained momentum, as it develops techniques to unravel this black box while maintaining system performance. Unsurprisingly, these XAI techniques become necessary for justifying, evaluating, improving, or managing the utilization of AI-based DSSs. This yields a plethora of explanation techniques, creating an XAI jungle from which end-users must choose. In turn, these techniques are primarily engineered by developers for developers, without ensuring an actual end-user fit. Thus, it remains unknown how an end-user's mental model behaves when encountering such explanation techniques. To address this research deficiency, this cumulative thesis investigates end-user perceptions when encountering intrinsic ML and post-hoc XAI explanations. Drawing on this, the findings are synthesized into design knowledge to enable the deployment of XAI-based DSSs in practice.
To this end, this thesis comprises six research contributions that follow the iterative and alternating interplay between behavioral science and design science research employed in information systems (IS) research and thus contribute to the overall research objectives as follows: First, an in-depth study of the impact of transparency and (initial) trust on end-user acceptance is conducted by extending and validating the unified theory of acceptance and use of technology model. This study indicates both factors' strong but indirect effects on system acceptance, validating further research incentives. In particular, this thesis focuses on the overarching concept of transparency. Herein, a systematization in the form of a taxonomy and pattern analysis of existing user-centered XAI studies is derived to structure and guide future research endeavors. This enables the empirical investigation of the theoretical trade-off between performance and explainability in intrinsic ML algorithms, yielding a less gradual trade-off that is fragmented into three explainability groups. This is complemented by an empirical investigation of end-users' perceived explainability of post-hoc explanation types, with local explanation types performing best. Furthermore, an empirical investigation emphasizes the correlation between comprehensibility and explainability, indicating nearly significant results (with outliers) for the assumed correlation. The final empirical investigation examines the effect of XAI explanation types on end-user cognitive load and the effect of cognitive load on end-user task performance and task time; it also positions local explanation types as best and demonstrates the correlations between cognitive load and task performance and, moreover, between cognitive load and task time. Finally, the last research paper utilizes, among other things, the obtained knowledge and derives a nascent design theory for XAI-based DSSs. This design theory encompasses (meta-)design requirements, design principles, and design features in a domain-independent and interdisciplinary fashion, including end-users and developers as potential user groups. This design theory is ultimately tested through a real-world instantiation in a high-stakes maintenance scenario. From an IS research perspective, this cumulative thesis addresses the lack of research on perception and design knowledge for an ensured utilization of XAI-based DSSs. This lays the foundation for future research to obtain a holistic understanding of end-users' heuristic behaviors during decision-making to facilitate the acceptance of XAI-based DSSs in operational practice.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} }