@phdthesis{Herm2023,
  author   = {Herm, Lukas-Valentin},
  title    = {Algorithmic Decision-Making Facilities: Perception and Design of Explainable {AI}-based Decision Support Systems},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2023},
  doi      = {10.25972/OPUS-32294},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322948},
  abstract = {Recent computing advances are driving the integration of artificial intelligence (AI)-based systems into nearly every facet of our daily lives. To this end, AI is becoming a frontier for enabling algorithmic decision-making by mimicking or even surpassing human intelligence. Thereupon, these AI-based systems can function as decision support systems (DSSs) that assist experts in high-stakes use cases where human lives are at risk. All that glitters is not gold, due to the accompanying complexity of the underlying machine learning (ML) models, which apply mathematical and statistical algorithms to autonomously derive nonlinear decision knowledge. One particular subclass of ML models, called deep learning models, accomplishes unsurpassed performance, with the drawback that these models are no longer explainable to humans. This divergence may result in an end-user's unwillingness to utilize this type of AI-based DSS, thus diminishing the end-user's system acceptance. Hence, the explainable AI (XAI) research stream has gained momentum, as it develops techniques to unravel this black-box while maintaining system performance. Non-surprisingly, these XAI techniques become necessary for justifying, evaluating, improving, or managing the utilization of AI-based DSSs. This yields a plethora of explanation techniques, creating an XAI jungle from which end-users must choose. In turn, these techniques are preliminarily engineered by developers for developers without ensuring an actual end-user fit. Thus, it renders unknown how an end-user's mental model behaves when encountering such explanation techniques.
For this purpose, this cumulative thesis seeks to address this research deficiency by investigating end-user perceptions when encountering intrinsic ML and post-hoc XAI explanations. Drawing on this, the findings are synthesized into design knowledge to enable the deployment of XAI-based DSSs in practice. To this end, this thesis comprises six research contributions that follow the iterative and alternating interplay between behavioral science and design science research employed in information systems (IS) research and thus contribute to the overall research objectives as follows: First, an in-depth study of the impact of transparency and (initial) trust on end-user acceptance is conducted by extending and validating the unified theory of acceptance and use of technology model. This study indicates both factors' strong but indirect effects on system acceptance, validating further research incentives. In particular, this thesis focuses on the overarching concept of transparency. Herein, a systematization in the form of a taxonomy and pattern analysis of existing user-centered XAI studies is derived to structure and guide future research endeavors, which enables the empirical investigation of the theoretical trade-off between performance and explainability in intrinsic ML algorithms, yielding a less gradual trade-off, fragmented into three explainability groups. This includes an empirical investigation on end-users' perceived explainability of post-hoc explanation types, with local explanation types performing best. Furthermore, an empirical investigation emphasizes the correlation between comprehensibility and explainability, indicating almost significant (with outliers) results for the assumed correlation.
The final empirical investigation aims at researching XAI explanation types on end-user cognitive load and the effect of cognitive load on end-user task performance and task time, which also positions local explanation types as best and demonstrates the correlations between cognitive load and task performance and, moreover, between cognitive load and task time. Finally, the last research paper utilizes i.a. the obtained knowledge and derives a nascent design theory for XAI-based DSSs. This design theory encompasses (meta-) design requirements, design principles, and design features in a domain-independent and interdisciplinary fashion, including end-users and developers as potential user groups. This design theory is ultimately tested through a real-world instantiation in a high-stakes maintenance scenario. From an IS research perspective, this cumulative thesis addresses the lack of research on perception and design knowledge for an ensured utilization of XAI-based DSS. This lays the foundation for future research to obtain a holistic understanding of end-users' heuristic behaviors during decision-making to facilitate the acceptance of XAI-based DSSs in operational practice.},
  subject  = {K{\"u}nstliche Intelligenz},
  language = {en},
}

@phdthesis{Wanner2022,
  author   = {Wanner, Jonas Paul},
  title    = {Artificial Intelligence for Human Decision-Makers: Systematization, Perception, and Adoption of Intelligent Decision Support Systems in {Industry 4.0}},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2022},
  doi      = {10.25972/OPUS-25901},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259014},
  abstract = {Innovative possibilities for data collection, networking, and evaluation are unleashing previously untapped potential for industrial production. However, harnessing this potential also requires a change in the way we work.
In addition to expanded automation, human-machine cooperation is becoming more important: The machine achieves a reduction in complexity for humans through artificial intelligence. In fractions of a second large amounts of data of high decision quality are analyzed and suggestions are offered. The human being, for this part, usually makes the ultimate decision. He validates the machine's suggestions and, if necessary, (physically) executes them. Both entities are highly dependent on each other to accomplish the task in the best possible way. Therefore, it seems particularly important to understand to what extent such cooperation can be effective. Current developments in the field of artificial intelligence show that research in this area is particularly focused on neural network approaches. These are considered to be highly powerful but have the disadvantage of lacking transparency. Their inherent computational processes and the respective result reasoning remain opaque to humans. Some researchers assume that human users might therefore reject the system's suggestions. The research domain of explainable artificial intelligence (XAI) addresses this problem and tries to develop methods to realize systems that are highly efficient and explainable. This work is intended to provide further insights relevant to the defined goal of XAI. For this purpose, artifacts are developed that represent research achievements regarding the systematization, perception, and adoption of artificially intelligent decision support systems from a user perspective. The focus is on socio-technical insights with the aim to better understand which factors are important for effective human-machine cooperation. The elaborations predominantly represent extended grounded research. Thus, the artifacts imply an extension of knowledge in order to develop and/ or test effective XAI methods and techniques based on this knowledge.
Industry 4.0, with a focus on maintenance, is used as the context for this development.},
  subject  = {K{\"u}nstliche Intelligenz},
  language = {en},
}