@article{HermSteinbachWanneretal.2022,
  author   = {Herm, Lukas-Valentin and Steinbach, Theresa and Wanner, Jonas and Janiesch, Christian},
  title    = {A nascent design theory for explainable intelligent systems},
  journal  = {Electronic Markets},
  volume   = {32},
  number   = {4},
  pages    = {2185--2205},
  year     = {2022},
  issn     = {1019-6781},
  doi      = {10.1007/s12525-022-00606-3},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323809},
  abstract = {Due to computational advances in the past decades, so-called intelligent systems can learn from increasingly complex data, analyze situations, and support users in making decisions to address them. However, in practice, the complexity of these intelligent systems leaves users hardly able to comprehend the inherent decision logic of the underlying machine learning model. As a result, the adoption of this technology, especially for high-stakes scenarios, is hampered. In this context, explainable artificial intelligence offers numerous starting points for making the inherent logic explainable to people. While research demonstrates the necessity of incorporating explainable artificial intelligence into intelligent systems, there is still a lack of knowledge about how to socio-technically design these systems to address acceptance barriers among different user groups. In response, we have derived and evaluated a nascent design theory for explainable intelligent systems based on a structured literature review, two qualitative expert studies, a real-world use case application, and quantitative research. Our design theory includes design requirements, design principles, and design features covering global explainability, local explainability, personalized interface design, and psychological/emotional factors.},
  language = {en}
}

@article{WannerHermHeinrichetal.2022,
  author   = {Wanner, Jonas and Herm, Lukas-Valentin and Heinrich, Kai and Janiesch, Christian},
  title    = {The effect of transparency and trust on intelligent system acceptance: evidence from a user-based study},
  journal  = {Electronic Markets},
  volume   = {32},
  number   = {4},
  pages    = {2079--2102},
  year     = {2022},
  issn     = {1019-6781},
  doi      = {10.1007/s12525-022-00593-5},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323829},
  abstract = {Contemporary decision support systems increasingly rely on artificial intelligence technology, such as machine learning algorithms, to form intelligent systems. These systems have human-like decision capacity for selected applications, based on a decision rationale that cannot be looked up conveniently and constitutes a black box. As a consequence, end-users remain somewhat hesitant to accept them. While a lack of transparency has been said to hinder trust and reinforce aversion towards these systems, studies that connect user trust to transparency and subsequently acceptance are scarce. In response, our research is concerned with the development of a theoretical model that explains end-user acceptance of intelligent systems. We utilize the unified theory of acceptance and use of technology as well as explanation theory and related theories on initial trust and user trust in information systems. The proposed model is tested in an industrial maintenance workplace scenario using maintenance experts as participants to represent the user group. Results show that acceptance is performance-driven at first sight.
However, transparency plays an important indirect role in regulating trust and the perception of performance.},
  language = {en}
}

@article{HermJanieschHelmetal.2023,
  author   = {Herm, Lukas-Valentin and Janiesch, Christian and Helm, Alexander and Imgrund, Florian and Hofmann, Adrian and Winkelmann, Axel},
  title    = {A framework for implementing robotic process automation projects},
  journal  = {Information Systems and e-Business Management},
  volume   = {21},
  number   = {1},
  pages    = {1--35},
  year     = {2023},
  issn     = {1617-9846},
  doi      = {10.1007/s10257-022-00553-8},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323798},
  abstract = {Robotic process automation is a disruptive technology for rapidly automating tasks and subprocesses that are already digital yet still manual, as well as whole business processes. In contrast to other process automation technologies, robotic process automation is lightweight and only accesses the presentation layer of IT systems to mimic human behavior. Due to the novelty of robotic process automation and the varying approaches when implementing the technology, there are reports that up to 50\% of robotic process automation projects fail. To tackle this issue, we use a design science research approach to develop a framework for the implementation of robotic process automation projects. We analyzed 35 reports on real-life projects to derive a preliminary sequential model. Then, we performed multiple expert interviews and workshops to validate and refine our model. The result is a framework with variable stages that offers guidelines flexible enough to be applicable in complex and heterogeneous corporate environments as well as for small and medium-sized companies. It is structured into the three phases of initialization, implementation, and scaling, which comprise eleven stages that are relevant both during a single project and as a continuous cycle spanning individual projects. Together, they structure how to manage knowledge and supporting processes for executing robotic process automation implementation projects.},
  language = {en}
}

@phdthesis{Herm2023,
  author   = {Herm, Lukas-Valentin},
  title    = {Algorithmic Decision-Making Facilities: Perception and Design of Explainable AI-based Decision Support Systems},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2023},
  doi      = {10.25972/OPUS-32294},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322948},
  abstract = {Recent computing advances are driving the integration of artificial intelligence (AI)-based systems into nearly every facet of our daily lives. To this end, AI is becoming a frontier for enabling algorithmic decision-making by mimicking or even surpassing human intelligence. These AI-based systems can thus function as decision support systems (DSSs) that assist experts in high-stakes use cases where human lives are at risk. However, all that glitters is not gold, due to the accompanying complexity of the underlying machine learning (ML) models, which apply mathematical and statistical algorithms to autonomously derive nonlinear decision knowledge. One particular subclass of ML models, called deep learning models, achieves unsurpassed performance, with the drawback that these models are no longer explainable to humans. This divergence may result in an end-user's unwillingness to utilize this type of AI-based DSS, thus diminishing the end-user's system acceptance.
Hence, the explainable AI (XAI) research stream has gained momentum, as it develops techniques to unravel this black box while maintaining system performance. Unsurprisingly, these XAI techniques have become necessary for justifying, evaluating, improving, or managing the utilization of AI-based DSSs. This yields a plethora of explanation techniques, creating an XAI jungle from which end-users must choose. In turn, these techniques are primarily engineered by developers for developers without ensuring an actual end-user fit. Thus, it remains unknown how an end-user's mental model behaves when encountering such explanation techniques. This cumulative thesis seeks to address this research deficiency by investigating end-user perceptions when encountering intrinsic ML and post-hoc XAI explanations. Drawing on this, the findings are synthesized into design knowledge to enable the deployment of XAI-based DSSs in practice. To this end, this thesis comprises six research contributions that follow the iterative and alternating interplay between behavioral science and design science research employed in information systems (IS) research and thus contribute to the overall research objectives as follows: First, an in-depth study of the impact of transparency and (initial) trust on end-user acceptance is conducted by extending and validating the unified theory of acceptance and use of technology model. This study indicates both factors' strong but indirect effects on system acceptance, validating further research incentives. In particular, this thesis focuses on the overarching concept of transparency. Herein, a systematization in the form of a taxonomy and pattern analysis of existing user-centered XAI studies is derived to structure and guide future research endeavors. This enables the empirical investigation of the theoretical trade-off between performance and explainability in intrinsic ML algorithms, yielding a less gradual trade-off that is fragmented into three explainability groups. This includes an empirical investigation of end-users' perceived explainability of post-hoc explanation types, with local explanation types performing best. Furthermore, an empirical investigation emphasizes the correlation between comprehensibility and explainability, indicating nearly significant results (with outliers) for the assumed correlation. The final empirical investigation researches the effect of XAI explanation types on end-user cognitive load, as well as the effect of cognitive load on end-user task performance and task time; it also positions local explanation types as best and demonstrates the correlations between cognitive load and task performance and, moreover, between cognitive load and task time. Finally, the last research paper utilizes, inter alia, the obtained knowledge and derives a nascent design theory for XAI-based DSSs. This design theory encompasses (meta-)design requirements, design principles, and design features in a domain-independent and interdisciplinary fashion, including end-users and developers as potential user groups. This design theory is ultimately tested through a real-world instantiation in a high-stakes maintenance scenario. From an IS research perspective, this cumulative thesis addresses the lack of research on perception and design knowledge for an ensured utilization of XAI-based DSSs.
This lays the foundation for future research to obtain a holistic understanding of end-users' heuristic behaviors during decision-making to facilitate the acceptance of XAI-based DSSs in operational practice.},
  subject  = {K{\"u}nstliche Intelligenz},
  language = {en}
}