@article{WannerHermHeinrichetal.2022,
  author = {Wanner, Jonas and Herm, Lukas-Valentin and Heinrich, Kai and Janiesch, Christian},
  title = {The effect of transparency and trust on intelligent system acceptance: evidence from a user-based study},
  series = {Electronic Markets},
  volume = {32},
  journal = {Electronic Markets},
  number = {4},
  issn = {1019-6781},
  doi = {10.1007/s12525-022-00593-5},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323829},
  pages = {2079-2102},
  year = {2022},
  abstract = {Contemporary decision support systems increasingly rely on artificial intelligence technology, such as machine learning algorithms, to form intelligent systems. These systems have human-like decision capacity for selected applications based on a decision rationale that cannot be looked up conveniently and thus constitutes a black box. As a consequence, acceptance by end-users remains somewhat hesitant. While a lack of transparency has been said to hinder trust and foster aversion towards these systems, studies that connect user trust to transparency and, subsequently, to acceptance are scarce. In response, our research is concerned with the development of a theoretical model that explains end-user acceptance of intelligent systems. We utilize the unified theory of acceptance and use of technology as well as explanation theory and related theories on initial trust and user trust in information systems. The proposed model is tested in an industrial maintenance workplace scenario using maintenance experts as participants to represent the user group. Results show that acceptance is performance-driven at first sight. However, transparency plays an important indirect role in regulating trust and the perception of performance.},
  language = {en}
}

@article{HermSteinbachWanneretal.2022,
  author = {Herm, Lukas-Valentin and Steinbach, Theresa and Wanner, Jonas and Janiesch, Christian},
  title = {A nascent design theory for explainable intelligent systems},
  series = {Electronic Markets},
  volume = {32},
  journal = {Electronic Markets},
  number = {4},
  issn = {1019-6781},
  doi = {10.1007/s12525-022-00606-3},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323809},
  pages = {2185-2205},
  year = {2022},
  abstract = {Due to computational advances in the past decades, so-called intelligent systems can learn from increasingly complex data, analyze situations, and support users in their decision-making to address these situations. In practice, however, the complexity of these intelligent systems leaves users hardly able to comprehend the inherent decision logic of the underlying machine learning model. As a result, the adoption of this technology, especially for high-stakes scenarios, is hampered. In this context, explainable artificial intelligence offers numerous starting points for making the inherent logic explainable to people. While research demonstrates the necessity of incorporating explainable artificial intelligence into intelligent systems, there is still a lack of knowledge about how to socio-technically design these systems to address acceptance barriers among different user groups. In response, we have derived and evaluated a nascent design theory for explainable intelligent systems based on a structured literature review, two qualitative expert studies, a real-world use case application, and quantitative research.
Our design theory includes design requirements, design principles, and design features covering the topics of global explainability, local explainability, personalized interface design, as well as psychological/emotional factors.},
  language = {en}
}

@article{HermJanieschHelmetal.2023,
  author = {Herm, Lukas-Valentin and Janiesch, Christian and Helm, Alexander and Imgrund, Florian and Hofmann, Adrian and Winkelmann, Axel},
  title = {A framework for implementing robotic process automation projects},
  series = {Information Systems and e-Business Management},
  volume = {21},
  journal = {Information Systems and e-Business Management},
  number = {1},
  issn = {1617-9846},
  doi = {10.1007/s10257-022-00553-8},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323798},
  pages = {1-35},
  year = {2023},
  abstract = {Robotic process automation is a disruptive technology for rapidly automating already digital yet manual tasks and subprocesses as well as whole business processes. In contrast to other process automation technologies, robotic process automation is lightweight and only accesses the presentation layer of IT systems to mimic human behavior. Due to the novelty of robotic process automation and the varying approaches to implementing the technology, there are reports that up to 50\% of robotic process automation projects fail. To tackle this issue, we use a design science research approach to develop a framework for the implementation of robotic process automation projects. We analyzed 35 reports on real-life projects to derive a preliminary sequential model. Then, we performed multiple expert interviews and workshops to validate and refine our model. The result is a framework with variable stages that offers guidelines flexible enough to be applicable in complex and heterogeneous corporate environments as well as in small and medium-sized companies. It is structured by the three phases of initialization, implementation, and scaling. They comprise eleven stages relevant both during a project and as a continuous cycle spanning individual projects. Together, they structure how to manage knowledge and support processes for the execution of robotic process automation implementation projects.},
  language = {en}
}

@article{HermJanieschFuchs2022,
  author = {Herm, Lukas-Valentin and Janiesch, Christian and Fuchs, Patrick},
  title = {Der Einfluss von menschlichen Denkmustern auf k{\"u}nstliche Intelligenz - eine strukturierte Untersuchung von kognitiven Verzerrungen},
  series = {HMD Praxis der Wirtschaftsinformatik},
  volume = {59},
  journal = {HMD Praxis der Wirtschaftsinformatik},
  number = {2},
  issn = {1436-3011},
  doi = {10.1365/s40702-022-00844-1},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323787},
  pages = {556-571},
  year = {2022},
  abstract = {Artificial intelligence (AI) is increasingly entering sensitive areas of everyday human life. Intelligent systems no longer make only simple decisions but increasingly also complex ones. For example, intelligent systems decide whether or not applicants should be hired by a company. Often, the underlying decision-making process can hardly be comprehended, and unjustified decisions can thus remain undetected, which is why such an AI implementation is frequently referred to as a so-called black box. Consequently, the threat of being disadvantaged by unfair and discriminatory decisions of an AI grows.
If these distortions result from human actions and thought patterns, they are referred to as cognitive biases. Due to the novelty of this topic, however, it is not yet evident which different cognitive biases can occur within an AI project. The aim of this contribution is to provide a holistic overview by means of a structured literature review. The findings are organized and classified along the Cross-Industry Standard Process for Data Mining (CRISP-DM) model, which is widely used in practice. This analysis shows that human influence on an AI is present in every development phase of the model and that it is therefore important to explicitly examine ``human-like'' biases in an AI.},
  language = {de}
}

@techreport{HermJaniesch2019,
  type = {Working Paper},
  author = {Herm, Lukas-Valentin and Janiesch, Christian},
  title = {Anforderungsanalyse f{\"u}r eine Kollaborationsplattform in Blockchain-basierten Wertsch{\"o}pfungsnetzwerken},
  doi = {10.25972/OPUS-18886},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188866},
  year = {2019},
  abstract = {In our globalized world, companies operate on an international market. To concentrate on their core competencies and be more competitive, they integrate into supply chain networks. However, this potential also bears many risks. The emergence of an international market also creates pressure from competitors, forcing companies to collaborate with new and unknown companies in dynamic supply chain networks. In many cases, this can cause a lack of trust, as the application of illegal practices and the breaking of agreements through complex and nontransparent supply chain networks pose a threat. Blockchain technology provides a transparent, decentralized, and distributed means of chained data storage and thus enables trust in its tamper-proof storage, even if there is no trust in the cooperation partners. The use of the blockchain also provides the opportunity to digitize, automate, and monitor processes within supply chain networks in real time. The research project ``Plattform f{\"u}r das integrierte Management von Kollaborationen in Wertsch{\"o}pfungsnetzwerken'' (PIMKoWe) addresses this issue. The aim of this report is to define requirements for such a collaboration platform. We define requirements based on a literature review and expert interviews, which allow for an objective consideration of scientific and practical aspects. An additional survey validates and further classifies these requirements as ``essential'', ``optional'', or ``irrelevant''. In total, we have derived a collection of 45 requirements from different dimensions for the collaboration platform. Employing these requirements, we illustrate a conceptual architecture of the platform and introduce a realistic application scenario.
The presentation of the platform concept and the application scenario can provide the foundation for implementing and introducing a blockchain-based collaboration platform into existing supply chain networks in the context of the research project PIMKoWe.},
  subject = {Blockchain},
  language = {de}
}

@phdthesis{Herm2023,
  author = {Herm, Lukas-Valentin},
  title = {Algorithmic Decision-Making Facilities: Perception and Design of Explainable AI-based Decision Support Systems},
  doi = {10.25972/OPUS-32294},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322948},
  school = {Universit{\"a}t W{\"u}rzburg},
  year = {2023},
  abstract = {Recent computing advances are driving the integration of artificial intelligence (AI)-based systems into nearly every facet of our daily lives. To this end, AI is becoming a frontier for enabling algorithmic decision-making by mimicking or even surpassing human intelligence. Thereupon, these AI-based systems can function as decision support systems (DSSs) that assist experts in high-stakes use cases where human lives are at risk. Yet, all that glitters is not gold due to the accompanying complexity of the underlying machine learning (ML) models, which apply mathematical and statistical algorithms to autonomously derive nonlinear decision knowledge. One particular subclass of ML models, called deep learning models, accomplishes unsurpassed performance, with the drawback that these models are no longer explainable to humans. This divergence may result in an end-user's unwillingness to utilize this type of AI-based DSS, thus diminishing the end-user's system acceptance. Hence, the explainable AI (XAI) research stream has gained momentum, as it develops techniques to unravel this black box while maintaining system performance. Unsurprisingly, these XAI techniques become necessary for justifying, evaluating, improving, or managing the utilization of AI-based DSSs. This yields a plethora of explanation techniques, creating an XAI jungle from which end-users must choose. In turn, these techniques are preliminarily engineered by developers for developers without ensuring an actual end-user fit. Thus, it remains unknown how an end-user's mental model behaves when encountering such explanation techniques. For this purpose, this cumulative thesis seeks to address this research deficiency by investigating end-user perceptions when encountering intrinsic ML and post-hoc XAI explanations. Drawing on this, the findings are synthesized into design knowledge to enable the deployment of XAI-based DSSs in practice. To this end, this thesis comprises six research contributions that follow the iterative and alternating interplay between behavioral science and design science research employed in information systems (IS) research and thus contribute to the overall research objectives as follows: First, an in-depth study of the impact of transparency and (initial) trust on end-user acceptance is conducted by extending and validating the unified theory of acceptance and use of technology model. This study indicates both factors' strong but indirect effects on system acceptance, validating further research incentives. In particular, this thesis focuses on the overarching concept of transparency.
Herein, a systematization of existing user-centered XAI studies is derived in the form of a taxonomy and pattern analysis to structure and guide future research endeavors. This enables the empirical investigation of the theoretical trade-off between performance and explainability in intrinsic ML algorithms, yielding a less gradual trade-off that is fragmented into three explainability groups. This includes an empirical investigation of end-users' perceived explainability of post-hoc explanation types, with local explanation types performing best. Furthermore, an empirical investigation emphasizes the correlation between comprehensibility and explainability, indicating almost significant (with outliers) results for the assumed correlation. The final empirical investigation researches the effect of XAI explanation types on end-user cognitive load as well as the effect of cognitive load on end-user task performance and task time. It also positions local explanation types as best and demonstrates the correlations between cognitive load and task performance and, moreover, between cognitive load and task time. Finally, the last research paper draws, inter alia, on the obtained knowledge and derives a nascent design theory for XAI-based DSSs. This design theory encompasses (meta-)design requirements, design principles, and design features in a domain-independent and interdisciplinary fashion, including end-users and developers as potential user groups. This design theory is ultimately tested through a real-world instantiation in a high-stakes maintenance scenario. From an IS research perspective, this cumulative thesis addresses the lack of research on perception and design knowledge for an ensured utilization of XAI-based DSSs. This lays the foundation for future research to obtain a holistic understanding of end-users' heuristic behaviors during decision-making to facilitate the acceptance of XAI-based DSSs in operational practice.},
  subject = {K{\"u}nstliche Intelligenz},
  language = {en}
}

@techreport{BaumgartBredebachHermetal.2022,
  author = {Baumgart, Michael and Bredebach, Patrick and Herm, Lukas-Valentin and Hock, David and Hofmann, Adrian and Janiesch, Christian and Jankowski, Leif Ole and Kampik, Timotheus and Keil, Matthias and Kolb, Julian and Kr{\"o}hn, Michael and Pytel, Norman and Schaschek, Myriam and St{\"u}bs, Oliver and Winkelmann, Axel and Zeiß, Christian},
  title = {Plattform f{\"u}r das integrierte Management von Kollaborationen in Wertsch{\"o}pfungsnetzwerken (PIMKoWe)},
  editor = {Winkelmann, Axel and Janiesch, Christian},
  issn = {2199-0328},
  doi = {10.25972/OPUS-29335},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-293354},
  pages = {248},
  year = {2022},
  abstract = {The joint project ``Plattform f{\"u}r das integrierte Management von Kollaborationen in Wertsch{\"o}pfungsnetzwerken'' (PIMKoWe, funding code 02P17D160) is a research undertaking within the research program ``Innovationen f{\"u}r die Produktion, Dienstleistung und Arbeit von morgen'' of the call ``Industrie 4.0 - Intelligente Kollaborationen in dynamischen Wertsch{\"o}pfungsnetzwerken'' (InKoWe). The project was funded by the German Federal Ministry of Education and Research (BMBF) and supervised by the project management agency of the Karlsruhe Institute of Technology (PTKA).
The goal of the research project PIMKoWe is the development and provision of a platform solution for the flexibilization, automation, and safeguarding of collaborations in supply chain networks of the industrial sector.},
  subject = {Blockchain},
  language = {de}
}