@article{CarolusWienrich2022,
  author   = {Carolus, Astrid and Wienrich, Carolin},
  title    = {``Imagine this smart speaker to have a body'': An analysis of the external appearances and the characteristics that people associate with voice assistants},
  journal  = {Frontiers in Computer Science},
  volume   = {4},
  year     = {2022},
  issn     = {2624-9898},
  doi      = {10.3389/fcomp.2022.981435},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-297175},
  abstract = {Introduction Modern digital devices, such as conversational agents, simulate human-human interactions to an increasing extent. However, their outward appearance remains distinctly technological. While research revealed that mental representations of technology shape users' expectations and experiences, research on technology sending ambiguous cues is rare. Methods To bridge this gap, this study analyzes drawings of the outward appearance participants associate with voice assistants (Amazon Echo or Google Home). Results Human beings and (humanoid) robots were the most frequent associations, which were rated to be rather trustworthy, conscientious, agreeable, and intelligent. Drawings of the Amazon Echos and Google Homes differed marginally, but "human," "robotic," and "other" associations differed with respect to the ascribed humanness, consciousness, intellect, affinity to technology, and innovation ability. Discussion This study aims to further elaborate on the rather unconscious cognitive and emotional processes elicited by technology and discusses the implications of this perspective for developers, users, and researchers.},
  language = {en},
}

@article{WienrichCarolusRothIsigkeitetal.2022,
  author   = {Wienrich, Carolin and Carolus, Astrid and Roth-Isigkeit, David and Hotho, Andreas},
  title    = {Inhibitors and enablers to explainable {AI} success: a systematic examination of explanation complexity and individual characteristics},
  journal  = {Multimodal Technologies and Interaction},
  volume   = {6},
  number   = {12},
  year     = {2022},
  issn     = {2414-4088},
  doi      = {10.3390/mti6120106},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-297288},
  abstract = {With the increasing adaptability and complexity of advisory artificial intelligence (AI)-based agents, the topics of explainable AI and human-centered AI are moving close together. Variations in the explanation itself have been widely studied, with some contradictory results. These could be due to users' individual differences, which have rarely been systematically studied regarding their inhibiting or enabling effect on the fulfillment of explanation objectives (such as trust, understanding, or workload). This paper aims to shed light on the significance of human dimensions (gender, age, trust disposition, need for cognition, affinity for technology, self-efficacy, attitudes, and mind attribution) as well as their interplay with different explanation modes (no, simple, or complex explanation). Participants played the game Deal or No Deal while interacting with an AI-based agent. The agent gave advice to the participants on whether they should accept or reject the deals offered to them. As expected, giving an explanation had a positive influence on the explanation objectives. However, the users' individual characteristics particularly reinforced the fulfillment of the objectives. The strongest predictor of objective fulfillment was the degree of attribution of human characteristics. The more human characteristics were attributed, the more trust was placed in the agent, advice was more likely to be accepted and understood, and important needs were satisfied during the interaction. Thus, the current work contributes to a better understanding of the design of explanations of an AI-based agent system that takes into account individual characteristics and meets the demand for both explainable and human-centered agent systems.},
  language = {en},
}