@phdthesis{Schneider2024, author = {Schneider, Florian Alexander}, title = {Voice Assistants are Social Actors - An Empirical Analysis of Media Equation Effects in Human-Voice Assistant Interaction}, doi = {10.25972/OPUS-34670}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-346704}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Ownership and usage of personal voice assistant devices like Amazon Echo or Google Home have increased drastically over the last decade since their market launch. This thesis builds upon existing computers are social actors (CASA) and media equation research that is concerned with humans displaying social reactions usually exclusive to human-human interaction when interacting with media and technological devices. CASA research has been conducted with a variety of technological devices such as desktop computers, smartphones, embodied virtual agents, and robots. However, despite their increasing popularity, little empirical work has been done to examine social reactions towards these personal stand-alone voice assistant devices, also referred to as smart speakers. Thus, this dissertation aims to adopt the CASA approach to empirically evaluate social responses to smart speakers. With this goal in mind, four laboratory experiments with a total of 407 participants have been conducted for this thesis. Results show that participants display a wide range of social reactions when interacting with voice assistants. This includes the utilization of politeness strategies such as the interviewer-bias, which led to participants giving better evaluations directly to a smart speaker device compared to a separate computer. Participants also displayed prosocial behavior toward a smart speaker after interdependence and thus a team affiliation had been induced. 
In a third study, participants applied gender stereotypes to a smart speaker not only in self-reports but also exhibited conformal behavior patterns based on the voice the device used. In a fourth and final study, participants followed the rule of reciprocity and provided help to a smart speaker device that helped them in a prior interaction. This effect was also moderated by subjects' personalities, indicating that individual differences are relevant for CASA research. Consequently, this thesis provides strong empirical support for a voice assistants are social actors paradigm. This doctoral dissertation demonstrates the power and utility of this research paradigm for media psychological research and shows how considering voice assistant devices as social actors lead to a more profound understanding of voice-based technology. The findings discussed in this thesis also have implications for these devices that need to be carefully considered both in future research as well as in practical design.}, subject = {Mensch-Maschine-Kommunikation}, language = {en} } @phdthesis{Muth2023, author = {Muth, Felicitas Vanessa}, title = {Step by step: Sense of agency for complex action-event sequences}, doi = {10.25972/OPUS-30756}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-307569}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {From simply ringing a bell to preparing a five-course menu, human behavior commonly causes changes in the environment. Such episodes where an agent acts, thereby causing changes in their environment constitute the sense of agency. In this thesis four series of experiments elucidate how the sense of agency is represented in complex action-event sequences, thereby bridging a gap between basic cognitive research and real-life practice. It builds upon extensive research on the sense of agency in unequivocal sequences consisting of single actions and distinct, predominantly auditory, outcomes. 
Employing implicit as well as explicit measures, the scope is opened up to multi-step sequences. The experiments show that it is worthwhile devoting more research to complex action-event sequences. With a newly introduced auditory measure (Chapter II), common phenomena such as temporal binding and a decrease in agency ratings following distorted feedback were replicated in multi-step sequences. However, diverging results between traditional implicit and explicit measures call for further inspection. Multisensory integration appears to gain more weight when multiple actions have to be performed to attain a goal leading to more accurate representations of the own actions (Chapter III). Additionally, freedom of choice (Chapter III) as well as early spatial ambiguity altered the perceived timing of outcomes, while late spatial ambiguity (Chapter IV) and the outcome's self-relevance did not (Chapter V). The data suggests that the cognitive system is capable of representing multi-step action-event sequences implicitly and explicitly. Actions and sensory events show a temporal attraction stemming from a bias in the perception of outcomes. Explicit knowledge about causing an event-sequence facilitates neither feelings of control nor taking authorship. The results corroborate current theorizing on the underpinnings of temporal binding and the divergence between traditional implicit and explicit measures of the sense of agency. 
Promising avenues for further research include structured analyses of how much inferred causality contributes to implicit and explicit measures of agency as well as finding alternative measures to capture conceptual as well as non-conceptual facets of the agency experience with one method.}, subject = {Psychologie}, language = {en} } @proceedings{OPUS4-24577, title = {Proceedings of the 1st Games Technology Summit}, editor = {von Mammen, Sebastian and Klemke, Roland and Lorber, Martin}, isbn = {978-3-945459-36-2}, doi = {10.25972/OPUS-24577}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-245776}, pages = {vi, 46}, year = {2021}, abstract = {As part of the Clash of Realities International Conference on the Technology and Theory of Digital Games, the Game Technology Summit is a premium venue to bring together experts from academia and industry to disseminate state-of-the-art research on trending technology topics in digital games. In this first iteration of the Game Technology Summit, we specifically paid attention on how the successes in AI in Natural User Interfaces have been impacting the games industry (industry track) and which scientific, state-of-the-art ideas and approaches are currently pursued (scientific track).}, subject = {Veranstaltung}, language = {en} } @phdthesis{Dittrich2021, author = {Dittrich, Monique}, title = {Persuasive Technology to Mitigate Aggressive Driving : A Human-centered Design Approach}, doi = {10.25972/OPUS-23022}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-230226}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Manifestations of aggressive driving, such as tailgating, speeding, or swearing, are not trivial offenses but are serious problems with hazardous consequences---for the offender as well as the target of aggression. Aggression on the road erases the joy of driving, affects heart health, causes traffic jams, and increases the risk of traffic accidents. 
This work is aimed at developing a technology-driven solution to mitigate aggressive driving according to the principles of Persuasive Technology. Persuasive Technology is a scientific field dealing with computerized software or information systems that are designed to reinforce, change, or shape attitudes, behaviors, or both without using coercion or deception. Against this background, the Driving Feedback Avatar (DFA) was developed through this work. The system is a visual in-car interface that provides the driver with feedback on aggressive driving. The main element is an abstract avatar displayed in the vehicle. The feedback is transmitted through the emotional state of this avatar, i.e., if the driver behaves aggressively, the avatar becomes increasingly angry (negative feedback). If no aggressive action occurs, the avatar is more relaxed (positive feedback). In addition, directly after an aggressive action is recognized by the system, the display is flashing briefly to give the driver an instant feedback on his action. Five empirical studies were carried out as part of the human-centered design process of the DFA. They were aimed at understanding the user and the use context of the future system, ideating system ideas, and evaluating a system prototype. The initial research question was about the triggers of aggressive driving. In a driver study on a public road, 34 participants reported their emotions and their triggers while they were driving (study 1). The second research question asked for interventions to cope with aggression in everyday life. For this purpose, 15 experts dealing with the treatment of aggressive individuals were interviewed (study 2). In total, 75 triggers of aggressive driving and 34 anti-aggression interventions were identified. Inspired by these findings, 108 participants generated more than 100 ideas of how to mitigate aggressive driving using technology in a series of ideation workshops (study 3). 
Based on these ideas, the concept of the DFA was elaborated on. In an online survey, the concept was evaluated by 1,047 German respondents to get a first assessment of its perception (study 4). Later on, the DFA was implemented into a prototype and evaluated in an experimental driving study with 32 participants, focusing on the system's effectiveness (study 5). The DFA had only weak and, in part, unexpected effects on aggressive driving that require a deeper discussion. With the DFA, this work has shown that there is room to change aggressive driving through Persuasive Technology. However, this is a very sensitive issue with special requirements regarding the design of avatar-based feedback systems in the context of aggressive driving. Moreover, this work makes a significant contribution through the number of empirical insights gained on the problem of aggressive driving and wants to encourage future research and design activities in this regard.}, subject = {Fahrerassistenzsystem}, language = {en} } @phdthesis{Wandtner2018, author = {Wandtner, Bernhard}, title = {Non-driving related tasks in highly automated driving - Effects of task characteristics and drivers' self-regulation on take-over performance}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-173956}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {The rise of automated driving will fundamentally change our mobility in the near future. This thesis specifically considers the stage of so called highly automated driving (Level 3, SAE International, 2014). At this level, a system carries out vehicle guidance in specific application areas, e.g. on highway roads. The driver can temporarily suspend from monitoring the driving task and might use the time by engaging in so called non-driving related tasks (NDR-tasks). However, the driver is still in charge to resume vehicle control when prompted by the system. 
This new role of the driver has to be critically examined from a human factors perspective. The main aim of this thesis was to systematically investigate the impact of different NDR-tasks on driver behavior and take-over performance. Wickens' (2008) architecture of multiple resource theory was chosen as theoretical framework, with the building blocks of multiplicity (task interference due to resource overlap), mental workload (task demands), and aspects of executive control or self-regulation. Specific adaptations and extensions of the theory were discussed to account for the context of NDR-task interactions in highly automated driving. Overall four driving simulator studies were carried out to investigate the role of these theoretical components. Study 1 showed that drivers focused NDR-task engagement on sections of highly automated compared to manual driving. In addition, drivers avoided task engagement prior to predictable take-over situations. These results indicate that self-regulatory behavior, as reported for manual driving, also takes place in the context of highly automated driving. Study 2 specifically addressed the impact of NDR-tasks' stimulus and response modalities on take-over performance. Results showed that particularly visual-manual tasks with high motoric load (including the need to get rid of a handheld object) had detrimental effects. However, drivers seemed to be aware of task specific distraction in take-over situations and strictly canceled visual-manual tasks compared to a low impairing auditory-vocal task. Study 3 revealed that also the mental demand of NDR-tasks should be considered for drivers' take-over performance. Finally, different human-machine-interfaces were developed and evaluated in Simulator Study 4. Concepts including an explicit pre-alert ("notification") clearly supported drivers' self-regulation and achieved high usability and acceptance ratings. 
Overall, this thesis indicates that the architecture of multiple resource theory provides a useful framework for research in this field. Practical implications arise regarding the potential legal regulation of NDR-tasks as well as the design of elaborated human-machine-interfaces.}, subject = {Autonomes Fahrzeug}, language = {en} } @phdthesis{Roth2020, author = {Roth, Daniel}, title = {Intrapersonal, Interpersonal, and Hybrid Interactions in Virtual Reality}, doi = {10.25972/OPUS-18862}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188627}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Virtual reality and related media and communication technologies have a growing impact on professional application fields and our daily life. Virtual environments have the potential to change the way we perceive ourselves and how we interact with others. In comparison to other technologies, virtual reality allows for the convincing display of a virtual self-representation, an avatar, to oneself and also to others. This is referred to as user embodiment. Avatars can be of varying realism and abstraction in their appearance and in the behaviors they convey. Such userembodying interfaces, in turn, can impact the perception of the self as well as the perception of interactions. For researchers, designers, and developers it is of particular interest to understand these perceptual impacts, to apply them to therapy, assistive applications, social platforms, or games, for example. The present thesis investigates and relates these impacts with regard to three areas: intrapersonal effects, interpersonal effects, and effects of social augmentations provided by the simulation. With regard to intrapersonal effects, we specifically explore which simulation properties impact the illusion of owning and controlling a virtual body, as well as a perceived change in body schema. 
Our studies lead to the construction of an instrument to measure these dimensions and our results indicate that these dimensions are especially affected by the level of immersion, the simulation latency, as well as the level of personalization of the avatar. With regard to interpersonal effects we compare physical and user-embodied social interactions, as well as different degrees of freedom in the replication of nonverbal behavior. Our results suggest that functional levels of interaction are maintained, whereas aspects of presence can be affected by avatar-mediated interactions, and collaborative motor coordination can be disturbed by immersive simulations. Social interaction is composed of many unknown symbols and harmonic patterns that define our understanding and interpersonal rapport. For successful virtual social interactions, a mere replication of physical world behaviors to virtual environments may seem feasible. However, the potential of mediated social interactions goes beyond this mere replication. In a third vein of research, we propose and evaluate alternative concepts on how computers can be used to actively engage in mediating social interactions, namely hybrid avatar-agent technologies. Specifically, we investigated the possibilities to augment social behaviors by modifying and transforming user input according to social phenomena and behavior, such as nonverbal mimicry, directed gaze, joint attention, and grouping. Based on our results we argue that such technologies could be beneficial for computer-mediated social interactions such as to compensate for lacking sensory input and disturbances in data transmission or to increase aspects of social presence by visual substitution or amplification of social behaviors. Based on related work and presented findings, the present thesis proposes the perspective of considering computers as social mediators. 
Concluding from prototypes and empirical studies, the potential of technology to be an active mediator of social perception with regard to the perception of the self, as well as the perception of social interactions may benefit our society by enabling further methods for diagnosis, treatment, and training, as well as the inclusion of individuals with social disorders. To this regard, we discuss implications for our society and ethical aspects. This thesis extends previous empirical work and further presents novel instruments, concepts, and implications to open up new perspectives for the development of virtual reality, mixed reality, and augmented reality applications.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @phdthesis{Tscharn2019, author = {Tscharn, Robert}, title = {Innovative And Age-Inclusive Interaction Design with Image-Schematic Metaphors}, doi = {10.25972/OPUS-17576}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-175762}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The field of human-computer interaction (HCI) strives for innovative user interfaces. Innovative and novel user interfaces are a challenge for a growing population of older users and endanger older adults to be excluded from an increasingly digital world. This is because older adults often have lower cognitive abilities and little prior experiences with technology. This thesis aims at resolving the tension between innovation and age-inclusiveness by developing user interfaces that can be used regardless of cognitive abilities and technology-dependent prior knowledge. The method of image-schematic metaphors holds promises for innovative and age-inclusive interaction design. Image-schematic metaphors represent a form of technology-independent prior knowledge. They reveal basic mental models and can be gathered in language (e.g. bank account is container from "I put money into my bank account"). 
Based on a discussion of previous applications of image-schematic metaphors in HCI, the present work derives three empirical research questions regarding image-schematic metaphors for innovative and age-inclusive interaction design. The first research question addresses the yet untested assumption that younger and older adults overlap in their technology-independent prior knowledge and, therefore, their usage of image-schematic metaphors. In study 1, a total of 41 participants described abstract concepts from the domains of online banking and everyday life. In study 2, ten contextual interviews were conducted. In both studies, younger and older adults showed a substantial overlap of 70\% to 75\%, indicating that also their mental models overlap substantially. The second research question addresses the applicability and potential of image-schematic metaphors for innovative design from the perspective of designers. In study 3, 18 student design teams completed an ideation process with either an affinity diagram as the industry standard, image-schematic metaphors or both methods in combination and created paper prototypes. The image-schematic metaphor method alone, but not the combination of both methods, was readily adopted and applied just as a well as the more familiar standard method. In study 4, professional interaction designers created prototypes either with or without image-schematic metaphors. In both studies, the method of image-schematic metaphors was perceived as applicable and creativity stimulating. The third research question addresses whether designs that explicitly follow image-schematic metaphors are more innovative and age-inclusive regarding differences in cognitive abilities and prior technological knowledge. 
In two experimental studies (study 5 and 6) involving a total of 54 younger and 53 older adults, prototypes that were designed with image-schematic metaphors were perceived as more innovative compared to those who were designed without image-schematic metaphors. Moreover, the impact of prior technological knowledge on interaction was reduced for prototypes that had been designed with image-schematic metaphors. However, participants' cognitive abilities and age still influenced the interaction significantly. The present work provides empirical as well as methodological findings that can help to promote the method of image-schematic metaphors in interaction design. As a result of these studies it can be concluded that the image-schematic metaphors are an applicable and effective method for innovative user interfaces that can be used regardless of prior technological knowledge.}, subject = {Mensch-Maschine-Kommunikation}, language = {en} } @phdthesis{Gall2022, author = {Gall, Dominik}, title = {Increasing the effectiveness of human-computer interfaces for mental health interventions}, doi = {10.25972/OPUS-23012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-230120}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Human-computer interfaces have the potential to support mental health practitioners in alleviating mental distress. Adaption of this technology in practice is, however, slow. We provide means to extend the design space of human-computer interfaces for mitigating mental distress. To this end, we suggest three complementary approaches: using presentation technology, using virtual environments, and using communication technology to facilitate social interaction. 
We provide new evidence that elementary aspects of presentation technology affect the emotional processing of virtual stimuli, that perception of our environment affects the way we assess our environment, and that communication technologies affect social bonding between users. By showing how interfaces modify emotional reactions and facilitate social interaction, we provide converging evidence that human-computer interfaces can help alleviate mental distress. These findings may advance the goal of adapting technological means to the requirements of mental health practitioners.}, subject = {Mensch-Maschine-Kommunikation}, language = {en} } @phdthesis{Grundke2023, author = {Grundke, Andrea}, title = {Head and Heart: On the Acceptability of Sophisticated Robots Based on an Enhancement of the Mind Perception Dichotomy and the Uncanny Valley of Mind}, doi = {10.25972/OPUS-33015}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-330152}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {With the continuous development of artificial intelligence, there is an effort to let the expressed mind of robots resemble more and more human-like minds. However, just as the human-like appearance of robots can lead to feelings of aversion to such robots, recent research has shown that the apparent mind expressed by machines can also be responsible for their negative evaluations. This work strives to explore facets of aversion evoked by machines with human-like mind (uncanny valley of mind) within three empirical projects from a psychological point of view in different contexts, including the resulting consequences. In Manuscript \#1, the perspective of previous work in the research area is reversed and thus shows that humans feel eeriness in response to robots that can read human minds, a capability unknown from human-human interaction. 
In Manuscript \#2, it is explored whether empathy for a robot being harmed by a human is a way to alleviate the uncanny valley of mind. A result of this work worth highlighting is that aversion in this study did not arise from the manipulation of the robot's mental capabilities but from its attributed incompetence and failure. The results of Manuscript \#3 highlight that status threat is revealed if humans perform worse than machines in a work-relevant task requiring human-like mental capabilities, while higher status threat is linked with a higher willingness to interact, due to the machine's perceived usefulness. In sum, if explanatory variables and concrete scenarios are considered, people will react fairly positively to machines with human-like mental capabilities. As long as the machine's usefulness is palpable to people, but machines are not fully autonomous, people seem willing to interact with them, accepting aversion in favor of the expected benefits.}, subject = {Humanoider Roboter}, language = {en} } @phdthesis{Menne2020, author = {Menne, Isabelle M.}, title = {Facing Social Robots - Emotional Reactions towards Social Robots}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-120-4}, doi = {10.25972/WUP-978-3-95826-121-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-187131}, school = {W{\"u}rzburg University Press}, pages = {XXIV, 201}, year = {2020}, abstract = {Ein Army Colonel empfindet Mitleid mit einem Roboter, der versuchsweise Landminen entsch{\"a}rft und deklariert den Test als inhuman (Garreau, 2007). Roboter bekommen milit{\"a}rische Bef{\"o}rderungen, Beerdigungen und Ehrenmedaillen (Garreau, 2007; Carpenter, 2013). Ein Schildkr{\"o}tenroboter wird entwickelt, um Kindern beizubringen, Roboter gut zu behandeln (Ackermann, 2018). 
Der humanoide Roboter Sophia wurde erst k{\"u}rzlich Saudi-Arabischer Staatsb{\"u}rger und es gibt bereits Debatten, ob Roboter Rechte bekommen sollen (Delcker, 2018). Diese und {\"a}hnliche Entwicklungen zeigen schon jetzt die Bedeutsamkeit von Robotern und die emotionale Wirkung die diese ausl{\"o}sen. Dennoch scheinen sich diese emotionalen Reaktionen auf einer anderen Ebene abzuspielen, gemessen an Kommentaren in Internetforen. Dort ist oftmals die Rede davon, wieso jemand {\"u}berhaupt emotional auf einen Roboter reagieren kann. Tats{\"a}chlich ist es, rein rational gesehen, schwierig zu erkl{\"a}ren, warum Menschen mit einer leblosen (‚mindless') Maschine mitf{\"u}hlen sollten. Und dennoch zeugen nicht nur oben genannte Berichte, sondern auch erste wissenschaftliche Studien (z.B. Rosenthal- von der P{\"u}tten et al., 2013) von dem emotionalen Einfluss den Roboter auf Menschen haben k{\"o}nnen. Trotz der Bedeutsamkeit der Erforschung emotionaler Reaktionen auf Roboter existieren bislang wenige wissenschaftliche Studien hierzu. Tats{\"a}chlich identifizierten Kappas, Krumhuber und K{\"u}ster (2013) die systematische Analyse und Evaluation sozialer Reaktionen auf Roboter als eine der gr{\"o}ßten Herausforderungen der affektiven Mensch-Roboter Interaktion. Nach Scherer (2001; 2005) bestehen Emotionen aus der Koordination und Synchronisation verschiedener Komponenten, die miteinander verkn{\"u}pft sind. Motorischer Ausdruck (Mimik), subjektives Erleben, Handlungstendenzen, physiologische und kognitive Komponenten geh{\"o}ren hierzu. Um eine Emotion vollst{\"a}ndig zu erfassen, m{\"u}ssten all diese Komponenten gemessen werden, jedoch wurde eine solch umfassende Analyse bisher noch nie durchgef{\"u}hrt (Scherer, 2005). Haupts{\"a}chlich werden Frageb{\"o}gen eingesetzt (vgl. Bethel \& Murphy, 2010), die allerdings meist nur das subjektive Erleben abfragen. 
Bakeman und Gottman (1997) geben sogar an, dass nur etwa 8\% der psychologischen Forschung auf Verhaltensdaten basiert, obwohl die Psychologie traditionell als das ‚Studium von Psyche und Verhalten' (American Psychological Association, 2018) definiert wird. Die Messung anderer Emotionskomponenten ist selten. Zudem sind Frageb{\"o}gen mit einer Reihe von Nachteilen behaftet (Austin, Deary, Gibson, McGregor, Dent, 1998; Fan et al., 2006; Wilcox, 2011). Bethel und Murphy (2010) als auch Arkin und Moshkina (2015) pl{\"a}dieren f{\"u}r einen Multi-Methodenansatz um ein umfassenderes Verst{\"a}ndnis von affektiven Prozessen in der Mensch-Roboter Interaktion zu erlangen. Das Hauptziel der vorliegenden Dissertation ist es daher, mithilfe eines Multi-Methodenansatzes verschiedene Komponenten von Emotionen (motorischer Ausdruck, subjektive Gef{\"u}hlskomponente, Handlungstendenzen) zu erfassen und so zu einem vollst{\"a}ndigeren und tiefgreifenderem Bild emotionaler Prozesse auf Roboter beizutragen. Um dieses Ziel zu erreichen, wurden drei experimentelle Studien mit insgesamt 491 Teilnehmern durchgef{\"u}hrt. Mit unterschiedlichen Ebenen der „apparent reality" (Frijda, 2007) sowie Macht / Kontrolle {\"u}ber die Situation (vgl. Scherer \& Ellgring, 2007) wurde untersucht, inwiefern sich Intensit{\"a}t und Qualit{\"a}t emotionaler Reaktionen auf Roboter {\"a}ndern und welche weiteren Faktoren (Aussehen des Roboters, emotionale Expressivit{\"a}t des Roboters, Behandlung des Roboters, Autorit{\"a}tsstatus des Roboters) Einfluss aus{\"u}ben. Experiment 1 basierte auf Videos, die verschiedene Arten von Robotern (tier{\"a}hnlich, anthropomorph, maschinenartig), die entweder emotional expressiv waren oder nicht (an / aus) in verschiedenen Situationen (freundliche Behandlung des Roboters vs. Misshandlung) zeigten. Frageb{\"o}gen {\"u}ber selbstberichtete Gef{\"u}hle und die motorisch-expressive Komponente von Emotionen: Mimik (vgl. Scherer, 2005) wurden analysiert. 
Das Facial Action Coding System (Ekman, Friesen, \& Hager, 2002), die umfassendste und am weitesten verbreitete Methode zur objektiven Untersuchung von Mimik, wurde hierf{\"u}r verwendet. Die Ergebnisse zeigten, dass die Probanden Gesichtsausdr{\"u}cke (Action Unit [AU] 12 und AUs, die mit positiven Emotionen assoziiert sind, sowie AU 4 und AUs, die mit negativen Emotionen assoziiert sind) sowie selbstberichtete Gef{\"u}hle in {\"U}bereinstimmung mit der Valenz der in den Videos gezeigten Behandlung zeigten. Bei emotional expressiven Robotern konnten st{\"a}rkere emotionale Reaktionen beobachtet werden als bei nicht-expressiven Robotern. Der tier{\"a}hnliche Roboter Pleo erfuhr in der Misshandlungs-Bedingung am meisten Mitleid, Empathie, negative Gef{\"u}hle und Traurigkeit, gefolgt vom anthropomorphen Roboter Reeti und am wenigsten f{\"u}r den maschinenartigen Roboter Roomba. Roomba wurde am meisten Antipathie zugeschrieben. Die Ergebnisse kn{\"u}pfen an fr{\"u}here Forschungen an (z.B. Krach et al., 2008; Menne \& Schwab, 2018; Riek et al., 2009; Rosenthal-von der P{\"u}tten et al., 2013) und zeigen das Potenzial der Mimik f{\"u}r eine nat{\"u}rliche Mensch-Roboter Interaktion. Experiment 2 und Experiment 3 {\"u}bertrugen die klassischen Experimente von Milgram (1963; 1974) zum Thema Gehorsam in den Kontext der Mensch-Roboter Interaktion. Die Gehorsamkeitsstudien von Milgram wurden als sehr geeignet erachtet, um das Ausmaß der Empathie gegen{\"u}ber einem Roboter im Verh{\"a}ltnis zum Gehorsam gegen{\"u}ber einem Roboter zu untersuchen. Experiment 2 unterschied sich von Experiment 3 in der Ebene der „apparent reality" (Frijda, 2007): in Anlehnung an Milgram (1963) wurde eine rein text-basierte Studie (Experiment 2) einer live Mensch-Roboter Interaktion (Experiment 3) gegen{\"u}bergestellt. 
W{\"a}hrend die abh{\"a}ngigen Variablen von Experiment 2 aus den Selbstberichten emotionaler Gef{\"u}hle sowie Einsch{\"a}tzungen des hypothetischen Verhaltens bestand, erfasste Experiment 3 subjektive Gef{\"u}hle sowie reales Verhalten (Reaktionszeit: Dauer des Z{\"o}gerns; Gehorsamkeitsrate; Anzahl der Proteste; Mimik) der Teilnehmer. Beide Experimente untersuchten den Einfluss der Faktoren „Autorit{\"a}tsstatus" (hoch / niedrig) des Roboters, der die Befehle erteilt (Nao) und die emotionale Expressivit{\"a}t (an / aus) des Roboters, der die Strafen erh{\"a}lt (Pleo). Die subjektiven Gef{\"u}hle der Teilnehmer aus Experiment 2 unterschieden sich zwischen den Gruppen nicht. Dar{\"u}ber hinaus gaben nur wenige Teilnehmer (20.2\%) an, dass sie den „Opfer"-Roboter definitiv bestrafen w{\"u}rden. Ein {\"a}hnliches Ergebnis fand auch Milgram (1963). Das reale Verhalten von Versuchsteilnehmern in Milgrams' Labor-Experiment unterschied sich jedoch von Einsch{\"a}tzungen hypothetischen Verhaltens von Teilnehmern, denen Milgram das Experiment nur beschrieben hatte. Ebenso lassen Kommentare von Teilnehmern aus Experiment 2 darauf schließen, dass das beschriebene Szenario m{\"o}glicherweise als fiktiv eingestuft wurde und Einsch{\"a}tzungen von hypothetischem Verhalten daher kein realistisches Bild realen Verhaltens gegen{\"u}ber Roboter in einer live Interaktion zeichnen k{\"o}nnen. Daher wurde ein weiteres Experiment (Experiment 3) mit einer Live Interaktion mit einem Roboter als Autorit{\"a}tsfigur (hoher Autorit{\"a}tsstatus vs. niedriger) und einem weiteren Roboter als „Opfer" (emotional expressiv vs. nicht expressiv) durchgef{\"u}hrt. Es wurden Gruppenunterschiede in Frageb{\"o}gen {\"u}ber emotionale Reaktionen gefunden. Dem emotional expressiven Roboter wurde mehr Empathie entgegengebracht und es wurde mehr Freude und weniger Antipathie berichtet als gegen{\"u}ber einem nicht-expressiven Roboter. 
Außerdem konnten Gesichtsausdr{\"u}cke beobachtet werden, die mit negativen Emotionen assoziiert sind w{\"a}hrend Probanden Nao's Befehl ausf{\"u}hrten und Pleo bestraften. Obwohl Probanden tendenziell l{\"a}nger z{\"o}gerten, wenn sie einen emotional expressiven Roboter bestrafen sollten und der Befehl von einem Roboter mit niedrigem Autorit{\"a}tsstatus kam, wurde dieser Unterschied nicht signifikant. Zudem waren alle bis auf einen Probanden gehorsam und bestraften Pleo, wie vom Nao Roboter befohlen. Dieses Ergebnis steht in starkem Gegensatz zu dem selbstberichteten hypothetischen Verhalten der Teilnehmer aus Experiment 2 und unterst{\"u}tzt die Annahme, dass die Einsch{\"a}tzungen von hypothetischem Verhalten in einem Mensch-Roboter-Gehorsamkeitsszenario nicht zuverl{\"a}ssig sind f{\"u}r echtes Verhalten in einer live Mensch-Roboter Interaktion. Situative Variablen, wie z.B. der Gehorsam gegen{\"u}ber Autorit{\"a}ten, sogar gegen{\"u}ber einem Roboter, scheinen st{\"a}rker zu sein als Empathie f{\"u}r einen Roboter. Dieser Befund kn{\"u}pft an andere Studien an (z.B. Bartneck \& Hu, 2008; Geiskkovitch et al., 2016; Menne, 2017; Slater et al., 2006), er{\"o}ffnet neue Erkenntnisse zum Einfluss von Robotern, zeigt aber auch auf, dass die Wahl einer Methode um Empathie f{\"u}r einen Roboter zu evozieren eine nicht triviale Angelegenheit ist (vgl. Geiskkovitch et al., 2016; vgl. Milgram, 1965). Insgesamt st{\"u}tzen die Ergebnisse die Annahme, dass die emotionalen Reaktionen auf Roboter tiefgreifend sind und sich sowohl auf der subjektiven Ebene als auch in der motorischen Komponente zeigen. Menschen reagieren emotional auf einen Roboter, der emotional expressiv ist und eher weniger wie eine Maschine aussieht. Sie empfinden Empathie und negative Gef{\"u}hle, wenn ein Roboter misshandelt wird und diese emotionalen Reaktionen spiegeln sich in der Mimik. 
Dar{\"u}ber hinaus unterscheiden sich die Einsch{\"a}tzungen von Menschen {\"u}ber ihr eigenes hypothetisches Verhalten von ihrem tats{\"a}chlichen Verhalten, weshalb videobasierte oder live Interaktionen zur Analyse realer Verhaltensreaktionen empfohlen wird. Die Ankunft sozialer Roboter in der Gesellschaft f{\"u}hrt zu nie dagewesenen Fragen und diese Dissertation liefert einen ersten Schritt zum Verst{\"a}ndnis dieser neuen Herausforderungen.}, subject = {Roboter}, language = {en} }