@phdthesis{Grundke2023,
  author   = {Grundke, Andrea},
  title    = {Head and Heart: On the Acceptability of Sophisticated Robots Based on an Enhancement of the {Mind Perception Dichotomy} and the {Uncanny Valley of Mind}},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2023},
  doi      = {10.25972/OPUS-33015},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-330152},
  abstract = {With the continuous development of artificial intelligence, there is an effort to let the expressed mind of robots resemble more and more human-like minds. However, just as the human-like appearance of robots can lead to feelings of aversion to such robots, recent research has shown that the apparent mind expressed by machines can also be responsible for their negative evaluations. This work strives to explore facets of aversion evoked by machines with human-like mind (uncanny valley of mind) within three empirical projects from a psychological point of view in different contexts, including the resulting consequences. In Manuscript \#1, the perspective of previous work in the research area is reversed and thus shows that humans feel eeriness in response to robots that can read human minds, a capability unknown from human-human interaction. In Manuscript \#2, it is explored whether empathy for a robot being harmed by a human is a way to alleviate the uncanny valley of mind. A result of this work worth highlighting is that aversion in this study did not arise from the manipulation of the robot's mental capabilities but from its attributed incompetence and failure. The results of Manuscript \#3 highlight that status threat is revealed if humans perform worse than machines in a work-relevant task requiring human-like mental capabilities, while higher status threat is linked with a higher willingness to interact, due to the machine's perceived usefulness. In sum, if explanatory variables and concrete scenarios are considered, people will react fairly positively to machines with human-like mental capabilities. As long as the machine's usefulness is palpable to people, but machines are not fully autonomous, people seem willing to interact with them, accepting aversion in favor of the expected benefits.},
  subject  = {Humanoider Roboter},
  language = {en},
}