@comment{NOTE(review): Removed the redundant "series" field from both entries
  (it duplicated "journal" exactly -- an auto-export artifact; under biblatex
  the @article "series" field is printed after the journal title, so the venue
  would render twice). Titles normalized to Title Case with {Uncanny Valley}
  brace-protected against style recasing. "Jancke" may be missing a diaeresis
  (J{\"a}ncke) -- verify against the published article before changing the
  author field. All identifiers, numbers, and abstracts are unchanged.}

@article{CheethamWuPaulietal.2015,
  author   = {Cheetham, Marcus and Wu, Lingdan and Pauli, Paul and Jancke, Lutz},
  title    = {Arousal, Valence, and the {Uncanny Valley}: Psychophysiological and Self-Report Findings},
  journal  = {Frontiers in Psychology},
  volume   = {6},
  number   = {981},
  year     = {2015},
  doi      = {10.3389/fpsyg.2015.00981},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-151519},
  language = {en},
  abstract = {The main prediction of the Uncanny Valley Hypothesis (UVH) is that observation of humanlike characters that are difficult to distinguish from the human counterpart will evoke a state of negative affect. Well-established electrophysiological [late positive potential (LPP) and facial electromyography (EMG)] and self-report [Self-Assessment Manikin (SAM)] indices of valence and arousal, i.e., the primary orthogonal dimensions of affective experience, were used to test this prediction by examining affective experience in response to categorically ambiguous compared with unambiguous avatar and human faces (N = 30). LPP and EMG provided direct psychophysiological indices of affective state during passive observation and the SAM provided self-reported indices of affective state during explicit cognitive evaluation of static facial stimuli. The faces were drawn from well-controlled morph continua representing the UVH' dimension of human likeness (DHL). The results provide no support for the notion that category ambiguity along the DHL is specifically associated with enhanced experience of negative affect. On the contrary, the LPP and SAM-based measures of arousal and valence indicated a general increase in negative affective state (i.e., enhanced arousal and negative valence) with greater morph distance from the human end of the DHL. A second sample (N = 30) produced the same finding, using an ad hoc self-rating scale of feelings of familiarity, i.e., an oft-used measure of affective experience along the UVH' familiarity dimension. In conclusion, this multi-method approach using well-validated psychophysiological and self-rating indices of arousal and valence rejects for passive observation and for explicit affective evaluation of static faces the main prediction of the UVH.},
}

@article{KozlikNeumannLozo2015,
  author   = {Kozlik, Julia and Neumann, Roland and Lozo, Ljubica},
  title    = {Contrasting Motivational Orientation and Evaluative Coding Accounts: On the Need to Differentiate the Effectors of Approach/Avoidance Responses},
  journal  = {Frontiers in Psychology},
  volume   = {6},
  number   = {563},
  year     = {2015},
  doi      = {10.3389/fpsyg.2015.00563},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-143192},
  language = {en},
  abstract = {Several emotion theorists suggest that valenced stimuli automatically trigger motivational orientations and thereby facilitate corresponding behavior. Positive stimuli were thought to activate approach motivational circuits which in turn primed approach-related behavioral tendencies whereas negative stimuli were supposed to activate avoidance motivational circuits so that avoidance-related behavioral tendencies were primed (motivational orientation account). However, recent research suggests that typically observed affective stimulus response compatibility phenomena might be entirely explained in terms of theories accounting for mechanisms of general action control instead of assuming motivational orientations to mediate the effects (evaluative coding account). In what follows, we explore to what extent this notion is applicable. We present literature suggesting that evaluative coding mechanisms indeed influence a wide variety of affective stimulus response compatibility phenomena. However, the evaluative coding account does not seem to be sufficient to explain affective S-R compatibility effects. Instead, several studies provide clear evidence in favor of the motivational orientation account that seems to operate independently of evaluative coding mechanisms. Implications for theoretical developments and future research designs are discussed.},
}