@article{WieserMoscovitch2015,
  author   = {Wieser, Matthias J. and Moscovitch, David A.},
  title    = {The effect of affective context on visuocortical processing of neutral faces in social anxiety -- An ERP study},
  journal  = {Frontiers in Psychology},
  volume   = {6},
  pages    = {1824},
  year     = {2015},
  doi      = {10.3389/fpsyg.2015.01824},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-125148},
  abstract = {It has been demonstrated that verbal context information alters the neural processing of ambiguous faces, such as faces with no apparent facial expression. In social anxiety, neutral faces may be implicitly threatening for socially anxious individuals because of their ambiguous nature, but even more so if these neutral faces are put in self-referential negative contexts. Therefore, we measured event-related brain potentials (ERPs) in response to neutral faces that were preceded by affective verbal information (negative, neutral, positive). Participants with low social anxiety (LSA; n = 23) and high social anxiety (HSA; n = 21) were asked to watch the faces and rate their valence and arousal while continuous EEG was recorded. ERP analysis revealed that HSA participants showed elevated P100 amplitudes in response to faces but reduced structural encoding of faces, as indexed by reduced N170 amplitudes. In general, affective context led to an enhanced early posterior negativity (EPN) for negatively compared to neutrally contextualized faces. Moreover, HSA compared to LSA participants showed enhanced late positive potentials (LPP) to negatively contextualized faces, whereas in LSA this effect was found for faces in positive contexts. HSA participants also rated faces in negative contexts as more negative than LSA participants did. These results point to enhanced vigilance for neutral faces regardless of context in HSA, while structural encoding seems to be diminished (avoidance). Interestingly, later components of sustained processing (LPP) indicate that LSA show enhanced visuocortical processing of faces in positive contexts (happy bias), whereas in HSA this is the case for negatively contextualized faces (threat bias). Finally, our results add further evidence that top-down information, in interaction with individual anxiety levels, can influence early-stage aspects of visual perception.},
  language = {en}
}

@article{HerbertSfaerleaBlumenthal2013,
  author   = {Herbert, Cornelia and Sf{\"a}rlea, Anca and Blumenthal, Terry},
  title    = {Your emotion or mine: labeling feelings alters emotional face perception---an ERP study on automatic and intentional affect labeling},
  journal  = {Frontiers in Human Neuroscience},
  year     = {2013},
  doi      = {10.3389/fnhum.2013.00378},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97065},
  abstract = {Empirical evidence suggests that words are powerful regulators of emotion processing. Although a number of studies have used words as contextual cues for emotion processing, the role of what is being labeled by the words (i.e., one's own emotion as compared to the emotion expressed by the sender) is poorly understood. The present study reports results from two experiments that used ERP methodology to evaluate the impact of emotional faces and self- vs. sender-related emotional pronoun-noun pairs (e.g., my fear vs. his fear) as cues for emotional face processing.
The influence of self- and sender-related cues on the processing of fearful, angry, and happy faces was investigated in two contexts: an automatic (experiment 1) and an intentional affect labeling task (experiment 2), along with control conditions of passive face processing. ERP patterns varied as a function of the label's reference (self vs. sender) and the intentionality of the labeling task (experiment 1 vs. experiment 2). In experiment 1, self-related labels increased the motivational relevance of the emotional faces in the time window of the EPN component. Processing of sender-related labels improved emotion recognition specifically for fearful faces in the N170 time window. Spontaneous processing of affective labels also modulated later stages of face processing: amplitudes of the late positive potential (LPP) were reduced for fearful, happy, and angry faces relative to the control condition of passive viewing. During intentional regulation (experiment 2), LPP amplitudes were enhanced for emotional faces when subjects used the self-related emotion labels to label their own emotion during face processing, and subjects rated these faces as higher in arousal than the emotional faces presented in the "label sender's emotion" condition or the passive viewing condition. The present results argue in favor of a differentiated view of language-as-context for emotion processing.},
  language = {en}
}

@phdthesis{Anderson2011,
  author   = {Anderson, Christina},
  title    = {Idiosyncratic Facial Movement in Face Perception and Recognition},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2011},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70355},
  abstract = {It has been proposed that different features of a face provide information for separate perceptual and cognitive processes. Properties of a face that remain rather stable over time, so-called invariant facial features, yield information about a face's identity, whereas changeable aspects of faces transmit information underlying social communication, such as emotional expressions and speech movements. While processing of these different face properties was initially claimed to be independent, a growing body of evidence suggests that these sources of information interact when people recognize faces with whom they are familiar. This is because the way a face moves can contain patterns that are characteristic of that specific person, so-called idiosyncratic movements. As a face becomes familiar, these idiosyncratic movements are learned and hence also provide information serving face identification. While an abundance of experiments has addressed the independence of invariant and variable facial features in face recognition, little is known about the exact nature of the impact idiosyncratic facial movements have on face recognition. Understanding how facial motion contributes to face recognition is, however, important for a deeper understanding of the way the brain processes and recognizes faces. This dissertation reports three experiments that investigate the impact that familiarity of changeable facial features has on face recognition. The first experiment used EEG to address temporal aspects of the processing of familiar idiosyncratic facial motion by investigating the influence familiar facial movement exerts on event-related potentials associated with face processing and face recognition.
After being familiarized with a face and its idiosyncratic movement, participants viewed familiar or unfamiliar faces with familiar or unfamiliar facial movement while their brain potentials were recorded. Results showed that familiarity of facial motion influenced later event-related potentials linked to memory processes involved in face recognition. The second experiment used fMRI to investigate the brain areas involved in processing familiar facial movement. Participants' BOLD signal was recorded while they viewed familiar and unfamiliar faces with familiar or unfamiliar idiosyncratic movement. Activity in brain regions underlying the processing of face identity, such as the fusiform gyrus, was modulated by familiar facial movement. Together, these two experiments provide valuable information about the involvement of idiosyncratic facial movement in face recognition and have important implications for cognitive and neural models of face perception and recognition. The third experiment addressed the question of whether idiosyncratic facial movement could increase individuation in the perception of faces from a different ethnic group and hence reduce the impaired recognition of other-race faces relative to own-race faces, a phenomenon known as the own-race bias. European participants viewed European and African faces, each animated with an idiosyncratic smile, while their attention was directed either to the form or to the motion of the face. Recognition memory for these faces was tested subsequently. The own-race bias was equally present in both attention conditions, indicating that idiosyncratic facial movement did not reduce it. In combination, the experiments presented here provide further insight into the involvement of idiosyncratic facial motion in face recognition. The dynamic component of faces must be considered when investigating face recognition, because static facial images cannot provide the full range of information that leads to recognition of a face. To reflect the full process of face recognition, cognitive and neural models of face perception and recognition need to integrate dynamic facial features as a source of information that contributes to the recognition of a face.},
  subject  = {Gesicht},
  language = {en}
}