@article{BlechertMeuleBuschetal.2014, author = {Blechert, Jens and Meule, Adrian and Busch, Niko A. and Ohla, Kathrin}, title = {Food-pics: an image database for experimental research on eating and appetite}, series = {Frontiers in Psychology}, volume = {5}, journal = {Frontiers in Psychology}, issn = {1664-1078}, doi = {10.3389/fpsyg.2014.00617}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115987}, pages = {617}, year = {2014}, abstract = {Our current environment is characterized by the omnipresence of food cues. The sight and smell of real foods, but also graphical depictions of appetizing foods, can guide our eating behavior, for example, by eliciting food craving and influencing food choice. The relevance of visual food cues for human information processing has been demonstrated by a growing body of studies employing food images across the disciplines of psychology, medicine, and neuroscience. However, currently used food image sets vary considerably across laboratories, and image characteristics (contrast, brightness, etc.) and food composition (calories, macronutrients, etc.) are often unspecified. These factors might have contributed to some of the inconsistencies of this research. To remedy this, we developed food-pics, a picture database comprising 568 food images and 315 non-food images along with detailed meta-data. A total of N = 1988 individuals with large variance in age and weight from German-speaking countries and North America provided normative ratings of valence, arousal, palatability, desire to eat, recognizability and visual complexity. Furthermore, data on macronutrients (g), energy density (kcal), and physical image characteristics (color composition, contrast, brightness, size, complexity) are provided. The food-pics image database is freely available under the creative commons license with the hope that the set will facilitate standardization and comparability across studies and advance experimental research on the determinants of eating behavior.}, language = {en} } @article{MeuleLutzKrawietzetal.2014, author = {Meule, Adrian and Lutz, Annika P. C. and Krawietz, Vera and St{\"u}tzer, Judith and V{\"o}gele, Claus and K{\"u}bler, Andrea}, title = {Food-cue affected motor response inhibition and self-reported dieting success: a pictorial affective shifting task}, series = {Frontiers in Psychology}, volume = {5}, journal = {Frontiers in Psychology}, issn = {1664-1078}, doi = {10.3389/fpsyg.2014.00216}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-119840}, pages = {216}, year = {2014}, abstract = {Behavioral inhibition is one of the basic facets of executive functioning and is closely related to self-regulation. Impulsive reactions, that is, low inhibitory control, have been associated with higher body mass index (BMI), binge eating, and other problem behaviors (e.g., substance abuse, pathological gambling, etc.). Nevertheless, studies which investigated the direct influence of food-cues on behavioral inhibition have been fairly inconsistent. In the current studies, we investigated food-cue affected behavioral inhibition in young women. For this purpose, we used a go/no-go task with pictorial food and neutral stimuli in which stimulus-response mapping is reversed after every other block (affective shifting task). In study 1, hungry participants showed faster reaction times to and omitted fewer food than neutral targets. Low dieting success and higher BMI were associated with behavioral disinhibition in food relative to neutral blocks. 
In study 2, both hungry and satiated individuals were investigated. Satiation did not influence overall task performance, but modulated associations of task performance with dieting success and self-reported impulsivity. When satiated, increased food craving during the task was associated with low dieting success, possibly indicating a preload-disinhibition effect following food intake. Food-cues elicited automatic action and approach tendencies regardless of dieting success, self-reported impulsivity, or current hunger levels. Yet, associations between dieting success, impulsivity, and behavioral food-cue responses were modulated by hunger and satiation. Future research investigating clinical samples and including other salient non-food stimuli as a control category is warranted.}, language = {en} } @misc{RodriguezMartinMeule2015, author = {Rodr{\´i}guez-Mart{\´i}n, Boris C. and Meule, Adrian}, title = {Food craving: new contributions on its assessment, moderators, and consequences}, series = {Frontiers in Psychology}, volume = {6}, journal = {Frontiers in Psychology}, number = {21}, issn = {1664-1078}, doi = {10.3389/fpsyg.2015.00021}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-190299}, year = {2015}, abstract = {No abstract available.}, language = {en} } @phdthesis{Meule2014, author = {Meule, Adrian}, title = {Food craving as a central construct in the self-regulation of eating behavior}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-104597}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Background: Food craving refers to an intense desire to consume a specific kind of food, of which chocolate is the most often craved one. It is this intensity and specificity that differentiates food craving from feelings of hunger. Although food craving and hunger often co-occur, an energy deficit is not a prerequisite for experiencing food craving, that is, it can also occur without being hungry. Food craving often precedes and predicts over- or binge eating, which makes it a reasonable target in the treatment of eating disorders or obesity. Among the arguably most extensively validated measures for the assessment of food craving are the Food Cravings Questionnaires (FCQs), which measure food craving on a state (FCQ-S) and trait (FCQ-T) level. Specifically, the FCQ-S measures the intensity of current food craving whereas the FCQ-T measures the frequency of food craving experiences in general. The aims of the present thesis were to provide a German measure for the assessment of food craving and to investigate cognitive, behavioral, and physiological correlates of food craving. For this purpose, a German version of the FCQs was presented and its reliability and validity were evaluated. Using self-reports, relationships between trait food craving and dieting were examined. Cognitive-behavioral correlates of food craving were investigated using food-related tasks assessing executive functions. Psychophysiological correlates of food craving were investigated using event-related potentials (ERPs) in the electroencephalogram and heart rate variability (HRV). Possible intervention approaches to reduce food craving were derived from the results of those studies. Methods: The FCQs were translated into German and their psychometric properties and correlates were investigated in a questionnaire-based study (articles \#1 \& \#2). 
The relationships of state and trait food craving with executive functioning were examined with behavioral tasks measuring working memory performance and behavioral inhibition which involved highly palatable food-cues (articles \#3 \& \#4). Electrophysiological correlates of food craving were tested with ERPs during a craving regulation task (article \#5). Finally, a pilot study on the effects of HRV-biofeedback for reducing food craving was conducted (article \#6). Results: The FCQs demonstrated high internal consistency while their factorial structure could only partially be replicated. The FCQ-T also had high retest-reliability, which, as expected, was lower for the FCQ-S. Validity of the FCQ-S was shown by positive relationships with current food deprivation and negative affect. Validity of the FCQ-T was shown by positive correlations with related constructs. Importantly, scores on the subscales of the FCQ-T were able to discriminate between non-dieters and successful and unsuccessful dieters (article \#1). Furthermore, scores on the FCQ-T mediated the relationship between rigid dietary control strategies and low dieting success (article \#2). With regard to executive functioning, high-calorie food-cues impaired working memory performance, yet this was independent of trait food craving and rarely related to state food craving (article \#3). Behavioral disinhibition in response to high-calorie food-cues was predicted by trait food craving, particularly when participants were also impulsive (article \#4). Downregulation of food craving by cognitive strategies in response to high-calorie food-cues increased early, but not later, segments of the Late Positive Potential (LPP) (article \#5). Few sessions of HRV-biofeedback reduced self-reported food cravings and eating and weight concerns in high trait food cravers (article \#6). Conclusions: The German FCQs represent sound measures with good psychometric properties for the assessment of state and trait food craving. Although state food craving increases during cognitive tasks involving highly palatable food-cues, impairment of task performance does not appear to be mediated by current food craving experiences. Instead, trait food craving is associated with low behavioral inhibition in response to high-calorie food-cues, but not with impaired working memory performance. Future studies need to examine if trait food craving and, subsequently, food-cue affected behavioral inhibition can be reduced by using food-related inhibition tasks as a training tool. Current food craving and ERPs in response to food-cues can easily be modulated by cognitive strategies, yet the LPP probably does not represent a direct index of food craving. Finally, HRV-biofeedback may be a useful add-on element in the treatment of disorders in which food cravings are elevated. To conclude, the current thesis provided measures for the assessment of food craving in German and showed differential relationships of state and trait food craving with self-reported dieting behavior, food-cue affected executive functioning, ERPs, and HRV-biofeedback. 
These results provide promising starting points for interventions to reduce food craving based on (1) food-cue-related behavioral training of executive functions, (2) cognitive craving regulation strategies, and (3) physiological parameters such as HRV-biofeedback.}, subject = {Craving}, language = {en} } @article{MeuleGearhard2014, author = {Meule, Adrian and Gearhardt, Ashley N.}, title = {Food Addiction in the Light of DSM-5}, series = {Nutrients}, volume = {6}, journal = {Nutrients}, number = {9}, issn = {2072-6643}, doi = {10.3390/nu6093653}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-119279}, pages = {3653-3671}, year = {2014}, abstract = {The idea that specific kinds of foods may have an addiction potential and that some forms of overeating may represent an addicted behavior has been discussed for decades. In recent years, the interest in food addiction is growing and research on this topic has led to more precise definitions and assessment methods. For example, the Yale Food Addiction Scale has been developed for the measurement of addiction-like eating behavior based on the diagnostic criteria for substance dependence of the fourth revision of the Diagnostic and Statistical Manual of Mental Disorders (DSM-IV). In 2013, diagnostic criteria for substance abuse and dependence were merged, thereby increasing the number of symptoms for substance use disorders (SUDs) in the DSM-5. Moreover, gambling disorder is now included along with SUDs as a behavioral addiction. Although a plethora of review articles exist that discuss the applicability of the DSM-IV substance dependence criteria to eating behavior, the transferability of the newly added criteria to eating is unknown. Thus, the current article discusses if and how these new criteria may be translated to overeating. Furthermore, it is examined if the new SUD criteria will impact future research on food addiction, for example, if "diagnosing" food addiction should also be adapted by considering all of the new symptoms. Given the critical response to the revisions in DSM-5, we also discuss if the recent approach of Research Domain Criteria can be helpful in evaluating the concept of food addiction.}, language = {en} } @article{ReussPohlKieseletal.2011, author = {Reuss, Heiko and Pohl, Carsten and Kiesel, Andrea and Kunde, Wilfried}, title = {Follow the sign! Top-down contingent attentional capture of masked arrow cues}, series = {Advances in Cognitive Psychology}, volume = {7}, journal = {Advances in Cognitive Psychology}, doi = {10.2478/v10053-008-0091-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-140030}, pages = {82-91}, year = {2011}, abstract = {Arrow cues and other overlearned spatial symbols automatically orient attention according to their spatial meaning. This renders them similar to exogenous cues that occur at stimulus location. Exogenous cues trigger shifts of attention even when they are presented subliminally. Here, we investigate to what extent the mechanisms underlying the orienting of attention by exogenous cues and by arrow cues are comparable by analyzing the effects of visible and masked arrow cues on attention. In Experiment 1, we presented arrow cues with overall 50\% validity. Visible cues, but not masked cues, led to shifts of attention. In Experiment 2, the arrow cues had an overall validity of 80\%. Now both visible and masked arrows led to shifts of attention. 
This is in line with findings that subliminal exogenous cues capture attention only in a top-down contingent manner, that is, when the cues fit the observer's intentions.}, language = {en} } @article{WehrheimFaskowitzSpornsetal.2023, author = {Wehrheim, Maren H. and Faskowitz, Joshua and Sporns, Olaf and Fiebach, Christian J. and Kaschube, Matthias and Hilger, Kirsten}, title = {Few temporally distributed brain connectivity states predict human cognitive abilities}, series = {NeuroImage}, volume = {277}, journal = {NeuroImage}, doi = {10.1016/j.neuroimage.2023.120246}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349874}, year = {2023}, abstract = {Highlights: Brain connectivity states identified by cofluctuation strength; CMEP as a new method to robustly predict human traits from brain imaging data; network-identifying connectivity 'events' are not predictive of cognitive ability; sixteen temporally independent fMRI time frames allow for significant prediction; neuroimaging-based assessment of cognitive ability requires sufficient scan lengths. Abstract: Human functional brain connectivity can be temporally decomposed into states of high and low cofluctuation, defined as coactivation of brain regions over time. Rare states of particularly high cofluctuation have been shown to reflect fundamentals of intrinsic functional network architecture and to be highly subject-specific. However, it is unclear whether such network-defining states also contribute to individual variations in cognitive abilities - which strongly rely on the interactions among distributed brain regions. By introducing CMEP, a new eigenvector-based prediction framework, we show that as few as 16 temporally separated time frames (< 1.5\% of 10 min resting-state fMRI) can significantly predict individual differences in intelligence (N = 263, p < .001). Against previous expectations, individual's network-defining time frames of particularly high cofluctuation do not predict intelligence. Multiple functional brain networks contribute to the prediction, and all results replicate in an independent sample (N = 831). Our results suggest that although fundamentals of person-specific functional connectomes can be derived from few time frames of highest connectivity, temporally distributed information is necessary to extract information about cognitive abilities. This information is not restricted to specific connectivity states, like network-defining high-cofluctuation states, but rather reflected across the entire length of the brain connectivity time series.}, language = {en} } @phdthesis{Sollfrank2015, author = {Sollfrank, Teresa}, title = {Feedback efficiency and training effects during alpha band modulation over the sensorimotor cortex}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-131769}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Neural oscillations can be measured by electroencephalography (EEG) and these oscillations can be characterized by their frequency, amplitude and phase. The mechanistic properties of neural oscillations and their synchronization are able to explain various aspects of many cognitive functions such as motor control, memory, attention, information transfer across brain regions, segmentation of the sensory input and perception (Arnal and Giraud, 2012). The alpha band frequency is the dominant oscillation in the human brain. 
This oscillatory activity is found in the scalp EEG at frequencies around 8-13 Hz in all healthy adults (Makeig et al., 2002), and considerable interest has been generated in exploring EEG alpha oscillations with regard to their role in cognitive (Klimesch et al., 1993; Hanselmayr et al., 2005), sensorimotor (Birbaumer, 2006; Sauseng et al., 2009) and physiological (Lehmann, 1971; Niedermeyer, 1997; Kiyatkin, 2010) aspects of human life. The ability to voluntarily regulate the alpha amplitude can be learned with neurofeedback training and offers the possibility to control a brain-computer interface (BCI), a muscle-independent interaction channel. BCI research is predominantly focused more on the signal processing, the classification, and the algorithms necessary to translate brain signals into control commands than on the person interacting with the technical system. The end-user must be properly trained to be able to successfully use the BCI, and factors such as task instructions, training, and especially feedback can therefore play an important role in learning to control a BCI (Neumann and K{\"u}bler, 2003; Pfurtscheller et al., 2006, 2007; Allison and Neuper, 2010; Friedrich et al., 2012; Kaufmann et al., 2013; Lotte et al., 2013). The main purpose of this thesis was to investigate how end-users can efficiently be trained to perform alpha band modulation recorded over their sensorimotor cortex. The work presented herein comprises three studies with healthy participants and participants with schizophrenia focusing on the effects of feedback and training time on cortical activation patterns and performance. In the first study, the application of realistic visual feedback to support end-users in developing a concrete feeling of kinesthetic motor imagery was tested in 2D and 3D visualization modalities during a single training session. Participants were able to elicit the typical event-related desynchronisation responses over sensorimotor cortex in both conditions, but the most significant decrease in alpha band power was obtained following the three-dimensional realistic visualization. The second study strengthened the hypothesis that enriched visual feedback with information about the quality of the input signal supports an easier approach to motor imagery-based BCI control and can help to enhance performance. Significantly better performance levels were measurable during five online training sessions in the groups with enriched feedback as compared to a conventional simple visual feedback group, without significant differences in performance between the unimodal (visual) and multimodal (auditory-visual) feedback modalities. Furthermore, the last study, in which people with schizophrenia participated in multiple sessions with simple feedback, demonstrated that these patients can learn to voluntarily regulate their alpha band. Compared to the healthy group, they required longer training times and could not achieve performance levels as high as the control group. Nonetheless, alpha neurofeedback training led to a constant increase of the alpha resting power across all 20 training sessions. To date, only little is known about the effects of feedback and training time on BCI performance and cortical activation patterns. 
The presented work contributes to the evidence that healthy individuals can benefit from enriched feedback: a realistic presentation can support participants in getting a concrete feeling of motor imagery, and enriched feedback that informs participants about the quality of their input signal can provide support while learning to control the BCI. This thesis demonstrates that people with schizophrenia can learn to gain control of their alpha oscillations recorded over the sensorimotor cortex when participating in sufficient training sessions. In conclusion, this thesis improved current motor imagery BCI feedback protocols and enhanced our understanding of the interplay between feedback and BCI performance.}, subject = {Neurofeedback}, language = {en} } @phdthesis{Pohl2011, author = {Pohl, Carsten}, title = {Feature processing and feature integration in unconscious processing : A Study with chess novices and experts}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-67190}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The scope of the present work encompasses the influence of experience (i.e., expertise) on feature processing in unconscious information processing. In the introduction, I describe the subliminal priming paradigm, a method to examine how stimuli we are not aware of nonetheless influence our actions. The activation of semantic response categories, the impact of learned stimulus-response links, and the action triggering through programmed stimulus-response links are the three main hypotheses to explain unconscious response activation. In addition, the congruence of perceptual features can also influence subliminal priming. On the basis of the features location and form, I review the evidence for perceptual priming that exists so far. The second part of the introduction reviews the literature showing perceptual superiority of experts. This is illustrated with three domains of expertise: playing action video games, which constitutes a general form of perceptual expertise; radiology, a more natural form of expertise; and expertise in the game of chess, which is seen as the Drosophila of psychology. In the empirical section, I report nine experiments that applied a subliminal check detection task. Experiment 1 shows subliminal response priming for chess experts but not for chess novices. Thus, chess experts are able to judge unconsciously presented chess configurations as checking or nonchecking. The results of Experiment 2 suggest that acquired perceptual chunks, and not the ability to integrate perceptual features unconsciously, were responsible for unconscious check detection, because experts' priming does not occur for simpler chess configurations which afforded an unfamiliar classification. With a more complex chess detection task, Experiment 3 indicates that chess experts are not able to process perceptual features in parallel or, alternatively, that chess experts are not able to form the specific expectations which are obviously necessary to elicit priming if many chess displays are applied. The aim of Experiments 4-9 was to further elaborate on unconscious processing of the single features location and form in novices. In Experiments 4 and 5, perceptual priming according to the congruence of the single features location and form outperformed semantically based response priming. Experiments 6 and 7 show that (in contrast to form priming) the observed location priming effect is rather robust and is also evident for an unexpected form or colour. 
In Experiment 8, location and form priming, which were additionally related to response priming, were directly compared to each other. Location priming was again stronger than form priming. Finally, Experiment 9 demonstrates that with the subliminal check detection task it is possible to induce response priming in novices when the confounding influences of location and form are absent. In the General discussion, I first summarize the findings. Second, I discuss possible underlying mechanisms of different subliminal perception in experts and novices. Third, I focus on subliminal perceptual priming in novices, especially on the impact of the features location and form. Finally, I discuss a framework, the action trigger account, that integrates the different results of the present work.}, subject = {Bewusstsein}, language = {en} } @article{GrappEllKiermeieretal.2022, author = {Grapp, Miriam and Ell, Johanna and Kiermeier, Senta and Haun, Markus W. and K{\"u}bler, Andrea and Friederich, Hans-Christoph and Maatouk, Imad}, title = {Feasibility study of a self-guided internet-based intervention for family caregivers of patients with cancer (OAse)}, series = {Scientific Reports}, volume = {12}, journal = {Scientific Reports}, doi = {10.1038/s41598-022-21157-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300537}, year = {2022}, abstract = {Despite high levels of distress, family caregivers of patients with cancer rarely seek psychosocial support, and Internet-based interventions (IBIs) are a promising approach to reducing some access barriers. Therefore, we developed a self-guided IBI for family caregivers of patients with cancer (OAse), which, in addition to patients' spouses, also addresses other family members (e.g., adult children, parents). This study aimed to determine the feasibility of OAse (recruitment, dropout, adherence, participant satisfaction). Secondary outcomes were caregivers' self-efficacy, emotional state, and supportive care needs. N = 41 family caregivers participated in the study (female: 65\%), mostly spouses (71\%), followed by children (20\%), parents (7\%), and friends (2\%). Recruitment (47\%), retention (68\%), and adherence rates (76\% completed at least 4 of 6 lessons) support the feasibility of OAse. Overall, the results showed a high degree of overall participant satisfaction (96\%). There were no significant pre-post differences in secondary outcome criteria, but a trend toward improvement in managing difficult interactions/emotions (p = .06) and depression/anxiety (p = .06). Although the efficacy of the intervention remains to be investigated, our results suggest that OAse can be well implemented in caregivers' daily lives and has the potential to improve family caregivers' coping strategies.}, language = {en} } @article{MitschkeEder2021, author = {Mitschke, Vanessa and Eder, Andreas B.}, title = {Facing the enemy: Spontaneous facial reactions towards suffering opponents}, series = {Psychophysiology}, volume = {58}, journal = {Psychophysiology}, number = {8}, doi = {10.1111/psyp.13835}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259672}, year = {2021}, abstract = {The suffering of an opponent is an important social affective cue that modulates how aggressive interactions progress. 
To investigate the affective consequences of opponent suffering on a revenge-seeking individual, two experiments (total N = 82) recorded facial muscle activity while participants observed the reaction of a provoking opponent to a (retaliatory) sound punishment in a laboratory aggression task. Opponents reacted via prerecorded videos either with facial displays of pain, sadness, or neutrality. Results indicate that participants enjoyed seeing the provocateur suffer, as indexed by a coordinated muscle response featuring an increase in zygomaticus major (and orbicularis oculi muscle) activation accompanied by a decrease in corrugator supercilii activation. This positive facial reaction was only shown while a provoking opponent expressed pain. Expressions of sadness, and administration of sound blasts to nonprovoking opponents, did not modulate facial activity. Overall, the results suggest that revenge-seeking individuals enjoy observing the offender suffer, which could represent schadenfreude or satisfaction at having succeeded in the retaliation goal.}, language = {en} } @phdthesis{Menne2020, author = {Menne, Isabelle M.}, title = {Facing Social Robots - Emotional Reactions towards Social Robots}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-120-4}, doi = {10.25972/WUP-978-3-95826-121-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-187131}, school = {W{\"u}rzburg University Press}, pages = {XXIV, 201}, year = {2020}, abstract = {An Army colonel feels pity for a robot that defuses landmines in a test run and declares the test inhumane (Garreau, 2007). Robots receive military promotions, funerals, and medals of honor (Garreau, 2007; Carpenter, 2013). A turtle robot is developed to teach children to treat robots well (Ackermann, 2018). The humanoid robot Sophia recently became a Saudi Arabian citizen, and there are already debates about whether robots should be granted rights (Delcker, 2018). These and similar developments already demonstrate the significance of robots and the emotional impact they have. Nevertheless, judging by comments in internet forums, these emotional reactions seem to play out on a different level: there, the question is frequently raised as to why anyone would react emotionally to a robot at all. Indeed, from a purely rational point of view, it is difficult to explain why humans should feel with a lifeless ('mindless') machine. And yet, not only the reports mentioned above but also first scientific studies (e.g., Rosenthal-von der P{\"u}tten et al., 2013) attest to the emotional influence robots can have on humans. Despite the importance of investigating emotional reactions towards robots, few scientific studies on this topic exist so far. Indeed, Kappas, Krumhuber, and K{\"u}ster (2013) identified the systematic analysis and evaluation of social reactions towards robots as one of the greatest challenges of affective human-robot interaction. According to Scherer (2001; 2005), emotions consist of the coordination and synchronization of several interlinked components. These include motor expression (facial expressions), subjective experience, action tendencies, and physiological and cognitive components. 
To capture an emotion completely, all of these components would have to be measured, yet such a comprehensive analysis has never been carried out (Scherer, 2005). Mostly, questionnaires are used (cf. Bethel \& Murphy, 2010), which, however, usually assess only subjective experience. Bakeman and Gottman (1997) even state that only about 8\% of psychological research is based on behavioral data, although psychology is traditionally defined as the 'study of mind and behavior' (American Psychological Association, 2018). The measurement of other emotion components is rare. Moreover, questionnaires suffer from a number of drawbacks (Austin, Deary, Gibson, McGregor, \& Dent, 1998; Fan et al., 2006; Wilcox, 2011). Both Bethel and Murphy (2010) and Arkin and Moshkina (2015) argue for a multi-method approach in order to gain a more comprehensive understanding of affective processes in human-robot interaction. The main aim of the present dissertation is therefore to use a multi-method approach to capture different components of emotion (motor expression, the subjective feeling component, action tendencies) and thereby contribute to a more complete and deeper picture of emotional processes towards robots. To achieve this aim, three experimental studies with a total of 491 participants were conducted. Using different levels of "apparent reality" (Frijda, 2007) as well as power/control over the situation (cf. Scherer \& Ellgring, 2007), it was examined to what extent the intensity and quality of emotional reactions towards robots change and which further factors (appearance of the robot, emotional expressivity of the robot, treatment of the robot, authority status of the robot) exert an influence. Experiment 1 was based on videos showing different types of robots (animal-like, anthropomorphic, machine-like) that were either emotionally expressive or not (on/off) in different situations (friendly treatment of the robot vs. abuse). Questionnaires on self-reported feelings and the motor-expressive component of emotion, facial expressions (cf. Scherer, 2005), were analyzed. The Facial Action Coding System (Ekman, Friesen, \& Hager, 2002), the most comprehensive and most widely used method for the objective study of facial expressions, was used for this purpose. The results showed that participants displayed facial expressions (Action Unit [AU] 12 and AUs associated with positive emotions, as well as AU 4 and AUs associated with negative emotions) and self-reported feelings in accordance with the valence of the treatment shown in the videos. Stronger emotional reactions were observed for emotionally expressive robots than for non-expressive robots. In the abuse condition, the animal-like robot Pleo received the most pity, empathy, negative feelings, and sadness, followed by the anthropomorphic robot Reeti, with the least being reported for the machine-like robot Roomba. Roomba was attributed the most antipathy. The results tie in with previous research (e.g., Krach et al., 2008; Menne \& Schwab, 2018; Riek et al., 2009; Rosenthal-von der P{\"u}tten et al., 2013) and demonstrate the potential of facial expressions for natural human-robot interaction. 
Experiments 2 and 3 transferred Milgram's (1963; 1974) classic experiments on obedience to the context of human-robot interaction. Milgram's obedience studies were considered highly suitable for examining the extent of empathy towards a robot in relation to obedience towards a robot. Experiment 2 differed from Experiment 3 in the level of "apparent reality" (Frijda, 2007): following Milgram (1963), a purely text-based study (Experiment 2) was contrasted with a live human-robot interaction (Experiment 3). While the dependent variables of Experiment 2 consisted of self-reports of emotional feelings and estimates of hypothetical behavior, Experiment 3 measured participants' subjective feelings as well as real behavior (reaction time: duration of hesitation; obedience rate; number of protests; facial expressions). Both experiments examined the influence of the factors "authority status" (high/low) of the robot issuing the commands (Nao) and emotional expressivity (on/off) of the robot receiving the punishments (Pleo). The subjective feelings of participants in Experiment 2 did not differ between groups. Moreover, only a few participants (20.2\%) stated that they would definitely punish the "victim" robot. Milgram (1963) found a similar result. However, the real behavior of participants in Milgram's laboratory experiment differed from the estimates of hypothetical behavior given by participants to whom Milgram had only described the experiment. Likewise, comments from participants in Experiment 2 suggest that the described scenario may have been classified as fictitious and that estimates of hypothetical behavior therefore cannot paint a realistic picture of real behavior towards robots in a live interaction. Therefore, a further experiment (Experiment 3) was conducted with a live interaction, with one robot as authority figure (high vs. low authority status) and another robot as "victim" (emotionally expressive vs. non-expressive). Group differences were found in questionnaires on emotional reactions. The emotionally expressive robot received more empathy, and more joy and less antipathy were reported than towards a non-expressive robot. In addition, facial expressions associated with negative emotions were observed while participants carried out Nao's command and punished Pleo. Although participants tended to hesitate longer when they were to punish an emotionally expressive robot and when the command came from a robot with low authority status, this difference did not reach significance. Furthermore, all but one participant were obedient and punished Pleo as commanded by the Nao robot. This result stands in stark contrast to the self-reported hypothetical behavior of participants in Experiment 2 and supports the assumption that estimates of hypothetical behavior in a human-robot obedience scenario are not reliable indicators of real behavior in a live human-robot interaction. Situational variables, such as 
obedience to authorities, even towards a robot, appear to be stronger than empathy for a robot. This finding ties in with other studies (e.g., Bartneck \& Hu, 2008; Geiskkovitch et al., 2016; Menne, 2017; Slater et al., 2006), opens up new insights into the influence of robots, but also shows that the choice of a method to evoke empathy for a robot is a non-trivial matter (cf. Geiskkovitch et al., 2016; cf. Milgram, 1965). Overall, the results support the assumption that emotional reactions towards robots are profound and show themselves both on the subjective level and in the motor component. Humans react emotionally to a robot that is emotionally expressive and looks less like a machine. They feel empathy and negative feelings when a robot is mistreated, and these emotional reactions are reflected in facial expressions. Furthermore, people's estimates of their own hypothetical behavior differ from their actual behavior, which is why video-based or live interactions are recommended for the analysis of real behavioral reactions. The arrival of social robots in society raises unprecedented questions, and this dissertation provides a first step towards understanding these new challenges.}, subject = {Roboter}, language = {en} } @phdthesis{Mitschke2023, author = {Mitschke, Vanessa}, title = {Facing Enemies. Modulation of Revenge Interactions based on Opponent State Indicators of Suffering}, doi = {10.25972/OPUS-29938}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-299389}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Research on revenge often treats vengeful acts as singular one-way experiences, an approach which fails to account for the social nature and functions of revenge. This dissertation aims to integrate emotional punishment reactions into dynamic revenge sequences to investigate the affective and cognitive consequences of revenge within a social interaction. Exacting revenge can evoke intense affective consequences, from feelings of guilt to the genuine enjoyment of the suffering of others. In Chapter 2, affective responses towards suffering opponents and the regulation of aggression based on the appraisal of distinct suffering indicators were investigated. Results indicate that the observation of opponent pain evokes positive affect (measured via facial muscle contractions during the observation), which is followed by a downregulation of subsequent punishment. Both positive affective reactions and the downregulation of punishment were observed only following pain, and not sadness, expressions. Empathic distress, indexed by negative affective reactions, was only present following the observation of pain in non-provoking opponents, showcasing the modulation of empathy-related processes due to provocation and competition. In Chapter 3, a significant escalation of punishment was observed when participants were confronted with schadenfreude. Results are interpreted as supporting the assumption that opponent monitoring processes inform subsequent action selection. The observation of opponent smiles led to imitation behavior (facial mimicry), which was partially attenuated due to previous provocation. 
The different functions of smile mimicry in the context of the aggressive competitive setting are discussed as containing simulation aspects (to aid in opponent understanding) and as a potential mirroring of dominance gestures to avoid submission. In an additional series of studies, which are presented in Chapter 4, changes in memory of opponent faces following vengeful encounters were measured. Based on provocation and punishment outcomes (pain \& anger), face memory was distorted, resulting in more positive representations of opponents who expressed pain. These results are discussed as evidence of the impact of outcome appraisals in the formation of opponent representations and are theorized to aid empathy avoidance in future interactions. The comparison of desired and observed opponent states is theorized to result in appraisals of the punishment outcomes, which evoke affective states, inform the action selection of subsequent punishments, and are integrated into the representation of the opponent in memory. Overall, the results indicate that suffering cues that are congruent with the chosen punishment action are appraised as positive, evoking an increase in positive affect. The emergence of positive affect during the observation of successful aggressive actions supports recent theories about the chronification of aggressive behavior based on reinforcement learning. To allow positive affect to emerge, affective empathic responses, such as distress, are theorized to be suppressed to facilitate the goal attainment process. The suffering of the opponent constitutes the proximate goal during revenge taking, which highlights the importance of a theoretical differentiation of proximate and ultimate goals in revenge to allow for a deeper understanding of the underlying motives of complex revenge behavior.}, subject = {Aggression}, language = {en} } @phdthesis{Weiland2010, author = {Weiland, Romy}, title = {Facial reactions in response to gustatory and olfactory stimuli in healthy adults, patients with eating disorders, and patients with attention-deficit hyperactivity disorder}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51759}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {The aim of this project was to investigate whether reflex-like innate facial reactions to tastes and odors are altered in patients with eating disorders. Qualitatively different tastes and odors have been found to elicit specific facial expressions in newborns. This specificity in newborns is characterized by positive facial reactions in response to pleasant stimuli and by negative facial reactions in response to unpleasant stimuli. It is, however, unclear whether these specific facial displays remain stable during ontogeny (1). Despite the fact that several studies have shown that taste- and odor-elicited facial reactions remain quite stable across a human's life-span, the specificity of research questions, as well as different research methods, allow only limited comparisons between studies. Moreover, the gustofacial response patterns might be altered in pathological eating behavior (2). To date, however, the question of whether dysfunctional eating behavior might alter facial activity in response to tastes and odors has not been addressed. Furthermore, changes in facial activity might be linked to deficient inhibitory facial control (3). To investigate these three research questions, facial reactions in response to tastes and odors were assessed. 
Facial reactions were analyzed using the Facial Action Coding System (FACS, Ekman \& Friesen, 1978; Ekman, Friesen, \& Hager, 2002) and electromyography.}, subject = {Mimik}, language = {en} } @phdthesis{Likowski2011, author = {Likowski, Katja U.}, title = {Facial mimicry, valence evaluation or emotional reaction? Mechanisms underlying the modulation of congruent and incongruent facial reactions to emotional facial expressions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65013}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Humans have the tendency to react with congruent facial expressions when looking at an emotional face. Interestingly, recent studies revealed that several situational moderators can modulate the strength and direction of these reactions. In the current literature, congruent facial reactions to emotional facial expressions are usually described in terms of "facial mimicry" and interpreted as imitative behavior. Thereby, facial mimicry is understood as a process of pure motor resonance resulting from overlapping representations for the perception and the execution of a certain behavior. Motor mimicry, however, is not the only mechanism by which congruent facial reactions can occur. Numerous studies have shown that facial muscles also indicate valence evaluations. Furthermore, facial reactions are also determined by our current emotional state. These thoughts suggest that the modulation of congruent facial reactions to emotional expressions can be based on both motor and affective processes. However, a separation of motor and affective processes in facial reactions is hard to make, and none of the published studies that attempted such a separation has so far shown a clear involvement of one or the other process. Therefore, the aim of the present line of experiments is to shed light on the involvement of motor and affective processes in the modulation of congruent and incongruent facial reactions. Specifically, the experiments are designed to test the assumptions of a working model on mechanisms underlying the modulation of facial reactions and to examine the neuronal correlates involved in such modulations with a broad range of methods. Experiments 1 and 2 experimentally manipulate motor and affective mechanisms by using specific contexts. In the chosen settings, motor process models and affective models of valence evaluations make competing predictions about resulting facial reactions. The results of Experiment 1 did not support the involvement of valence evaluations in the modulation of congruent and incongruent facial reactions to facial expressions. The results of Experiments 2a and 2b suggest that emotional reactions are the predominant determinant of facial reactions. Experiment 3 aimed at identifying the psychological mediators that indicate motor and affective mechanisms. Motor mechanisms are assessed via the psychological mediator empathy. Additionally, as a psychological mediator for clarifying the role of affective mechanisms, subjective measures of the participants' current emotional state in response to the presented facial expressions were taken. Mediational analyses show that the modulation of congruent facial reactions can be explained by a decrease in state cognitive empathy. This suggests that motor processes mediate the effects of the context on congruent facial reactions. However, such a mechanism could not be observed for incongruent reactions. 
Instead, it was found that affective processes in terms of emotional reactions are involved in incongruent facial reactions. Additionally, the involvement of a third class of processes, namely strategic processes, was observed. Experiment 4 aimed at investigating whether a change in the strength of perception can explain the contextual modulation of facial reactions to facial expressions. According to motor process models, the strength of perception is directly related to the strength of the spread of activation from perception to the execution of an action and thereby to the strength of the resulting mimicry behavior. The results suggest that motor mechanisms were involved in the modulation of congruent facial reactions by attitudes. Such an involvement of motor mechanisms could, however, not be observed for the modulation of incongruent reactions. In Experiment 5, the investigation of neuronal correlates was extended to the observation of the involved brain areas via fMRI. The regions of interest representing motor areas were prominent parts of the mirror neuron system (MNS); the regions of interest representing areas involved in affective processing were the amygdala, insula, and striatum. Furthermore, it could be shown that changes in the activity of parts of the MNS are related to the modulation of congruent facial reactions. Moreover, the results revealed the involvement of affective processes in the modulation of incongruent facial reactions. In sum, these results lead to a revised working model on the mechanisms underlying the modulation of facial reactions to emotional facial expressions. The results of the five experiments provide strong support for the involvement of motor mechanisms in congruent facial reactions. No evidence was found for the involvement of motor mechanisms in the occurrence or modulation of incongruent facial reactions. Furthermore, no evidence was found for the involvement of valence evaluations in the modulation of facial reactions. Instead, emotional reactions were found to be involved in the modulation of mainly incongruent facial reactions.}, subject = {Gef{\"u}hl}, language = {en} } @article{SeibtMuehlbergerLikowskietal.2015, author = {Seibt, Beate and M{\"u}hlberger, Andreas and Likowski, Katja U. and Weyers, Peter}, title = {Facial mimicry in its social setting}, series = {Frontiers in Psychology}, volume = {6}, journal = {Frontiers in Psychology}, number = {1122}, doi = {10.3389/fpsyg.2015.01122}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-151415}, year = {2015}, abstract = {In interpersonal encounters, individuals often exhibit changes in their own facial expressions in response to emotional expressions of another person. Such changes are often called facial mimicry. While this tendency first appeared to be an automatic tendency of the perceiver to show the same emotional expression as the sender, evidence is now accumulating that situation, person, and relationship jointly determine whether and for which emotions such congruent facial behavior is shown. We review the evidence regarding the moderating influence of such factors on facial mimicry with a focus on understanding the meaning of facial responses to emotional expressions in a particular constellation. From this, we derive recommendations for a research agenda with a stronger focus on the most common forms of encounters, actual interactions with known others, and on assessing potential mediators of facial mimicry. 
We conclude that facial mimicry is modulated by many factors: attention deployment and sensitivity, detection of valence, emotional feelings, and social motivations. We posit that these are the more proximal causes of changes in facial mimicry due to changes in its social setting.}, language = {en} } @article{LikowskiMuehlbergerGerdesetal.2012, author = {Likowski, Katja U. and M{\"u}hlberger, Andreas and Gerdes, Antje B. M. and Wieser, Mattias J. and Pauli, Paul and Weyers, Peter}, title = {Facial mimicry and the mirror neuron system: simultaneous acquisition of facial electromyography and functional magnetic resonance imaging}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-75813}, year = {2012}, abstract = {Numerous studies have shown that humans automatically react with congruent facial reactions, i.e., facial mimicry, when seeing a vis-{\´a}-vis' facial expressions. The current experiment is the first investigating the neuronal structures responsible for differences in the occurrence of such facial mimicry reactions by simultaneously measuring BOLD and facial EMG in an MRI scanner. Therefore, 20 female students viewed emotional facial expressions (happy, sad, and angry) of male and female avatar characters. During picture presentation, the BOLD signal as well as M. zygomaticus major and M. corrugator supercilii activity were recorded simultaneously. Results show prototypical patterns of facial mimicry after correction for MR-related artifacts: enhanced M. zygomaticus major activity in response to happy and enhanced M. corrugator supercilii activity in response to sad and angry expressions. Regression analyses show that these congruent facial reactions correlate significantly with activations in the IFG, SMA, and cerebellum. Stronger zygomaticus reactions to happy faces were further associated with increased activities in the caudate, MTG, and PCC. Corrugator reactions to angry expressions were further correlated with activity in the hippocampus, insula, and STS. Results are discussed in relation to core and extended models of the mirror neuron system (MNS).}, subject = {Psychologie}, language = {en} } @article{Ellgring1989, author = {Ellgring, Johann Heinrich}, title = {Facial expression as a behavioral indicator of emotional states}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-58753}, year = {1989}, abstract = {This article gives an overview of possibilities for the assessment of facial behavior. With regard to validity, results from a longitudinal study of 36 depressed patients and nine controls as well as of ten schizophrenic patients and their relatives will be referred to. These results are used to illustrate the following principles which have to be taken into account when studying facial behavior: a) communication strongly facilitates facial expression, b) activation of facial behavior follows the "principle of least effort", and c) the principle of individual specificity applies to the association of nonverbal behavior and mood states. Making allowance for these principles has, among others, consequences a) for situations or conditions under which to assess behavior (specifically conditions of communication), b) for data analysis (e.g., dealing with frequent and rare events), and c) for empirical or experimental strategies (e.g., aggregation of single-case longitudinal comparisons). From the results on facial behavior during depression it can be concluded that the nonverbal reaction tendencies of endogenous and neurotic depressed patients differ. 
Moreover, the differential behavioral patterns observed cast doubt on the assumption of a homogeneity of affects in depression. Taking into account the conditions which govern it, facial behavior has proved to be a valid and, especially, a differential indicator for pathologic affective states and their changes. Given the fact that a psychiatric illness generally incorporates emotional problems, it is more than surprising that little attention has been paid to the systematic study of emotional behavior. Some of the reasons for this will be clarified in the following.}, subject = {Psychologie}, language = {en} } @misc{WieserBrosch2012, author = {Wieser, Mattias J. and Brosch, Tobias}, title = {Faces in context: A review and systematization of contextual influences on affective face processing}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76351}, year = {2012}, abstract = {Facial expressions are of eminent importance for social interaction as they convey information about other individuals' emotions and social intentions. According to the predominant "basic emotion" approach, the perception of emotion in faces is based on the rapid, automatic categorization of prototypical, universal expressions. Consequently, the perception of facial expressions has typically been investigated using isolated, de-contextualized, static pictures of facial expressions that maximize the distinction between categories. However, in everyday life, an individual's face is not perceived in isolation, but almost always appears within a situational context, which may arise from other people, the physical environment surrounding the face, as well as multichannel information from the sender. Furthermore, situational context may be provided by the perceiver, including already present social information gained from affective learning and implicit processing biases such as race bias. Thus, the perception of facial expressions is presumably always influenced by contextual variables. In this comprehensive review, we aim at (1) systematizing the contextual variables that may influence the perception of facial expressions and (2) summarizing experimental paradigms and findings that have been used to investigate these influences. The studies reviewed here demonstrate that perception and neural processing of facial expressions are substantially modified by contextual information, including verbal, visual, and auditory information presented together with the face as well as knowledge or processing biases already present in the observer. These findings further challenge the assumption of automatic, hardwired categorical emotion extraction mechanisms predicted by basic emotion theories. Taking into account a recent model on face processing, we discuss where and when these different contextual influences may take place, thus outlining potential avenues in future research.}, subject = {Psychologie}, language = {en} } @article{GrussWieserSchweinbergeretal.2012, author = {Gruss, L. Forest and Wieser, Matthias J. and Schweinberger, Stefan R. and Keil, Andreas}, title = {Face-evoked steady-state visual potentials: effects of presentation rate and face inversion}, series = {Frontiers in Human Neuroscience}, volume = {6}, journal = {Frontiers in Human Neuroscience}, number = {316}, doi = {10.3389/fnhum.2012.00316}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134399}, year = {2012}, abstract = {Face processing can be explored using electrophysiological methods. 
Research with event-related potentials has demonstrated the so-called face inversion effect, in which the N170 component is enhanced in amplitude and latency to inverted, compared to upright, faces. The present study explored the extent to which repetitive lower-level visual cortical engagement, reflected in flicker steady-state visual evoked potentials (ssVEPs), shows similar amplitude enhancement to face inversion. We also asked if inversion-related ssVEP modulation would be dependent on the stimulation rate at which upright and inverted faces were flickered. To this end, multiple tagging frequencies were used (5, 10, 15, and 20 Hz) across two studies (n=21, n=18). Results showed that amplitude enhancement of the ssVEP for inverted faces was found solely at higher stimulation frequencies (15 and 20 Hz). By contrast, lower frequency ssVEPs did not show this inversion effect. These findings suggest that stimulation frequency affects the sensitivity of ssVEPs to face inversion.}, language = {en} }