@article{BerlijnHildebrandtGamer2022,
  author   = {Berlijn, Adam M. and Hildebrandt, Lea K. and Gamer, Matthias},
  title    = {Idiosyncratic viewing patterns of social scenes reflect individual preferences},
  series   = {Journal of Vision},
  volume   = {22},
  journal  = {Journal of Vision},
  number   = {13},
  doi      = {10.1167/jov.22.13.10},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301029},
  year     = {2022},
  abstract = {In general, humans preferentially look at conspecifics in naturalistic images. However, such group-based effects might conceal systematic individual differences concerning the preference for social information. Here, we investigated to what degree fixations on social features occur consistently within observers and whether this preference generalizes to other measures of social prioritization in the laboratory as well as the real world. Participants carried out a free viewing task and a relevance taps task that required them to actively select image regions that are crucial for understanding a given scene, and they were additionally asked to freely take photographs outside the laboratory that were later classified regarding their social content. We observed stable individual differences in the fixation and active selection of human heads and faces that were correlated across tasks and partly predicted the social content of self-taken photographs. Such a relationship was not observed for human bodies, indicating that different social elements need to be dissociated. These findings suggest that idiosyncrasies in the visual exploration and interpretation of social features exist and predict real-world behavior. Future studies should further characterize these preferences and elucidate how they shape the perception and interpretation of social contexts in healthy participants and in patients with mental disorders that affect social functioning.},
  language = {en}
}

@article{RuboGamer2021,
  author   = {Rubo, Marius and Gamer, Matthias},
  title    = {Stronger reactivity to social gaze in virtual reality compared to a classical laboratory environment},
  series   = {British Journal of Psychology},
  volume   = {112},
  journal  = {British Journal of Psychology},
  number   = {1},
  doi      = {10.1111/bjop.12453},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215972},
  pages    = {301 -- 314},
  year     = {2021},
  abstract = {People show a robust tendency to gaze at other human beings when viewing images or videos, but have also been found to relatively avoid gazing at others in several real-world situations. This discrepancy, along with theoretical considerations, spawned doubts about the appropriateness of classical laboratory-based experimental paradigms in social attention research. Several researchers instead suggested the use of immersive virtual scenarios for eliciting and measuring naturalistic attentional patterns, but the field, struggling with methodological challenges, still needs to establish the advantages of this approach. Here, using eye-tracking in a complex social scenario displayed in virtual reality, we show that participants direct enhanced attention towards the face of an avatar at near distance and demonstrate an increased reactivity towards her social gaze compared to participants who viewed the same scene on a computer monitor. The present study suggests that reactive virtual agents observed in immersive virtual reality can elicit natural modes of information processing and can help to conduct ecologically more valid experiments while maintaining high experimental control.},
  language = {en}
}

@article{RoeslerRuboGamer2019,
  author   = {R{\"o}sler, Lara and Rubo, Marius and Gamer, Matthias},
  title    = {Artificial faces predict gaze allocation in complex dynamic scenes},
  series   = {Frontiers in Psychology},
  volume   = {10},
  journal  = {Frontiers in Psychology},
  number   = {2877},
  issn     = {1664-1078},
  doi      = {10.3389/fpsyg.2019.02877},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193024},
  year     = {2019},
  abstract = {Both low-level physical saliency and social information, as presented by human heads or bodies, are known to drive gaze behavior in free-viewing tasks. Researchers have previously made use of a great variety of face stimuli, ranging from photographs of real humans to schematic faces, frequently without systematically differentiating between the two. In the current study, we used a Generalized Linear Mixed Model (GLMM) approach to investigate to what extent schematic artificial faces can predict gaze when they are presented alone or in competition with real human faces. Relative differences in predictive power became apparent, while the GLMMs suggested substantial effects of both real and artificial faces in all conditions. Artificial faces were accordingly less predictive than real human faces but still contributed significantly to gaze allocation. These results help to further our understanding of how social information guides gaze in complex naturalistic scenes.},
  language = {en}
}