@comment{Reformatted: one field per line; page ranges use "--"; removed
redundant "series" fields that duplicated "journal" (OPUS export artifact)
and the "UNSP " Web-of-Science prefix in a pages field. All bibliographic
content is otherwise unchanged.}

@phdthesis{Rubo2019,
  author   = {Rubo, Marius},
  title    = {Social Attention in the Laboratory, in Real Life and in Virtual Reality},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2019},
  doi      = {10.25972/OPUS-18845},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188452},
  abstract = {Social attention is a ubiquitous, but also enigmatic and sometimes elusive phenomenon. We direct our gaze at other human beings to see what they are doing and to guess their intentions, but we may also absorb social events en passant as they unfold in the corner of the eye. We use our gaze as a discrete communication channel, sometimes conveying pieces of information which would be difficult to explicate, but we may also find ourselves avoiding eye-contact with others in moments when self-disclosure is fear-laden. We experience our gaze as the most genuine expression of our will, but research also suggests considerable levels of predictability and automaticity in our gaze behavior. The phenomenon's complexity has hindered researchers from developing a unified framework which can conclusively accommodate all of its aspects, or from even agreeing on the most promising research methodologies. The present work follows a multi-methods approach, taking on several aspects of the phenomenon from various directions. Participants in study 1 viewed dynamic social scenes on a computer screen. Here, low-level physical saliency (i.e. color, contrast, or motion) and human heads both attracted gaze to a similar extent, providing a comparison of two vastly different classes of gaze predictors in direct juxtaposition. In study 2, participants with varying degrees of social anxiety walked in a public train station while their eye movements were tracked. With increasing levels of social anxiety, participants showed a relative avoidance of gaze at near compared to distant people. When replicating the experiment in a laboratory situation with a matched participant group, social anxiety did not modulate gaze behavior, fueling the debate around appropriate experimental designs in the field. Study 3 employed virtual reality (VR) to investigate social gaze in a complex and immersive, but still highly controlled situation. In this situation, participants exhibited a gaze behavior which may be more typical for real-life compared to laboratory situations as they avoided gaze contact with a virtual conspecific unless she gazed at them. This study provided important insights into gaze behavior in virtual social situations, helping to better estimate the possible benefits of this new research approach. Throughout all three experiments, participants showed consistent inter-individual differences in their gaze behavior. However, the present work could not resolve if these differences are linked to psychologically meaningful traits or if they instead have an epiphenomenal character.},
  subject  = {Aufmerksamkeit},
  language = {en},
}

@article{RuboGamer2019,
  author   = {Rubo, Marius and Gamer, Matthias},
  title    = {Visuo-tactile congruency influences the body schema during full body ownership illusion},
  journal  = {Consciousness and Cognition},
  volume   = {73},
  pages    = {102758, 1--14},
  year     = {2019},
  doi      = {10.1016/j.concog.2019.05.006},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-227095},
  abstract = {Previous research showed that full body ownership illusions in virtual reality (VR) can be robustly induced by providing congruent visual stimulation, and that congruent tactile experiences provide a dispensable extension to an already established phenomenon. Here we show that visuo-tactile congruency indeed does not add to already high measures for body ownership on explicit measures, but does modulate movement behavior when walking in the laboratory. Specifically, participants who took ownership over a more corpulent virtual body with intact visuo-tactile congruency increased safety distances towards the laboratory's walls compared to participants who experienced the same illusion with deteriorated visuo-tactile congruency. This effect is in line with the body schema more readily adapting to a more corpulent body after receiving congruent tactile information. We conclude that the action-oriented, unconscious body schema relies more heavily on tactile information compared to more explicit aspects of body ownership.},
  language = {en},
}

@article{RuboGamer2018,
  author   = {Rubo, Marius and Gamer, Matthias},
  title    = {Social content and emotional valence modulate gaze fixations in dynamic scenes},
  journal  = {Scientific Reports},
  volume   = {8},
  pages    = {3804, 1--11},
  year     = {2018},
  doi      = {10.1038/s41598-018-22127-w},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-227106},
  abstract = {Previous research has shown that low-level visual features (i.e., low-level visual saliency) as well as socially relevant information predict gaze allocation in free viewing conditions. However, these studies mainly used static and highly controlled stimulus material, thus revealing little about the robustness of attentional processes across diverging situations. Secondly, the influence of affective stimulus characteristics on visual exploration patterns remains poorly understood. Participants in the present study freely viewed a set of naturalistic, contextually rich video clips from a variety of settings that were capable of eliciting different moods. Using recordings of eye movements, we quantified to what degree social information, emotional valence and low-level visual features influenced gaze allocation using generalized linear mixed models. We found substantial and similarly large regression weights for low-level saliency and social information, affirming the importance of both predictor classes under ecologically more valid dynamic stimulation conditions. Differences in predictor strength between individuals were large and highly stable across videos. Additionally, low-level saliency was less important for fixation selection in videos containing persons than in videos not containing persons, and less important for videos perceived as negative. We discuss the generalizability of these findings and the feasibility of applying this research paradigm to patient groups.},
  language = {en},
}

@article{RoeslerRuboGamer2019,
  author   = {R{\"o}sler, Lara and Rubo, Marius and Gamer, Matthias},
  title    = {Artificial faces predict gaze allocation in complex dynamic scenes},
  journal  = {Frontiers in Psychology},
  volume   = {10},
  number   = {2877},
  year     = {2019},
  issn     = {1664-1078},
  doi      = {10.3389/fpsyg.2019.02877},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193024},
  abstract = {Both low-level physical saliency and social information, as presented by human heads or bodies, are known to drive gaze behavior in free-viewing tasks. Researchers have previously made use of a great variety of face stimuli, ranging from photographs of real humans to schematic faces, frequently without systematically differentiating between the two. In the current study, we used a Generalized Linear Mixed Model (GLMM) approach to investigate to what extent schematic artificial faces can predict gaze when they are presented alone or in competition with real human faces. Relative differences in predictive power became apparent, while GLMMs suggest substantial effects for real and artificial faces in all conditions. Artificial faces were accordingly less predictive than real human faces but still contributed significantly to gaze allocation. These results help to further our understanding of how social information guides gaze in complex naturalistic scenes.},
  language = {en},
}

@article{RuboGamer2021,
  author   = {Rubo, Marius and Gamer, Matthias},
  title    = {Stronger reactivity to social gaze in virtual reality compared to a classical laboratory environment},
  journal  = {British Journal of Psychology},
  volume   = {112},
  number   = {1},
  pages    = {301--314},
  year     = {2021},
  doi      = {10.1111/bjop.12453},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215972},
  abstract = {People show a robust tendency to gaze at other human beings when viewing images or videos, but were also found to relatively avoid gaze at others in several real-world situations. This discrepancy, along with theoretical considerations, spawned doubts about the appropriateness of classical laboratory-based experimental paradigms in social attention research. Several researchers instead suggested the use of immersive virtual scenarios in eliciting and measuring naturalistic attentional patterns, but the field, struggling with methodological challenges, still needs to establish the advantages of this approach. Here, we show using eye-tracking in a complex social scenario displayed in virtual reality that participants show enhanced attention towards the face of an avatar at near distance and demonstrate an increased reactivity towards her social gaze as compared to participants who viewed the same scene on a computer monitor. The present study suggests that reactive virtual agents observed in immersive virtual reality can elicit natural modes of information processing and can help to conduct ecologically more valid experiments while maintaining high experimental control.},
  language = {en},
}