@article{BreilHuesteggeBoeckler2022,
  author   = {Breil, Christina and Huestegge, Lynn and B{\"o}ckler, Anne},
  title    = {From eye to arrow: Attention capture by direct gaze requires more than just the eyes},
  series   = {Attention, Perception, \& Psychophysics},
  volume   = {84},
  journal  = {Attention, Perception, \& Psychophysics},
  number   = {1},
  issn     = {1943-393X},
  doi      = {10.3758/s13414-021-02382-2},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-273206},
  pages    = {64--75},
  year     = {2022},
  abstract = {Human attention is strongly attracted by direct gaze and sudden onset motion. The sudden direct-gaze effect refers to the processing advantage for targets appearing on peripheral faces that suddenly establish eye contact. Here, we investigate the necessity of social information for attention capture by (sudden onset) ostensive cues. Six experiments involving 204 participants applied (1) naturalistic faces, (2) arrows, (3) schematic eyes, (4) naturalistic eyes, or schematic facial configurations (5) without or (6) with head turn to an attention-capture paradigm. Trials started with two stimuli oriented towards the observer and two stimuli pointing into the periphery. Simultaneous to target presentation, one direct stimulus changed to averted and one averted stimulus changed to direct, yielding a $2 \times 2$ factorial design with direction and motion cues being absent or present. We replicated the (sudden) direct-gaze effect for photographic faces, but found no corresponding effects in Experiments 2-6. Hence, a holistic and socially meaningful facial context seems vital for attention capture by direct gaze. STATEMENT OF SIGNIFICANCE: The present study highlights the significance of context information for social attention. Our findings demonstrate that the direct-gaze effect, that is, the prioritization of direct gaze over averted gaze, critically relies on the presentation of a meaningful holistic and naturalistic facial context. This pattern of results is evidence in favor of early effects of surrounding social information on attention capture by direct gaze.},
  language = {en}
}

@article{BrychHaendelRiechelmannetal.2021,
  author   = {Brych, Mareike and H{\"a}ndel, Barbara F. and Riechelmann, Eva and Pieczykolan, Aleksandra and Huestegge, Lynn},
  title    = {Effects of vocal demands on pupil dilation},
  series   = {Psychophysiology},
  volume   = {58},
  journal  = {Psychophysiology},
  number   = {2},
  doi      = {10.1111/psyp.13729},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-224425},
  year     = {2021},
  abstract = {Pupil dilation is known to be affected by a variety of factors, including physical (e.g., light) and cognitive sources of influence (e.g., mental load due to working memory demands, stimulus/response competition etc.). In the present experiment, we tested the extent to which vocal demands (speaking) can affect pupil dilation. Based on corresponding preliminary evidence found in a reanalysis of an existing data set from our lab, we setup a new experiment that systematically investigated vocal response-related effects compared to mere jaw/lip movement and button press responses. Conditions changed on a trial-by-trial basis while participants were instructed to keep fixating a central cross on a screen throughout. In line with our prediction (and previous observation), speaking caused the pupils to dilate strongest, followed by nonvocal movements and finally a baseline condition without any vocal or muscular demands. An additional analysis of blink rates showed no difference in blink frequency between vocal and baseline conditions, but different blink dynamics. Finally, simultaneously recorded electromyographic activity showed that muscle activity may contribute to some (but not all) aspects of the observed effects on pupil size. The results are discussed in the context of other recent research indicating effects of perceived (instead of executed) vocal action on pupil dynamics.},
  language = {en}
}

@article{GrossheinrichFirkSchulteRuetheretal.2018,
  author   = {Grossheinrich, Nicola and Firk, Christine and Schulte-R{\"u}ther, Martin and von Leupoldt, Andreas and Konrad, Kerstin and Huestegge, Lynn},
  title    = {Looking while unhappy: a mood-congruent attention bias toward sad adult faces in children},
  series   = {Frontiers in Psychology},
  volume   = {9},
  journal  = {Frontiers in Psychology},
  number   = {2577},
  doi      = {10.3389/fpsyg.2018.02577},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-177688},
  year     = {2018},
  abstract = {A negative mood-congruent attention bias has been consistently observed, for example, in clinical studies on major depression. This bias is assumed to be dysfunctional in that it supports maintaining a sad mood, whereas a potentially adaptive role has largely been neglected. Previous experiments involving sad mood induction techniques found a negative mood-congruent attention bias specifically for young individuals, explained by an adaptive need for information transfer in the service of mood regulation. In the present study we investigated the attentional bias in typically developing children (aged 6-12 years) when happy and sad moods were induced. Crucially, we manipulated the age (adult vs. child) of the displayed pairs of facial expressions depicting sadness, anger, fear and happiness. The results indicate that sad children indeed exhibited a mood specific attention bias toward sad facial expressions. Additionally, this bias was more pronounced for adult faces. Results are discussed in the context of an information gain which should be stronger when looking at adult faces due to their more expansive life experience. These findings bear implications for both research methods and future interventions.},
  language = {en}
}

@article{GutzeitWellerMuthetal.2024,
  author   = {Gutzeit, Julian and Weller, Lisa and Muth, Felicitas and K{\"u}rten, Jens and Huestegge, Lynn},
  title    = {Eye did this! Sense of agency in eye movements},
  series   = {Acta Psychologica},
  volume   = {243},
  journal  = {Acta Psychologica},
  doi      = {10.1016/j.actpsy.2023.104121},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349819},
  year     = {2024},
  abstract = {This study investigates the sense of agency (SoA) for saccades with implicit and explicit agency measures. In two eye tracking experiments, participants moved their eyes towards on-screen stimuli that subsequently changed color. Participants then either reproduced the temporal interval between saccade and color-change (Experiment 1) or reported the time points of these events with an auditory Libet clock (Experiment 2) to measure temporal binding effects as implicit indices of SoA. Participants were either made to believe to exert control over the color change or not (agency manipulation). Explicit ratings indicated that the manipulation of causal beliefs and hence agency was successful. However, temporal binding was only evident for caused effects, and only when a sufficiently sensitive procedure was used (auditory Libet clock). This suggests a feebler connection between temporal binding and SoA than previously proposed. The results also provide evidence for a relatively fast acquisition of sense of agency for previously never experienced types of action-effect associations. This indicates that the underlying processes of action control may be rooted in more intricate and adaptable cognitive models than previously thought. Oculomotor SoA as addressed in the present study presumably represents an important cognitive foundation of gaze-based social interaction (social sense of agency) or gaze-based human-machine interaction scenarios. Public significance statement: In this study, sense of agency for eye movements in the non-social domain is investigated in detail, using both explicit and implicit measures. Therefore, it offers novel and specific insights into comprehending sense of agency concerning effects induced by eye movements, as well as broader insights into agency pertaining to entirely newly acquired types of action-effect associations. Oculomotor sense of agency presumably represents an important cognitive foundation of gaze-based social interaction (social agency) or gaze-based human-machine interaction scenarios. Due to peculiarities of the oculomotor domain such as the varying degree of volitional control, eye movements could provide new information regarding more general theories of sense of agency in future research.},
  language = {en}
}

@article{HoffmannKochHuestegge2022,
  author   = {Hoffmann, Mareike A. and Koch, Iring and Huestegge, Lynn},
  title    = {Are some effector systems harder to switch to? In search of cost asymmetries when switching between manual, vocal, and oculomotor tasks},
  series   = {Memory \& Cognition},
  volume   = {50},
  journal  = {Memory \& Cognition},
  number   = {7},
  doi      = {10.3758/s13421-022-01287-1},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324887},
  pages    = {1563--1577},
  year     = {2022},
  abstract = {In task-switching studies, performance is typically worse in task-switch trials than in task-repetition trials. These switch costs are often asymmetrical, a phenomenon that has been explained by referring to a dominance of one task over the other. Previous studies also indicated that response modalities associated with two tasks may be considered as integral components for defining a task set. However, a systematic assessment of the role of response modalities in task switching is still lacking: Are some response modalities harder to switch to than others? The present study systematically examined switch costs when combining tasks that differ only with respect to their associated effector systems. In Experiment 1, 16 participants switched (in unpredictable sequence) between oculomotor and vocal tasks. In Experiment 2, 72 participants switched (in pairwise combinations) between oculomotor, vocal, and manual tasks. We observed systematic performance costs when switching between response modalities under otherwise constant task features and could thereby replicate previous observations of response modality switch costs. However, we did not observe any substantial switch-cost asymmetries. As previous studies using temporally overlapping dual-task paradigms found substantial prioritization effects (in terms of asymmetric costs) especially for oculomotor tasks, the present results suggest different underlying processes in sequential task switching than in simultaneous multitasking. While more research is needed to further substantiate a lack of response modality switch-cost asymmetries in a broader range of task switching situations, we suggest that task-set representations related to specific response modalities may exhibit rapid decay.},
  language = {en}
}

@article{HoffmannPieczykolanKochetal.2020,
  author   = {Hoffmann, Mareike A. and Pieczykolan, Aleks and Koch, Iring and Huestegge, Lynn},
  title    = {Two sources of task prioritization: The interplay of effector-based and task order-based capacity allocation in the {PRP} paradigm},
  series   = {Attention, Perception, \& Psychophysics},
  volume   = {82},
  journal  = {Attention, Perception, \& Psychophysics},
  issn     = {1943-3921},
  doi      = {10.3758/s13414-020-02071-6},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-235365},
  pages    = {3402--3414},
  year     = {2020},
  abstract = {When processing of two tasks overlaps, performance is known to suffer. In the well-established psychological refractory period (PRP) paradigm, tasks are triggered by two stimuli with a short temporal delay (stimulus onset asynchrony; SOA), thereby allowing control of the degree of task overlap. A decrease of the SOA reliably yields longer RTs of the task associated with the second stimulus (Task 2) while performance in the other task (Task 1) remains largely unaffected. This Task 2-specific SOA effect is usually interpreted in terms of central capacity limitations. Particularly, it has been assumed that response selection in Task 2 is delayed due to the allocation of less capacity until this process has been completed in Task 1. Recently, another important factor determining task prioritization has been proposed---namely, the particular effector systems associated with tasks. Here, we study both sources of task prioritization simultaneously by systematically combining three different effector systems (pairwise combinations of oculomotor, vocal, and manual responses) in the PRP paradigm. Specifically, we asked whether task order-based task prioritization (SOA effect) is modulated as a function of Task 2 effector system. The results indicate a modulation of SOA effects when the same (oculomotor) Task 1 is combined with a vocal versus a manual Task 2. This is incompatible with the assumption that SOA effects are solely determined by Task 1 response selection duration. Instead, they support the view that dual-task processing bottlenecks are resolved by establishing a capacity allocation scheme fed by multiple input factors, including attentional weights associated with particular effector systems.},
  language = {en}
}

@article{HuesteggeBoeckler2016,
  author   = {Huestegge, Lynn and B{\"o}ckler, Anne},
  title    = {Out of the corner of the driver's eye: Peripheral processing of hazards in static traffic scenes},
  series   = {Journal of Vision},
  volume   = {16},
  journal  = {Journal of Vision},
  number   = {11},
  doi      = {10.1167/16.2.11},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147726},
  pages    = {1--15},
  year     = {2016},
  abstract = {Effective gaze control in traffic, based on peripheral visual information, is important to avoid hazards. Whereas previous hazard perception research mainly focused on skill-component development (e.g., orientation and hazard processing), little is known about the role and dynamics of peripheral vision in hazard perception. We analyzed eye movement data from a study in which participants scanned static traffic scenes including medium-level versus dangerous hazards and focused on characteristics of fixations prior to entering the hazard region. We found that initial saccade amplitudes into the hazard region were substantially longer for dangerous (vs. medium-level) hazards, irrespective of participants' driving expertise. An analysis of the temporal dynamics of this hazard-level dependent saccade targeting distance effect revealed that peripheral hazard-level processing occurred around 200-400 ms during the course of the fixation prior to entering the hazard region. An additional psychophysical hazard detection experiment, in which hazard eccentricity was manipulated, revealed better detection for dangerous (vs. medium-level) hazards in both central and peripheral vision. Furthermore, we observed a significant perceptual decline from center to periphery for medium (but not for highly) dangerous hazards. Overall, the results suggest that hazard processing is remarkably effective in peripheral vision and utilized to guide the eyes toward potential hazards.},
  language = {en}
}

@article{HuesteggeHerbortGoschetal.2019,
  author   = {Huestegge, Lynn and Herbort, Oliver and Gosch, Nora and Kunde, Wilfried and Pieczykolan, Aleks},
  title    = {Free-choice saccades and their underlying determinants: explorations of high-level voluntary oculomotor control},
  series   = {Journal of Vision},
  volume   = {19},
  journal  = {Journal of Vision},
  number   = {3},
  doi      = {10.1167/19.3.14},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-201493},
  pages    = {14},
  year     = {2019},
  abstract = {Models of eye-movement control distinguish between different control levels, ranging from automatic (bottom-up, stimulus-driven selection) and automatized (based on well-learned routines) to voluntary (top-down, goal-driven selection, e.g., based on instructions). However, one type of voluntary control has yet only been examined in the manual and not in the oculomotor domain, namely free-choice selection among arbitrary targets, that is, targets that are of equal interest from both a bottom-up and top-down processing perspective. Here, we ask which features of targets (identity- or location-related) are used to determine such oculomotor free-choice behavior. In two experiments, participants executed a saccade to one of four peripheral targets in three different choice conditions: unconstrained free choice, constrained free choice based on target identity (color), and constrained free choice based on target location. The analysis of choice frequencies revealed that unconstrained free-choice selection closely resembled constrained choice based on target location. The results suggest that free-choice oculomotor control is mainly guided by spatial (location-based) target characteristics. We explain these results by assuming that participants tend to avoid less parsimonious recoding of target-identity representations into spatial codes, the latter being a necessary prerequisite to configure oculomotor commands.},
  language = {en}
}

@article{HuesteggePieczykolanKoch2023,
  author   = {Huestegge, Lynn and Pieczykolan, Aleks and Koch, Iring},
  title    = {A {Gestalt} account of human behavior is supported by evidence from switching between single and dual actions},
  series   = {Scientific Reports},
  volume   = {13},
  journal  = {Scientific Reports},
  doi      = {10.1038/s41598-023-47788-0},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357862},
  year     = {2023},
  abstract = {The question of how behavior is represented in the mind lies at the core of psychology as the science of mind and behavior. While a long-standing research tradition has established two opposing fundamental views of perceptual representation, Structuralism and Gestalt psychology, we test both accounts with respect to action representation: Are multiple actions (characterizing human behavior in general) represented as the sum of their component actions (Structuralist view) or holistically (Gestalt view)? Using a single-/dual-response switch paradigm, we analyzed switches between dual ([A + B]) and single ([A], [B]) responses across different effector systems and revealed comparable performance in partial repetitions and full switches of behavioral requirements (e.g., in [A + B] $\rightarrow$ [A] vs. [B] $\rightarrow$ [A], or [A] $\rightarrow$ [A + B] vs. [B] $\rightarrow$ [A + B]), but only when the presence of dimensional overlap between responses allows for Gestalt formation. This evidence for a Gestalt view of behavior in our paradigm challenges some fundamental assumptions in current (tacitly Structuralist) action control theories (in particular the idea that all actions are represented compositionally with reference to their components), provides a novel explanatory angle for understanding complex, highly synchronized human behavior (e.g., dance), and delimitates the degree to which complex behavior can be analyzed in terms of its basic components.},
  language = {en}
}

@article{HuesteggeRohrssenvanErmingenMarbachetal.2014,
  author   = {Huestegge, Lynn and Rohr{\ss}en, Julia and van Ermingen-Marbach, Muna and Pape-Neumann, Julia and Heim, Stefan},
  title    = {Devil in the details? Developmental dyslexia and visual long-term memory for details},
  series   = {Frontiers in Psychology},
  volume   = {5},
  journal  = {Frontiers in Psychology},
  number   = {686},
  issn     = {1664-1078},
  doi      = {10.3389/fpsyg.2014.00686},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115887},
  year     = {2014},
  abstract = {Cognitive theories on causes of developmental dyslexia can be divided into language-specific and general accounts. While the former assume that words are special in that associated processing problems are rooted in language-related cognition (e.g., phonology) deficits, the latter propose that dyslexia is rather rooted in a general impairment of cognitive (e.g., visual and/or auditory) processing streams. In the present study, we examined to what extent dyslexia (typically characterized by poor orthographic representations) may be associated with a general deficit in visual long-term memory (LTM) for details. We compared object- and detail-related visual LTM performance (and phonological skills) between dyslexic primary school children and IQ-, age-, and gender-matched controls. The results revealed that while the overall amount of LTM errors was comparable between groups, dyslexic children exhibited a greater portion of detail-related errors. The results suggest that not only phonological, but also general visual resolution deficits in LTM may play an important role in developmental dyslexia.},
  language = {en}
}