@article{SchneiderHuestegge2019,
  author   = {Schneider, Norbert and Huestegge, Lynn},
  title    = {Interaction of oculomotor and manual behavior: evidence from simulated driving in an approach-avoidance steering task},
  journal  = {Cognitive Research: Principles and Implications},
  volume   = {4},
  pages    = {19},
  year     = {2019},
  doi      = {10.1186/s41235-019-0170-7},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200419},
  language = {en},
  abstract = {Background While the coordination of oculomotor and manual behavior is essential for driving a car, surprisingly little is known about this interaction, especially in situations requiring a quick steering reaction. In the present study, we analyzed oculomotor gaze and manual steering behavior in approach and avoidance tasks. Three task blocks were implemented within a dynamic simulated driving environment requiring the driver either to steer away from/toward a visual stimulus or to switch between both tasks. Results Task blocks requiring task switches were associated with higher manual response times and increased error rates. Manual response times did not significantly differ depending on whether drivers had to steer away from vs toward a stimulus, whereas oculomotor response times and gaze pattern variability were increased when drivers had to steer away from a stimulus compared to steering toward a stimulus. Conclusion The increased manual response times and error rates in mixed tasks indicate performance costs associated with cognitive flexibility, while the increased oculomotor response times and gaze pattern variability indicate a parsimonious cross-modal action control strategy (avoiding stimulus fixation prior to steering away from it) for the avoidance scenario. Several discrepancies between these results and typical eye-hand interaction patterns in basic laboratory research suggest that the specific goals and complex perceptual affordances associated with driving a vehicle strongly shape cross-modal control of behavior.},
}

@article{HuesteggeHerbortGoschetal.2019,
  author   = {Huestegge, Lynn and Herbort, Oliver and Gosch, Nora and Kunde, Wilfried and Pieczykolan, Aleks},
  title    = {Free-choice saccades and their underlying determinants: explorations of high-level voluntary oculomotor control},
  journal  = {Journal of Vision},
  volume   = {19},
  number   = {3},
  pages    = {14},
  year     = {2019},
  doi      = {10.1167/19.3.14},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-201493},
  language = {en},
  abstract = {Models of eye-movement control distinguish between different control levels, ranging from automatic (bottom-up, stimulus-driven selection) and automatized (based on well-learned routines) to voluntary (top-down, goal-driven selection, e.g., based on instructions). However, one type of voluntary control has yet only been examined in the manual and not in the oculomotor domain, namely free-choice selection among arbitrary targets, that is, targets that are of equal interest from both a bottom-up and top-down processing perspective. Here, we ask which features of targets (identity- or location-related) are used to determine such oculomotor free-choice behavior. In two experiments, participants executed a saccade to one of four peripheral targets in three different choice conditions: unconstrained free choice, constrained free choice based on target identity (color), and constrained free choice based on target location. The analysis of choice frequencies revealed that unconstrained free-choice selection closely resembled constrained choice based on target location. The results suggest that free-choice oculomotor control is mainly guided by spatial (location-based) target characteristics. We explain these results by assuming that participants tend to avoid less parsimonious recoding of target-identity representations into spatial codes, the latter being a necessary prerequisite to configure oculomotor commands.},
}