@article{KirschHerbortButzetal.2012,
  author   = {Kirsch, Wladimir and Herbort, Oliver and Butz, Martin V. and Kunde, Wilfried},
  title    = {Influence of Motor Planning on Distance Perception within the Peripersonal Space},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-75332},
  year     = {2012},
  abstract = {We examined whether movement costs as defined by movement magnitude have an impact on distance perception in near space. In Experiment 1, participants were given a numerical cue regarding the amplitude of a hand movement to be carried out. Before the movement execution, the length of a visual distance had to be judged. These visual distances were judged to be larger, the larger the amplitude of the concurrently prepared hand movement was. In Experiment 2, in which numerical cues were merely memorized without concurrent movement planning, this general increase of distance with cue size was not observed. The results of these experiments indicate that visual perception of near space is specifically affected by the costs of planned hand movements.},
  subject  = {Psychologie},
  language = {en}
}

@article{EhrenfeldHerbortButz2013,
  author   = {Ehrenfeld, Stephan and Herbort, Oliver and Butz, Martin V.},
  title    = {Modular neuron-based body estimation: maintaining consistency over different limbs, modalities, and frames of reference},
  series   = {Frontiers in Computational Neuroscience},
  volume   = {7},
  journal  = {Frontiers in Computational Neuroscience},
  number   = {148},
  doi      = {10.3389/fncom.2013.00148},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-122253},
  year     = {2013},
  abstract = {This paper addresses the question of how the brain maintains a probabilistic body state estimate over time from a modeling perspective. The neural Modular Modality Frame (nMMF) model simulates such a body state estimation process by continuously integrating redundant, multimodal body state information sources. The body state estimate itself is distributed over separate, but bidirectionally interacting modules. nMMF compares the incoming sensory and present body state information across the interacting modules and fuses the information sources accordingly. At the same time, nMMF enforces body state estimation consistency across the modules. nMMF is able to detect conflicting sensory information and to consequently decrease the influence of implausible sensor sources on the fly. In contrast to the previously published Modular Modality Frame (MMF) model, nMMF offers a biologically plausible neural implementation based on distributed, probabilistic population codes. Besides its neural plausibility, the neural encoding has the advantage of enabling (a) additional probabilistic information flow across the separate body state estimation modules and (b) the representation of arbitrary probability distributions of a body state. The results show that the neural estimates can detect and decrease the impact of false sensory information, can propagate conflicting information across modules, and can improve overall estimation accuracy due to additional module interactions. Even bodily illusions, such as the rubber hand illusion, can be simulated with nMMF. We conclude with an outlook on the potential of modeling human data and of invoking goal-directed behavioral control.},
  language = {en}
}

@phdthesis{Herbort2008,
  author   = {Herbort, Oliver},
  title    = {Encoding Redundancy for Task-dependent Optimal Control: A Neural Network Model of Human Reaching},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26032},
  school   = {Universit{\"a}t W{\"u}rzburg},
  year     = {2008},
  abstract = {The human motor system is adaptive in two senses. It adapts to the properties of the body to enable effective control. It also adapts to different situational requirements and constraints. This thesis proposes a new neural network model of both kinds of adaptivity for the motor cortical control of human reaching movements, called SURE\_REACH (sensorimotor unsupervised learning redundancy resolving control architecture). In this neural network approach, the kinematic and sensorimotor redundancy of a three-joint planar arm is encoded in task-independent internal models by an unsupervised learning scheme. Before a movement is executed, the neural networks prepare a movement plan from the task-independent internal models, which flexibly incorporates external, task-specific constraints. The movement plan is then implemented by proprioceptive or visual closed-loop control. This structure enables SURE\_REACH to reach hand targets while incorporating task-specific constraints, for example adhering to kinematic constraints, anticipating the demands of subsequent movements, avoiding obstacles, or reducing the motion of impaired joints. Besides this functionality, the model accounts for temporal aspects of human reaching movements or for data from priming experiments. Additionally, the neural network structure reflects properties of motor cortical networks like interdependent population encoded body space representations, recurrent connectivity, or associative learning schemes. This thesis introduces and describes the new model, relates it to current computational models, evaluates its functionality, relates it to human behavior and neurophysiology, and finally discusses potential extensions as well as the validity of the model. In conclusion, the proposed model grounds highly flexible task-dependent behavior in a neural network framework and unsupervised sensorimotor learning.},
  subject  = {Bewegungssteuerung},
  language = {en}
}

@article{HerbortButz2012,
  author   = {Herbort, Oliver and Butz, Martin V.},
  title    = {Too good to be true? Ideomotor theory from a computational perspective},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76383},
  year     = {2012},
  abstract = {In recent years, Ideomotor Theory has regained widespread attention and sparked the development of a number of theories on goal-directed behavior and learning. However, there are two issues with previous studies' use of Ideomotor Theory. Although Ideomotor Theory is seen as very general, it is often studied in settings that are considerably more simplistic than most natural situations. Moreover, Ideomotor Theory's claim that effect anticipations directly trigger actions and that action-effect learning is based on the formation of direct action-effect associations is hard to address empirically. We address these points from a computational perspective. A simple computational model of Ideomotor Theory was tested in tasks with different degrees of complexity. The model evaluation showed that Ideomotor Theory is a computationally feasible approach for understanding efficient action-effect learning for goal-directed behavior if the following preconditions are met: (1) The range of potential actions and effects has to be restricted. (2) Effects have to follow actions within a short time window. (3) Actions have to be simple and may not require sequencing. The first two preconditions also limit human performance and thus support Ideomotor Theory. The last precondition can be circumvented by extending the model with more complex, indirect action generation processes. In conclusion, we suggest that Ideomotor Theory offers a comprehensive framework to understand action-effect learning. However, we also suggest that additional processes may mediate the conversion of effect anticipations into actions in many situations.},
  subject  = {Psychologie},
  language = {en}
}

@article{KirschKundeHerbort2021,
  author   = {Kirsch, Wladimir and Kunde, Wilfried and Herbort, Oliver},
  title    = {Impact of proprioception on the perceived size and distance of external objects in a virtual action task},
  series   = {Psychonomic Bulletin \& Review},
  volume   = {28},
  journal  = {Psychonomic Bulletin \& Review},
  number   = {4},
  issn     = {1531-5320},
  doi      = {10.3758/s13423-021-01915-y},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-273235},
  pages    = {1191--1201},
  year     = {2021},
  abstract = {Previous research has revealed changes in the perception of objects due to changes of object-oriented actions. In the present study, we varied the arm and finger postures in the context of a virtual reaching and grasping task and tested whether this manipulation can simultaneously affect the perceived size and distance of external objects. Participants manually controlled visual cursors, aiming at reaching and enclosing a distant target object, and judged the size and distance of this object. We observed that a visual-proprioceptive discrepancy introduced during the reaching part of the action simultaneously affected the judgments of target distance and of target size (Experiment 1). A related variation applied to the grasping part of the action affected the judgments of size, but not of distance of the target (Experiment 2). These results indicate that perceptual effects observed in the context of actions can directly arise through sensory integration of multimodal redundant signals and indirectly through perceptual constancy mechanisms.},
  language = {en}
}

@article{HuesteggeHerbortGoschetal.2019,
  author   = {Huestegge, Lynn and Herbort, Oliver and Gosch, Nora and Kunde, Wilfried and Pieczykolan, Aleks},
  title    = {Free-choice saccades and their underlying determinants: explorations of high-level voluntary oculomotor control},
  series   = {Journal of Vision},
  volume   = {19},
  journal  = {Journal of Vision},
  number   = {3},
  doi      = {10.1167/19.3.14},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-201493},
  pages    = {14},
  year     = {2019},
  abstract = {Models of eye-movement control distinguish between different control levels, ranging from automatic (bottom-up, stimulus-driven selection) and automatized (based on well-learned routines) to voluntary (top-down, goal-driven selection, e.g., based on instructions). However, one type of voluntary control has yet only been examined in the manual and not in the oculomotor domain, namely free-choice selection among arbitrary targets, that is, targets that are of equal interest from both a bottom-up and top-down processing perspective. Here, we ask which features of targets (identity- or location-related) are used to determine such oculomotor free-choice behavior. In two experiments, participants executed a saccade to one of four peripheral targets in three different choice conditions: unconstrained free choice, constrained free choice based on target identity (color), and constrained free choice based on target location. The analysis of choice frequencies revealed that unconstrained free-choice selection closely resembled constrained choice based on target location. The results suggest that free-choice oculomotor control is mainly guided by spatial (location-based) target characteristics. We explain these results by assuming that participants tend to avoid less parsimonious recoding of target-identity representations into spatial codes, the latter being a necessary prerequisite to configure oculomotor commands.},
  language = {en}
}

@article{HerbortKrauseKunde2021,
  author   = {Herbort, Oliver and Krause, Lisa-Marie and Kunde, Wilfried},
  title    = {Perspective determines the production and interpretation of pointing gestures},
  series   = {Psychonomic Bulletin \& Review},
  volume   = {28},
  journal  = {Psychonomic Bulletin \& Review},
  issn     = {1069-9384},
  doi      = {10.3758/s13423-020-01823-7},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-235293},
  pages    = {641--648},
  year     = {2021},
  abstract = {Pointing is a ubiquitous means of communication. Nevertheless, observers systematically misinterpret the location indicated by pointers. We examined whether these misunderstandings result from the typically different viewpoints of pointers and observers. Participants either pointed themselves or interpreted points while assuming the pointer's or a typical observer perspective in a virtual reality environment. The perspective had a strong effect on the relationship between pointing gestures and referents, whereas the task had only a minor influence. This suggests that misunderstandings between pointers and observers primarily result from their typically different viewpoints.},
  language = {en}
}