@article{EhrenfeldHerbortButz2013,
  author   = {Ehrenfeld, Stephan and Herbort, Oliver and Butz, Martin V.},
  title    = {Modular neuron-based body estimation: maintaining consistency over different limbs, modalities, and frames of reference},
  journal  = {Frontiers in Computational Neuroscience},
  volume   = {7},
  number   = {148},
  doi      = {10.3389/fncom.2013.00148},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-122253},
  year     = {2013},
  abstract = {This paper addresses the question of how the brain maintains a probabilistic body state estimate over time from a modeling perspective. The neural Modular Modality Frame (nMMF) model simulates such a body state estimation process by continuously integrating redundant, multimodal body state information sources. The body state estimate itself is distributed over separate, but bidirectionally interacting modules. nMMF compares the incoming sensory and present body state information across the interacting modules and fuses the information sources accordingly. At the same time, nMMF enforces body state estimation consistency across the modules. nMMF is able to detect conflicting sensory information and to consequently decrease the influence of implausible sensor sources on the fly. In contrast to the previously published Modular Modality Frame (MMF) model, nMMF offers a biologically plausible neural implementation based on distributed, probabilistic population codes. Besides its neural plausibility, the neural encoding has the advantage of enabling (a) additional probabilistic information flow across the separate body state estimation modules and (b) the representation of arbitrary probability distributions of a body state. The results show that the neural estimates can detect and decrease the impact of false sensory information, can propagate conflicting information across modules, and can improve overall estimation accuracy due to additional module interactions. Even bodily illusions, such as the rubber hand illusion, can be simulated with nMMF. We conclude with an outlook on the potential of modeling human data and of invoking goal-directed behavioral control.},
  language = {en}
}

@article{GerdesWieserAlpers2014,
  author   = {Gerdes, Antje B. M. and Wieser, Matthias J. and Alpers, Georg W.},
  title    = {Emotional pictures and sounds: a review of multimodal interactions of emotion cues in multiple domains},
  journal  = {Frontiers in Psychology},
  volume   = {5},
  pages    = {1351},
  issn     = {1664-1078},
  doi      = {10.3389/fpsyg.2014.01351},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-114548},
  year     = {2014},
  abstract = {In everyday life, multiple sensory channels jointly trigger emotional experiences, and one channel may alter processing in another channel. For example, seeing an emotional facial expression and hearing the voice's emotional tone jointly create the emotional experience. This example, in which auditory and visual input relate to social communication, has gained considerable attention from researchers. However, interactions of visual and auditory emotional information are not limited to social communication but can extend to much broader contexts, including human, animal, and environmental cues. In this article, we review current research on audiovisual emotion processing beyond face-voice stimuli to develop a broader perspective on multimodal interactions in emotion processing. We argue that current concepts of multimodality should be extended by considering an ecologically valid variety of stimuli in audiovisual emotion processing. Therefore, we provide an overview of studies in which emotional sounds and their interactions with complex pictures of scenes were investigated. In addition to behavioral studies, we focus on neuroimaging as well as electrophysiological and peripheral physiological findings. Furthermore, we integrate these findings and identify similarities and differences. We conclude with suggestions for future research.},
  language = {en}
}