@misc{HaufNiedingSeger2021, author = {Hauf, Juliane E. K. and Nieding, Gerhild and Seger, Benedikt T.}, title = {Correction to: The development of dynamic perceptual simulations during sentence comprehension}, series = {Cognitive Processing}, volume = {22}, journal = {Cognitive Processing}, number = {4}, doi = {10.1007/s10339-021-01027-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-352611}, pages = {731}, year = {2021}, abstract = {No abstract available.}, language = {en} } @article{LandmannBreilHuesteggeetal.2024, author = {Landmann, Eva and Breil, Christina and Huestegge, Lynn and B{\"o}ckler, Anne}, title = {The semantics of gaze in person perception: a novel qualitative-quantitative approach}, series = {Scientific Reports}, volume = {14}, journal = {Scientific Reports}, number = {1}, issn = {2045-2322}, doi = {10.1038/s41598-024-51331-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-361413}, year = {2024}, abstract = {Interpreting gaze behavior is essential in evaluating interaction partners, yet the 'semantics of gaze' in dynamic interactions are still poorly understood. We aimed to comprehensively investigate effects of gaze behavior patterns in different conversation contexts, using a two-step, qualitative-quantitative procedure. Participants watched video clips of single persons listening to autobiographic narrations by another (invisible) person. The listener's gaze behavior was manipulated in terms of gaze direction, frequency and direction of gaze shifts, and blink frequency; emotional context was manipulated through the valence of the narration (neutral/negative). In Experiment 1 (qualitative-exploratory), participants freely described which states and traits they attributed to the listener in each condition, allowing us to identify relevant aspects of person perception and to construct distinct rating scales that were implemented in Experiment 2 (quantitative-confirmatory). Results revealed systematic and differential meanings ascribed to the listener's gaze behavior. For example, rapid blinking and fast gaze shifts were rated more negatively (e.g., restless and unnatural) than slower gaze behavior; downward gaze was evaluated more favorably (e.g., empathetic) than other gaze aversion types, especially in the emotionally negative context. Overall, our study contributes to a more systematic understanding of flexible gaze semantics in social interaction.}, language = {en} } @article{JuGanRinnetal.2022, author = {Ju, Qianqian and Gan, Yiqun and Rinn, Robin and Duan, Yanping and Lippke, Sonia}, title = {Health Status Stability of Patients in a Medical Rehabilitation Program: What Are the Roles of Time, Physical Fitness Level, and Self-efficacy?}, series = {International Journal of Behavioral Medicine}, volume = {29}, journal = {International Journal of Behavioral Medicine}, number = {5}, issn = {1070-5503}, doi = {10.1007/s12529-021-10046-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-308445}, pages = {624-637}, year = {2022}, abstract = {Background Individuals' physical and mental health, as well as their chances of returning to work after their ability to work is damaged, can be addressed by medical rehabilitation. Aim This study investigated the developmental trends of mental and physical health among patients in medical rehabilitation and the roles of self-efficacy and physical fitness in the development of mental and physical health. Design A longitudinal design that included four time-point measurements across 15 months. 
Setting A medical rehabilitation center in Germany. Population Participants included 201 patients who were recruited from a medical rehabilitation center. Methods To objectively measure physical fitness (lung functioning), oxygen reabsorption at anaerobic threshold (VO2AT) was used, along with several self-report scales. Results We found a nonlinear change in mental health among medical rehabilitation patients. The results underscored the importance of medical rehabilitation for patients' mental health over time. In addition, patients' physical health was stable over time. The initial level of physical fitness (VO2AT) positively predicted their mental health and kept the trend more stable. Self-efficacy appeared to have a positive relationship with mental health after rehabilitation treatment. Conclusions This study revealed a nonlinear change in mental health among medical rehabilitation patients. Self-efficacy was positively related to mental health, and the initial level of physical fitness positively predicted the level of mental health after rehabilitation treatment. Clinical Rehabilitation More attention could be given to physical capacity and self-efficacy for improving and maintaining rehabilitants' mental health.}, language = {en} } @phdthesis{Menne2020, author = {Menne, Isabelle M.}, title = {Facing Social Robots - Emotional Reactions towards Social Robots}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-120-4}, doi = {10.25972/WUP-978-3-95826-121-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-187131}, school = {W{\"u}rzburg University Press}, pages = {XXIV, 201}, year = {2020}, abstract = {An army colonel feels pity for a robot that defuses landmines in a test run and declares the test inhumane (Garreau, 2007). Robots receive military promotions, funerals, and medals of honor (Garreau, 2007; Carpenter, 2013). A turtle robot is being developed to teach children to treat robots well (Ackermann, 2018). The humanoid robot Sophia recently became a Saudi Arabian citizen, and there are already debates about whether robots should be granted rights (Delcker, 2018). These and similar developments already illustrate the significance of robots and the emotional impact they can have. Judging by comments in internet forums, however, these emotional reactions seem to play out on a different level: people there often ask how anyone could react emotionally to a robot at all. Indeed, from a purely rational perspective it is difficult to explain why humans should feel for a lifeless ('mindless') machine. And yet not only the reports mentioned above but also first scientific studies (e.g., Rosenthal-von der P{\"u}tten et al., 2013) attest to the emotional influence robots can have on humans. Despite the importance of investigating emotional reactions to robots, few scientific studies on the topic exist so far. In fact, Kappas, Krumhuber, and K{\"u}ster (2013) identified the systematic analysis and evaluation of social reactions to robots as one of the greatest challenges of affective human-robot interaction. According to Scherer (2001; 2005), emotions consist of the coordination and synchronization of several interrelated components: 
motor expression (facial expressions), subjective experience, action tendencies, and physiological and cognitive components. To capture an emotion completely, all of these components would have to be measured, but such a comprehensive analysis has never been carried out (Scherer, 2005). Questionnaires are used predominantly (cf. Bethel \& Murphy, 2010), yet they usually assess only subjective experience. Bakeman and Gottman (1997) even state that only about 8\% of psychological research is based on behavioral data, although psychology is traditionally defined as the 'study of mind and behavior' (American Psychological Association, 2018). Measurement of the other emotion components is rare. Moreover, questionnaires come with a number of drawbacks (Austin, Deary, Gibson, McGregor, Dent, 1998; Fan et al., 2006; Wilcox, 2011). Both Bethel and Murphy (2010) and Arkin and Moshkina (2015) therefore argue for a multi-method approach in order to gain a more comprehensive understanding of affective processes in human-robot interaction. The main goal of this dissertation is thus to use a multi-method approach to assess several components of emotion (motor expression, the subjective feeling component, action tendencies) and thereby contribute to a more complete and deeper picture of emotional processes towards robots. To this end, three experimental studies with a total of 491 participants were conducted. By varying the level of "apparent reality" (Frijda, 2007) and the degree of power/control over the situation (cf. Scherer \& Ellgring, 2007), the studies examined how the intensity and quality of emotional reactions to robots change and which further factors (appearance of the robot, emotional expressivity of the robot, treatment of the robot, authority status of the robot) exert an influence. Experiment 1 was based on videos showing different types of robots (animal-like, anthropomorphic, machine-like) that were either emotionally expressive or not (on/off) in different situations (friendly treatment of the robot vs. mistreatment). Questionnaires on self-reported feelings and the motor-expressive component of emotion, facial expression (cf. Scherer, 2005), were analyzed. The Facial Action Coding System (Ekman, Friesen, \& Hager, 2002), the most comprehensive and most widely used method for the objective study of facial expressions, was employed for this purpose. The results showed that participants displayed facial expressions (Action Unit [AU] 12 and AUs associated with positive emotions, as well as AU 4 and AUs associated with negative emotions) and self-reported feelings in accordance with the valence of the treatment shown in the videos. Stronger emotional reactions were observed for emotionally expressive robots than for non-expressive robots. In the mistreatment condition, the animal-like robot Pleo received the most pity, empathy, negative feelings, and sadness, followed by the anthropomorphic robot Reeti, with the least for the machine-like robot Roomba; Roomba was attributed the most antipathy. These results tie in with previous research (e.g., 
Krach et al., 2008; Menne \& Schwab, 2018; Riek et al., 2009; Rosenthal-von der P{\"u}tten et al., 2013) and demonstrate the potential of facial expressions for natural human-robot interaction. Experiments 2 and 3 transferred Milgram's classic obedience experiments (1963; 1974) to the context of human-robot interaction. Milgram's obedience studies were considered well suited to examining the extent of empathy towards a robot in relation to obedience towards a robot. Experiment 2 differed from Experiment 3 in the level of "apparent reality" (Frijda, 2007): following Milgram (1963), a purely text-based study (Experiment 2) was contrasted with a live human-robot interaction (Experiment 3). While the dependent variables of Experiment 2 consisted of self-reported emotional feelings and estimates of hypothetical behavior, Experiment 3 assessed participants' subjective feelings as well as real behavior (reaction time: duration of hesitation; obedience rate; number of protests; facial expressions). Both experiments examined the influence of the authority status (high/low) of the robot issuing the commands (Nao) and the emotional expressivity (on/off) of the robot receiving the punishment (Pleo). Participants' subjective feelings in Experiment 2 did not differ between groups. Moreover, only few participants (20.2\%) stated that they would definitely punish the "victim" robot. Milgram (1963) reported a similar result. However, the real behavior of participants in Milgram's laboratory experiment differed from the hypothetical behavior estimated by participants to whom Milgram had only described the experiment. Likewise, comments from participants in Experiment 2 suggest that the described scenario may have been perceived as fictitious, so that estimates of hypothetical behavior cannot paint a realistic picture of real behavior towards robots in a live interaction. A further experiment (Experiment 3) was therefore conducted as a live interaction with one robot as the authority figure (high vs. low authority status) and another robot as the "victim" (emotionally expressive vs. non-expressive). Group differences were found in questionnaires on emotional reactions: participants showed more empathy towards the emotionally expressive robot and reported more joy and less antipathy than towards a non-expressive robot. In addition, facial expressions associated with negative emotions were observed while participants carried out Nao's command and punished Pleo. Although participants tended to hesitate longer when they were to punish an emotionally expressive robot and when the command came from a robot with low authority status, this difference did not reach significance. Furthermore, all but one participant obeyed and punished Pleo as commanded by the Nao robot. 
This result stands in stark contrast to the self-reported hypothetical behavior of participants in Experiment 2 and supports the assumption that estimates of hypothetical behavior in a human-robot obedience scenario are not a reliable indicator of real behavior in a live human-robot interaction. Situational variables, such as obedience to authority, even towards a robot, appear to be stronger than empathy for a robot. This finding ties in with other studies (e.g., Bartneck \& Hu, 2008; Geiskkovitch et al., 2016; Menne, 2017; Slater et al., 2006), yields new insights into the influence of robots, but also shows that choosing a method to evoke empathy for a robot is a non-trivial matter (cf. Geiskkovitch et al., 2016; cf. Milgram, 1965). Overall, the results support the assumption that emotional reactions to robots are profound and manifest themselves both at the subjective level and in the motor component. People react emotionally to a robot that is emotionally expressive and looks less like a machine. They feel empathy and negative feelings when a robot is mistreated, and these emotional reactions are reflected in their facial expressions. Moreover, people's estimates of their own hypothetical behavior differ from their actual behavior, which is why video-based or live interactions are recommended for analyzing real behavioral reactions. The arrival of social robots in society raises unprecedented questions, and this dissertation provides a first step towards understanding these new challenges.}, subject = {Roboter}, language = {en} } @article{MockeWellerFringsetal.2020, author = {Mocke, Viola and Weller, Lisa and Frings, Christian and Rothermund, Klaus and Kunde, Wilfried}, title = {Task relevance determines binding of effect features in action planning}, series = {Attention, Perception, \& Psychophysics}, volume = {82}, journal = {Attention, Perception, \& Psychophysics}, issn = {1943-3921}, doi = {10.3758/s13414-020-02123-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-231906}, pages = {3811-3831}, year = {2020}, abstract = {Action planning can be construed as the temporary binding of features of perceptual action effects. While previous research demonstrated binding for task-relevant, body-related effect features, the role of task-irrelevant or environment-related effect features in action planning is less clear. Here, we studied whether task-relevance or body-relatedness determines feature binding in action planning. Participants planned an action A, but before executing it initiated an intermediate action B. Each action relied on a body-related effect feature (index vs. middle finger movement) and an environment-related effect feature (cursor movement towards vs. away from a reference object). In Experiments 1 and 2, both effects were task-relevant. Performance in action B suffered from partial feature overlap with action A compared to full feature repetition or alternation, which is in line with binding of both features while planning action A. Importantly, this cost disappeared when all features were available but only body-related features were task-relevant (Experiment 3). 
When only the environment-related effect of action A was known in advance, action B benefitted when it aimed at the same (vs. a different) environment-related effect (Experiment 4). Consequently, the present results support the idea that task relevance determines whether binding of body-related and environment-related effect features takes place, whereas pre-activation of environment-related features without binding them primes feature-overlapping actions.}, language = {en} } @article{MuraliHaendel2022, author = {Murali, Supriya and H{\"a}ndel, Barbara}, title = {Motor restrictions impair divergent thinking during walking and during sitting}, series = {Psychological Research}, volume = {86}, journal = {Psychological Research}, number = {7}, issn = {1430-2772}, doi = {10.1007/s00426-021-01636-w}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-267722}, pages = {2144-2157}, year = {2022}, abstract = {Creativity, specifically divergent thinking, has been shown to benefit from unrestrained walking. Despite these findings, it is not clear if it is the lack of restriction that leads to the improvement. Our goal was to explore the effects of motor restrictions on divergent thinking for different movement states. In addition, we assessed whether spontaneous eye blinks, which are linked to motor execution, also predict performance. In experiment 1, we compared the performance in Guilford's alternate uses task (AUT) during walking vs. sitting, and analysed eye blink rates during both conditions. We found that AUT scores were higher during walking than sitting. Although eye blink rates differed significantly between movement conditions (walking vs. sitting) and task phases (baseline vs. thinking vs. responding), they did not correlate with task performance. In experiments 2 and 3, participants either walked freely or in a restricted path, or sat freely or fixated on a screen. When the factor restriction was explicitly modulated, the effect of walking was reduced, while restriction showed a significant influence on the fluency scores. Importantly, we found a significant between-subjects correlation between the rate of eye blinks and creativity scores, depending on the restriction condition. Our study shows a movement state-independent effect of restriction on divergent thinking. In other words, similar to unrestrained walking, unrestrained sitting also improves divergent thinking. Importantly, we discuss a mechanistic explanation of the effect of restriction on divergent thinking based on the increased size of the focus of attention and the consequent bias towards flexibility.}, language = {en} } @article{KozlikNeumannLozo2015, author = {Kozlik, Julia and Neumann, Roland and Lozo, Ljubica}, title = {Contrasting motivational orientation and evaluative coding accounts: on the need to differentiate the effectors of approach/avoidance responses}, series = {Frontiers in Psychology}, volume = {6}, journal = {Frontiers in Psychology}, number = {563}, doi = {10.3389/fpsyg.2015.00563}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-143192}, year = {2015}, abstract = {Several emotion theorists suggest that valenced stimuli automatically trigger motivational orientations and thereby facilitate corresponding behavior. 
Positive stimuli were thought to activate approach motivational circuits which in turn primed approach-related behavioral tendencies whereas negative stimuli were supposed to activate avoidance motivational circuits so that avoidance-related behavioral tendencies were primed (motivational orientation account). However, recent research suggests that typically observed affective stimulus response compatibility phenomena might be entirely explained in terms of theories accounting for mechanisms of general action control instead of assuming motivational orientations to mediate the effects (evaluative coding account). In what follows, we explore to what extent this notion is applicable. We present literature suggesting that evaluative coding mechanisms indeed influence a wide variety of affective stimulus response compatibility phenomena. However, the evaluative coding account does not seem to be sufficient to explain affective S-R compatibility effects. Instead, several studies provide clear evidence in favor of the motivational orientation account that seems to operate independently of evaluative coding mechanisms. Implications for theoretical developments and future research designs are discussed.}, language = {en} } @article{KlaffehnSellmannKirschetal.2021, author = {Klaffehn, Annika L. and Sellmann, Florian B. and Kirsch, Wladimir and Kunde, Wilfried and Pfister, Roland}, title = {Temporal binding as multisensory integration: Manipulating perceptual certainty of actions and their effects}, series = {Attention, Perception \& Psychophysics}, volume = {83}, journal = {Attention, Perception \& Psychophysics}, number = {8}, issn = {1943-393X}, doi = {10.3758/s13414-021-02314-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-273195}, pages = {3135-3145}, year = {2021}, abstract = {It has been proposed that statistical integration of multisensory cues may be a suitable framework to explain temporal binding, that is, the finding that causally related events such as an action and its effect are perceived to be shifted towards each other in time. A multisensory approach to temporal binding construes actions and effects as individual sensory signals, which are each perceived with a specific temporal precision. When they are integrated into one multimodal event, like an action-effect chain, the extent to which they affect this event's perception depends on their relative reliability. We test whether this assumption holds true in a temporal binding task by manipulating certainty of actions and effects. Two experiments suggest that a relatively uncertain sensory signal in such action-effect sequences is shifted more towards its counterpart than a relatively certain one. This was especially pronounced for temporal binding of the action towards its effect but could also be shown for effect binding. 
Other conceptual approaches to temporal binding cannot easily explain these results, and the study therefore adds to the growing body of evidence endorsing a multisensory approach to temporal binding.}, language = {en} } @article{BertiVosselGamer2017, author = {Berti, Stefan and Vossel, Gerhard and Gamer, Matthias}, title = {The orienting response in healthy aging: Novelty P3 indicates no general decline but reduced efficacy for fast stimulation rates}, series = {Frontiers in Psychology}, volume = {8}, journal = {Frontiers in Psychology}, doi = {10.3389/fpsyg.2017.01780}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-173651}, year = {2017}, abstract = {Automatic orienting to unexpected changes in the environment is a pre-requisite for adaptive behavior. One prominent mechanism of automatic attentional control is the Orienting Response (OR). Despite the fundamental significance of the OR in everyday life, only little is known about how the OR is affected by healthy aging. We tested this question in two age groups (19-38 and 55-72 years) and measured skin-conductance responses (SCRs) and event-related brain potentials (ERPs) to novels (i.e., short environmental sounds presented only once in the experiment; 10\% of the trials) compared to standard sounds (600 Hz sinusoidal tones with 200 ms duration; 90\% of the trials). Novel and standard stimuli were presented in four conditions differing in the inter-stimulus interval (ISI) with a mean ISI of either 10, 3, 1, or 0.5 s (blocked presentation). In both age groups, pronounced SCRs were elicited by novels in the 10 s ISI condition, suggesting the elicitation of stable ORs. These effects were accompanied by pronounced N1 and frontal P3 amplitudes in the ERP, suggesting that automatic novelty processing and orientation of attention are effective in both age groups. Furthermore, the SCR and ERP effects declined with decreasing ISI length. In addition, differences between the two groups were observable with the fastest presentation rates (i.e., 1 and 0.5 s ISI length). The most prominent difference was a shift of the peak of the frontal positivity from around 300 to 200 ms in the 19-38 years group while in the 55-72 years group the amplitude of the frontal P3 decreased linearly with decreasing ISI length. Taken together, this pattern of results does not suggest a general decline in processing efficacy with healthy aging. At least with very rare changes (here, the novels in the 10 s ISI condition) the OR is as effective in healthy older adults as in younger adults. With faster presentation rates, however, the efficacy of the OR decreases. This seems to result in a switch from novelty to deviant processing in younger adults, but less so in the group of older adults.}, language = {en} } @article{SchererFallerFriedrichetal.2015, author = {Scherer, Reinhold and Faller, Josef and Friedrich, Elisabeth V. C. and Opisso, Eloy and Costa, Ursula and K{\"u}bler, Andrea and M{\"u}ller-Putz, Gernot R.}, title = {Individually Adapted Imagery Improves Brain-Computer Interface Performance in End-Users with Disability}, series = {PLoS ONE}, volume = {10}, journal = {PLoS ONE}, number = {5}, doi = {10.1371/journal.pone.0123727}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-143021}, pages = {e0123727}, year = {2015}, abstract = {Brain-computer interfaces (BCIs) translate oscillatory electroencephalogram (EEG) patterns into action. Different mental activities modulate spontaneous EEG rhythms in various ways. 
Non-stationarity and inherent variability of EEG signals, however, make reliable recognition of modulated EEG patterns challenging. Able-bodied individuals who use a BCI for the first time achieve - on average - binary classification performance of about 75\%. Performance in users with central nervous system (CNS) tissue damage is typically lower. User training generally enhances reliability of EEG pattern generation and thus also robustness of pattern recognition. In this study, we investigated the impact of mental tasks on binary classification performance in BCI users with CNS tissue damage, such as persons with stroke or spinal cord injury (SCI). Motor imagery (MI), that is, the kinesthetic imagination of movement (e.g. squeezing a rubber ball with the right hand), is the "gold standard" and mainly used to modulate EEG patterns. Based on our recent results in able-bodied users, we hypothesized that pairwise combination of "brain-teaser" (e.g. mental subtraction and mental word association) and "dynamic imagery" (e.g. hand and feet MI) tasks significantly increases classification performance of induced EEG patterns in the selected end-user group. Within-day (How stable is the classification within a day?) and between-day (How well does a model trained on day one perform on unseen data of day two?) analysis of variability of mental task pair classification in nine individuals confirmed the hypothesis. We found that the use of the classical MI task pair hand vs. feet leads to significantly lower classification accuracy - on average up to 15\% less - in most users with stroke or SCI. User-specific selection of task pairs was again essential to enhance performance. We expect that the gained evidence will significantly contribute to making imagery-based BCI technology accessible to a larger population of users, including individuals with special needs due to CNS damage.}, language = {en} }