@article{StegmannAndreattaWieser2023,
  author   = {Stegmann, Yannik and Andreatta, Marta and Wieser, Matthias J.},
  title    = {The effect of inherently threatening contexts on visuocortical engagement to conditioned threat},
  series   = {Psychophysiology},
  volume   = {60},
  journal  = {Psychophysiology},
  number   = {4},
  doi      = {10.1111/psyp.14208},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-312465},
  year     = {2023},
  abstract = {Fear and anxiety are crucial for adaptive responding in life-threatening situations. Whereas fear is a phasic response to an acute threat accompanied by selective attention, anxiety is characterized by a sustained feeling of apprehension and hypervigilance during situations of potential threat. In the current literature, fear and anxiety are usually considered mutually exclusive, with partially separated neural underpinnings. However, there is accumulating evidence that challenges this distinction between fear and anxiety, and simultaneous activation of fear and anxiety networks has been reported. Therefore, the current study experimentally tested potential interactions between fear and anxiety. Fifty-two healthy participants completed a differential fear conditioning paradigm followed by a test phase in which the conditioned stimuli were presented in front of threatening or neutral contextual images. To capture defense system activation, we recorded subjective (threat, US-expectancy), physiological (skin conductance, heart rate) and visuocortical (steady-state visual evoked potentials) responses to the conditioned stimuli as a function of contextual threat. Results demonstrated successful fear conditioning in all measures. In addition, threat and US-expectancy ratings, cardiac deceleration, and visuocortical activity were enhanced for fear cues presented in threatening compared with neutral contexts. These results are in line with an additive or interactive rather than an exclusive model of fear and anxiety, indicating facilitated defensive behavior to imminent danger in situations of potential threat.},
  language = {en}
}

@article{ThieleRichterHilger2023,
  author   = {Thiele, Jonas A. and Richter, Aylin and Hilger, Kirsten},
  title    = {Multimodal brain signal complexity predicts human intelligence},
  series   = {eNeuro},
  volume   = {10},
  journal  = {eNeuro},
  number   = {2},
  doi      = {10.1523/ENEURO.0345-22.2022},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-312949},
  year     = {2023},
  abstract = {Spontaneous brain activity builds the foundation for human cognitive processing during external demands. Neuroimaging studies based on functional magnetic resonance imaging (fMRI) identified specific characteristics of spontaneous (intrinsic) brain dynamics to be associated with individual differences in general cognitive ability, i.e., intelligence. However, fMRI research is inherently limited by low temporal resolution, thus preventing conclusions about neural fluctuations within the range of milliseconds. Here, we used resting-state electroencephalographical (EEG) recordings from 144 healthy adults to test whether individual differences in intelligence (Raven's Advanced Progressive Matrices scores) can be predicted from the complexity of temporally highly resolved intrinsic brain signals. We compared different operationalizations of brain signal complexity (multiscale entropy, Shannon entropy, Fuzzy entropy, and specific characteristics of microstates) regarding their relation to intelligence. The results indicate that associations between brain signal complexity measures and intelligence are of small effect sizes (r ∼ 0.20) and vary across different spatial and temporal scales. Specifically, higher intelligence scores were associated with lower complexity in local aspects of neural processing, and less activity in task-negative brain regions belonging to the default-mode network. Finally, we combined multiple measures of brain signal complexity to show that individual intelligence scores can be significantly predicted with a multimodal model within the sample (10-fold cross-validation) as well as in an independent sample (external replication, N = 57). In sum, our results highlight the temporal and spatial dependency of associations between intelligence and intrinsic brain dynamics, proposing multimodal approaches as promising means for future neuroscientific research on complex human traits.},
  language = {en}
}

@article{StegmannAndreattaPaulietal.2023,
  author   = {Stegmann, Yannik and Andreatta, Marta and Pauli, Paul and Keil, Andreas and Wieser, Matthias J.},
  title    = {Investigating sustained attention in contextual threat using steady-state VEPs evoked by flickering video stimuli},
  series   = {Psychophysiology},
  volume   = {60},
  journal  = {Psychophysiology},
  number   = {5},
  doi      = {10.1111/psyp.14229},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-312430},
  year     = {2023},
  abstract = {Anxiety is characterized by anxious anticipation and heightened vigilance to uncertain threat. However, if threat is not reliably indicated by a specific cue, the context in which threat was previously experienced becomes its best predictor, leading to anxiety. A suitable means to induce anxiety experimentally is context conditioning: In one context (CTX+), an unpredictable aversive stimulus (US) is repeatedly presented, in contrast to a second context (CTX-), in which no US is ever presented. In this EEG study, we investigated attentional mechanisms during acquisition and extinction learning in 38 participants, who underwent a context conditioning protocol. Flickering video stimuli (32 s clips depicting virtual offices representing CTX+/-) were used to evoke steady-state visual evoked potentials (ssVEPs) as an index of visuocortical engagement with the contexts. Analyses of the electrocortical responses suggest a successful induction of the ssVEP signal by video presentation in flicker mode. Furthermore, we found clear indices of context conditioning and extinction learning on a subjective level, while cortical processing of the CTX+ was unexpectedly reduced during video presentation. The differences between CTX+ and CTX- diminished during extinction learning. Together, these results indicate that the dynamic sensory input of the video presentation leads to disruptions in the ssVEP signal, which are greater for motivationally significant, threatening contexts.},
  language = {en}
}