@article{DopplerMeyerDovernetal.2019, author = {Doppler, Christopher E. J. and Meyer, Linda and Dovern, Anna and St{\"u}hmer-Beckh, Jaro and Weiss, Peter H. and Fink, Gereon R.}, title = {Differential impact of social and monetary reward on procedural learning and consolidation in aging and its structural correlates}, series = {Frontiers in Aging Neuroscience}, volume = {11}, journal = {Frontiers in Aging Neuroscience}, doi = {10.3389/fnagi.2019.00188}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-222394}, year = {2019}, abstract = {In young (n = 36, mean +/- SD: 24.8 +/- 4.5 years) and older (n = 34, mean +/- SD: 65.1 +/- 6.5 years) healthy participants, we employed a modified version of the Serial Reaction Time task to measure procedural learning (PL) and consolidation while providing monetary and social reward. Using voxel-based morphometry (VBM), we additionally determined the structural correlates of reward-related motor performance (RMP) and PL. Monetary reward had a beneficial effect on PL in the older subjects only. In contrast, social reward significantly enhanced PL in the older and consolidation in the young participants. VBM analyses revealed that motor performance related to monetary reward was associated with larger grey matter volume (GMV) of the left striatum in the young, and motor performance related to social reward with larger GMV of the medial orbitofrontal cortex in the older group. The differential effects of social reward in young (improved consolidation) and both social and monetary rewards in older (enhanced PL) healthy subjects point to the potential of rewards for interventions targeting aging-associated motor decline or stroke-induced motor deficits.}, language = {en} } @article{WaltmannSchlagenhaufDeserno2022, author = {Waltmann, Maria and Schlagenhauf, Florian and Deserno, Lorenz}, title = {Sufficient reliability of the behavioral and computational readouts of a probabilistic reversal learning task}, series = {Behavior Research Methods}, volume = {54}, journal = {Behavior Research Methods}, number = {6}, issn = {1554-3528}, doi = {10.3758/s13428-021-01739-7}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324246}, pages = {2993-3014}, year = {2022}, abstract = {Task-based measures that capture neurocognitive processes can help bridge the gap between brain and behavior. To transfer tasks to clinical application, reliability is a crucial benchmark because it imposes an upper bound to potential correlations with other variables (e.g., symptom or brain data). However, the reliability of many task readouts is low. In this study, we scrutinized the retest reliability of a probabilistic reversal learning task (PRLT) that is frequently used to characterize cognitive flexibility in psychiatric populations. We analyzed data from N = 40 healthy subjects, who completed the PRLT twice. We focused on how individual metrics are derived, i.e., whether data were partially pooled across participants and whether priors were used to inform estimates. We compared the reliability of the resulting indices across sessions, as well as the internal consistency of a selection of indices. We found good to excellent reliability for behavioral indices as derived from mixed-effects models that included data from both sessions. The internal consistency was good to excellent. For indices derived from computational modeling, we found excellent reliability when using hierarchical estimation with empirical priors and including data from both sessions.
Our results indicate that the PRLT is well equipped to measure individual differences in cognitive flexibility in reinforcement learning. However, this depends heavily on hierarchical modeling of the longitudinal data (whether sessions are modeled separately or jointly), on estimation methods, and on the combination of parameters included in computational models. We discuss implications for the applicability of PRLT indices in psychiatric research and as diagnostic tools.}, language = {en} } @article{WangLiuXiaoetal.2023, author = {Wang, Xiaoliang and Liu, Xuan and Xiao, Yun and Mao, Yue and Wang, Nan and Wang, Wei and Wu, Shufan and Song, Xiaoyong and Wang, Dengfeng and Zhong, Xingwang and Zhu, Zhu and Schilling, Klaus and Damaren, Christopher}, title = {On-orbit verification of RL-based APC calibrations for micrometre level microwave ranging system}, series = {Mathematics}, volume = {11}, journal = {Mathematics}, number = {4}, issn = {2227-7390}, doi = {10.3390/math11040942}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-303970}, year = {2023}, abstract = {Micrometre-level ranging accuracy between satellites on orbit relies on high-precision calibration of the antenna phase center (APC), which is currently accomplished through properly designed calibration maneuvers and batch estimation algorithms. However, unmodeled perturbations of the space dynamics and sensor-induced uncertainty complicate the situation in reality; ranging accuracy deteriorates, especially outside the antenna main lobe, when maneuvers are performed. This paper proposes an on-orbit APC calibration method that uses a reinforcement learning (RL) process, aiming to provide a micrometre-level, high-accuracy ranging datum for onboard instruments. The RL process used here is an improved temporal-difference advantage actor-critic algorithm (TDAAC), which mainly relies on two neural networks (NNs) for the critic and actor functions. The output of the TDAAC algorithm autonomously balances the amplitude of the APC calibration maneuvers against the sensitivity of the APC observations, with the objective of maximal APC estimation accuracy. The proposed RL-based APC calibration method was fully tested in software and in on-ground experiments, reaching an APC calibration accuracy of less than 2 mrad, and on on-orbit maneuver data from 11-12 April 2022 it achieved a calibration accuracy of 1-1.5 mrad after RL training. The proposed RL-based APC algorithm may be extended to proof-mass calibration scenarios with action feedback to the attitude determination and control system (ADCS), demonstrating its flexibility for spacecraft payload applications in the future.}, language = {en} }