@article{EndlichRichterMarxetal.2020,
  author = {Endlich, Darius and Richter, Tobias and Marx, Peter and Lenhard, Wolfgang and Moll, Kristina and Witzel, Bj{\"o}rn and Schulte-K{\"o}rne, Gerd},
  title = {Spelling Error Detection: A Valid and Economical Task for Assessing Spelling Skills in Elementary-School Children},
  series = {Zeitschrift f{\"u}r Entwicklungspsychologie und P{\"a}dagogische Psychologie},
  volume = {52},
  journal = {Zeitschrift f{\"u}r Entwicklungspsychologie und P{\"a}dagogische Psychologie},
  number = {1-2},
  issn = {0049-8637},
  doi = {10.1026/0049-8637/a000227},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-244665},
  pages = {25-40},
  year = {2020},
  abstract = {The ability to spell words correctly is a key competence for educational and professional achievement. Economical procedures are essential to identifying children with spelling problems as early as possible. Given the strong evidence showing that reading and spelling are based on the same orthographic knowledge, error-detection tasks (EDTs) could be considered such an economical procedure. Although EDTs are widely used in English-speaking countries, the few studies in German-speaking countries investigated only pupils in secondary school. The present study investigated N = 1,513 children in elementary school. We predicted spelling competencies (measured by dictation or gap-fill dictation) based on an EDT via linear regression. Error-detection abilities significantly predicted spelling competencies (R² between .509 and .679), indicating a strong connection. Predictive values in identifying children with poor spelling abilities with an EDT proved to be sufficient. Error detection for the assessment of spelling skills is therefore a valid instrument for transparent languages as well.},
  language = {en}
}

@article{GaryLenhardLenhard2021,
  author = {Gary, Sebastian and Lenhard, Wolfgang and Lenhard, Alexandra},
  title = {Modelling norm scores with the cNORM package in R},
  series = {Psych},
  volume = {3},
  journal = {Psych},
  number = {3},
  issn = {2624-8611},
  doi = {10.3390/psych3030033},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284143},
  pages = {501-521},
  year = {2021},
  abstract = {In this article, we explain and demonstrate how to model norm scores with the cNORM package in R. This package is designed specifically to determine norm scores when the latent ability to be measured covaries with age or other explanatory variables such as grade level. The mathematical method used in this package draws on polynomial regression to model a three-dimensional hyperplane that smoothly and continuously captures the relation between raw scores, norm scores and the explanatory variable. By doing so, it overcomes the typical problems of classical norming methods, such as overly large age intervals, missing norm scores, large amounts of sampling error in the subsamples or huge requirements with regard to the sample size. After a brief introduction to the mathematics of the model, we describe the individual methods of the package.
We close the article with a practical example using data from a real reading comprehension test.},
  language = {en}
}

@article{KirschmannLenhardSuggate2021,
  author = {Kirschmann, Nicole and Lenhard, Wolfgang and Suggate, Sebastian},
  title = {Influences from working memory, word and sentence reading on passage comprehension and teacher ratings},
  series = {Journal of Research in Reading},
  volume = {44},
  journal = {Journal of Research in Reading},
  number = {4},
  doi = {10.1111/1467-9817.12373},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258043},
  pages = {817-836},
  year = {2021},
  abstract = {Reading fluency is a major determinant of reading comprehension but depends on moderating factors such as auditory working memory (AWM), word recognition and sentence reading skills. We investigated how word and sentence reading skills relate to reading comprehension differentially across the first 6 years of schooling and tested which reading variable best predicted teacher judgements. We conducted our research in a rather transparent language, namely, German, drawing on two different data sets. The first was derived from the normative sample of a reading comprehension test (ELFE-II), including 2056 first to sixth graders with reading tests at the word, sentence and text level. The second sample included 114 students from second to fourth grade. The latter completed a series of tests that measured word and sentence reading fluency, pseudoword reading, AWM, reading comprehension, self-concept and teacher ratings. We analysed the data via hierarchical regression analyses to predict reading comprehension and teacher judgements. The impact of reading fluency was strongest in second and third grade, afterwards superseded by sentence comprehension. AWM significantly contributed to reading comprehension independently of reading fluency, whereas basic decoding skills disappeared after considering fluency. Students' AWM and reading comprehension predicted teacher judgements on reading fluency. Reading comprehension judgements depended both on the students' self-concept and reading comprehension. Our results underline that the role of word reading accuracy for reading comprehension quickly diminishes during elementary school and that teachers base their assessments mainly on the current reading comprehension skill.},
  language = {en}
}

@article{LenhardLenhardGary2019,
  author = {Lenhard, Alexandra and Lenhard, Wolfgang and Gary, Sebastian},
  title = {Continuous norming of psychometric tests: A simulation study of parametric and semi-parametric approaches},
  series = {PLoS ONE},
  volume = {14},
  journal = {PLoS ONE},
  number = {9},
  doi = {10.1371/journal.pone.0222279},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200480},
  pages = {e0222279},
  year = {2019},
  abstract = {Continuous norming methods have seldom been subjected to scientific review. In this simulation study, we compared parametric with semi-parametric continuous norming methods in psychometric tests by constructing a fictitious population model within which a latent ability increases with age across seven age groups. We drew samples of different sizes (n = 50, 75, 100, 150, 250, 500 and 1,000 per age group) and simulated the results of an easy, medium, and difficult test scale based on Item Response Theory (IRT). We subjected the resulting data to different continuous norming methods and compared the data fit under the different test conditions with a representative cross-validation dataset of n = 10,000 per age group.
The most significant differences were found in suboptimal (i.e., too easy or too difficult) test scales and in ability levels that were far from the population mean. We discuss the results with regard to the selection of the appropriate modeling techniques in psychometric test construction, the required sample sizes, and the requirement to report appropriate quantitative and qualitative test quality criteria for continuous norming methods in test manuals.},
  language = {en}
}
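As a rough illustration of the continuous-norming approach summarised in the GaryLenhardLenhard2021 and LenhardLenhardGary2019 abstracts above, the norming hyperplane can be sketched as a polynomial regression of the raw score on the norm score and the explanatory variable. The notation below (r for the raw score, \ell for the norm score or person location, a for the explanatory variable such as age, k for the maximal polynomial degree, and c_{st} for the regression weights) is chosen for this sketch only and is not quoted from the cited articles:

\[
  r \;\approx\; f(\ell, a) \;=\; \sum_{s=0}^{k} \sum_{t=0}^{k} c_{st}\, \ell^{s} a^{t}
\]

Fitting such a surface smoothly and continuously links raw scores and norm scores across the whole range of the explanatory variable, which is the relation that, according to the abstract, the cNORM package models via polynomial regression.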