@article{HoernleinMandelIflandetal.2011,
  author   = {H{\"o}rnlein, Alexander and Mandel, Alexander and Ifland, Marianus and L{\"u}neberg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank},
  title    = {Akzeptanz medizinischer Trainingsf{\"a}lle als Erg{\"a}nzung zu Vorlesungen},
  journal  = {GMS Zeitschrift f{\"u}r Medizinische Ausbildung},
  volume   = {28},
  number   = {3},
  pages    = {Doc42},
  year     = {2011},
  doi      = {10.3205/zma000754},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-133569},
  abstract = {Introduction: Medical training cases (virtual patients) are in widespread use in student education. Most publications report on the development of and experience with training cases in a single course. In this paper we compare the acceptance of training case collections with differing usage patterns, deployed as a supplement to lectures at the medical faculty of Wuerzburg University over a period of three semesters. Methods: The training cases were developed with the authoring tool CaseTrain and are available to students via WueCampus, the Moodle-based eLearning platform of Wuerzburg University. Various data on usage and acceptance are collected automatically. Results: From the winter semester (WS) 08/09 until WS 09/10, 19 courses with about 200 cases were available. In each semester, about 550 different medical students from W{\"u}rzburg and 50 students from other universities processed about 12,000 training cases and filled in about 2,000 evaluation forms. Usage varied between courses, from fewer than 50 to more than 5,000 processed cases. Discussion: Although students demand training cases as a supplement to all lectures, the data show that usage does not depend primarily on the quality of the available training cases. Instead, the cases of nearly all collections were processed extremely often shortly before the examination. This indicates that the degree of usage depends primarily on the perceived relevance of the training cases to the examination.},
  language = {de}
}

@article{MandelHoernleinIflandetal.2011,
  author   = {Mandel, Alexander and H{\"o}rnlein, Alexander and Ifland, Marianus and L{\"u}neburg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank},
  title    = {Aufwandsanalyse f{\"u}r computerunterst{\"u}tzte Multiple-Choice Papierklausuren},
  journal  = {GMS Journal for Medical Education},
  volume   = {28},
  number   = {4},
  pages    = {1-15, Doc55},
  year     = {2011},
  doi      = {10.3205/zma000767},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134386},
  abstract = {Introduction: Multiple-choice examinations are still fundamental to assessment in medical degree programs. In addition to content-related research, optimizing the technical procedure is an important question. Medical examiners face three options: paper-based examinations with or without computer support, or completely electronic examinations. Critical aspects are the effort for formatting, the logistical effort during the actual examination, the quality, promptness, and effort of correction, the time needed to make the documents available for inspection by the students, and the statistical analysis of the examination results. Methods: For three semesters, a computer program for entering and formatting MC questions in medical and other paper-based examinations has been in use and under continuous improvement at Wuerzburg University. Eleven medical examinations were conducted with the program and evaluated automatically in the winter semester (WS) 2009/10, twelve in the summer semester (SS) 2010, and thirteen in WS 2010/11. For the last two semesters, the remaining manual workload was recorded. Results: For an average examination with about 140 participants and about 35 questions, the effort for formatting and subsequent analysis, including adjustments to the analysis, was 5-7 hours for exams without complications in WS 2009/10, about 2 hours in SS 2010, and about 1.5 hours in WS 2010/11. Including exams with complications, the average time was about 3 hours per exam in SS 2010 and about 2.67 hours in WS 2010/11. Discussion: For conventional multiple-choice exams, computer-supported formatting and evaluation of paper-based exams offers lecturers a significant time reduction compared with manual correction. Compared to purely electronic exams, it requires a much simpler technological infrastructure and fewer staff during the examination.},
  language = {de}
}

@article{FreyLeutritzBackhausetal.2022,
  author   = {Frey, Anna and Leutritz, Tobias and Backhaus, Joy and H{\"o}rnlein, Alexander and K{\"o}nig, Sarah},
  title    = {Item format statistics and readability of extended matching questions as an effective tool to assess medical students},
  journal  = {Scientific Reports},
  volume   = {12},
  number   = {1},
  year     = {2022},
  doi      = {10.1038/s41598-022-25481-y},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300485},
  abstract = {Testing based on multiple choice questions (MCQ) is one of the most established forms of assessment, not only in the medical field. Extended matching questions (EMQ) represent a specific type of MCQ designed to require higher levels of cognition, such as problem-solving. The purpose of this evaluation was to assess the suitability and efficiency of EMQ as an assessment method. EMQ were incorporated into the end-of-semester examination in internal medicine, in which 154 students participated, and compared with three established MCQ types. Item and examination quality were investigated, as well as readability and processing time. EMQ were slightly more difficult to score; however, both item discrimination and the discrimination index were higher than for the other item types. EMQ were significantly longer and required more processing time, but readability was improved. Students judged EMQ as clearly challenging but attributed significantly higher clinical relevance to them than to established MCQ formats. Using the Spearman-Brown prediction, only ten EMQ items would be needed to reproduce the Cronbach's alpha value of 0.75 attained for the overall examination. EMQ proved to be both efficient and suitable for assessing medical students, demonstrating powerful characteristics of reliability. Their expanded use in favor of common MCQ could save examination time without sacrificing statistical quality.},
  language = {en}
}