@article{FischerHarteltPuppe2023,
  author   = {Fischer, Norbert and Hartelt, Alexander and Puppe, Frank},
  title    = {Line-level layout recognition of historical documents with background knowledge},
  journal  = {Algorithms},
  volume   = {16},
  number   = {3},
  year     = {2023},
  issn     = {1999-4893},
  doi      = {10.3390/a16030136},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-310938},
  language = {en},
  abstract = {Digitization and transcription of historic documents offer new research opportunities for humanists and are the topics of many edition projects. However, manual work is still required for the main phases of layout recognition and the subsequent optical character recognition (OCR) of early printed documents. This paper describes and evaluates how deep learning approaches recognize text lines and can be extended to layout recognition using background knowledge. The evaluation was performed on five corpora of early prints from the 15th and 16th centuries, representing a variety of layout features. While the main text with standard layouts could be recognized in the correct reading order with a precision and recall of up to 99.9\%, complex layouts were also recognized at rates as high as 90\% by using background knowledge, the full potential of which was revealed when many pages of the same source were transcribed.}
}

@article{HarteltPuppe2022,
  author   = {Hartelt, Alexander and Puppe, Frank},
  title    = {Optical Medieval Music Recognition using background knowledge},
  journal  = {Algorithms},
  volume   = {15},
  number   = {7},
  year     = {2022},
  issn     = {1999-4893},
  doi      = {10.3390/a15070221},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-278756},
  language = {en},
  abstract = {This paper examines the effect of exploiting background knowledge, whose use has been neglected in the literature, to improve an OMR (Optical Music Recognition) deep learning pipeline for transcribing medieval, monophonic, handwritten music from the 12th to the 14th century. Various types of background knowledge about overlapping notes and text, clefs, graphical connections (neumes), and their implications for the positions of the notes in the staff were used and evaluated. Moreover, the effects of different encoder/decoder architectures and of different datasets for training a mixed model and for document-specific fine-tuning, based on an extended OMR pipeline with an additional post-processing step, were evaluated. The use of background models improves all metrics, in particular the melody accuracy rate (mAR), which is based on the insert, delete, and replace operations necessary to convert the generated melody into the correct melody. When using a mixed model and evaluating on a different dataset, our best model achieves a mAR of 90.4\% without fine-tuning and without post-processing, which is raised to 93.2\% mAR with background knowledge, a relative error reduction of nearly 30\%. With additional fine-tuning, the contribution of post-processing is even greater: the basic mAR of 90.5\% is raised to 95.8\% mAR, a relative error reduction of more than 50\%.}
}