@phdthesis{Budig2018, author = {Budig, Benedikt}, title = {Extracting Spatial Information from Historical Maps: Algorithms and Interaction}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-092-4}, doi = {10.25972/WUP-978-3-95826-093-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-160955}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {viii, 160}, year = {2018}, abstract = {Historical maps are fascinating documents and a valuable source of information for scientists of various disciplines. Many of these maps are available as scanned bitmap images, but in order to make them searchable in useful ways, a structured representation of the contained information is desirable. This book deals with the extraction of spatial information from historical maps. This task cannot be expected to be solved fully automatically (since it involves difficult semantics), but it is also too tedious to be done manually at scale. The methodology used in this book combines the strengths of both computers and humans: it describes efficient algorithms to largely automate information extraction tasks and pairs these algorithms with smart user interactions to handle what is not understood by the algorithm. The effectiveness of this approach is shown for various kinds of spatial documents from the 16th to the early 20th century.}, subject = {Karte}, language = {en} }
@phdthesis{Peng2019, author = {Peng, Dongliang}, title = {An Optimization-Based Approach for Continuous Map Generalization}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-104-4}, doi = {10.25972/WUP-978-3-95826-105-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174427}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {xv, 132}, year = {2019}, abstract = {Maps are the main tool to represent geographical information. Geographical information is usually scale-dependent, so users need to have access to maps at different scales. In our digital age, this access is realized by zooming. As discrete changes during zooming tend to distract users, smooth changes are preferred. This is why some digital maps try to make zooming as continuous as possible. The process of producing maps at different scales with smooth changes is called continuous map generalization. In order to produce maps of high quality, cartographers often take into account additional requirements. These requirements are translated into models in map generalization. Optimization for map generalization is important not only because it finds optimal solutions in the sense of the models, but also because it helps us to evaluate the quality of the models. Optimization, however, becomes more delicate when we deal with continuous map generalization. In this area, there are requirements not only for a specific map but also for the relations between maps at different scales. This thesis is about continuous map generalization based on optimization. First, we present the background of our research topics. Second, we find optimal sequences for aggregating land-cover areas. We compare the A\$^{\!\star}\$ algorithm and integer linear programming for this task. Third, we continuously generalize county boundaries to provincial boundaries based on compatible triangulations. We morph between the two sets of boundaries, using dynamic programming to compute the correspondence.
Fourth, we continuously generalize buildings to built-up areas by aggregating and growing. In this work, we group buildings with the help of a minimum spanning tree. Fifth, we define vertex trajectories that allow us to morph between polylines. We require that both the angles and the edge lengths change linearly over time. As it is impossible to fulfill all of these requirements simultaneously, we mediate between them using least-squares adjustment. Sixth, we discuss the performance of some commonly used data structures for a specific spatial problem. Seventh, we conclude this thesis and present open problems.}, subject = {Generalisierung}, language = {en} }
@phdthesis{Niebler2019, author = {Niebler, Thomas}, title = {Extracting and Learning Semantics from Social Web Data}, doi = {10.25972/OPUS-17866}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178666}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Making machines understand natural language is a dream of mankind that has existed for a very long time. Early attempts at programming machines to converse with humans in a supposedly intelligent way relied on phrase lists and simple keyword matching. However, such approaches cannot provide semantically adequate answers, as they do not consider the specific meaning of the conversation. Thus, if we want to enable machines to actually understand language, we need to be able to access semantically relevant background knowledge. For this, it is possible to query so-called ontologies, which are large networks containing knowledge about real-world entities and their semantic relations. However, creating such ontologies is a tedious task, as often extensive expert knowledge is required. Thus, we need to find ways to automatically construct and update ontologies that fit human intuition of semantics and semantic relations. More specifically, we need to determine semantic entities and find relations between them. While this is usually done on large corpora of unstructured text, previous work has shown that we can at least facilitate the first issue of extracting entities by considering special data such as tagging data or human navigational paths. Here, we do not need to detect the actual semantic entities, as they are already provided because of the way those data are collected. Thus, we can mainly focus on the problem of assessing the degree of semantic relatedness between tags or web pages. However, several issues need to be overcome if we want to approximate human intuition of semantic relatedness. For this, it is necessary to represent words and concepts in a way that allows easy and highly precise semantic characterization. This also largely depends on the quality of the data from which these representations are constructed. In this thesis, we extract semantic information from both tagging data created by users of social tagging systems and human navigation data in different semantics-driven social web systems. Our main goal is to construct high-quality and robust vector representations of words which can then be used to measure the relatedness of semantic concepts. First, we show that navigation in the social media systems Wikipedia and BibSonomy is driven by a semantic component. After this, we discuss and extend methods to model the semantic information in tagging data as low-dimensional vectors. Furthermore, we show that tagging pragmatics influences different facets of tagging semantics.
We then investigate the usefulness of human navigational paths in several different settings on Wikipedia and BibSonomy for measuring semantic relatedness. Finally, we propose a metric-learning-based algorithm to adapt pre-trained word embeddings to datasets containing human judgments of semantic relatedness. This work contributes to the field of studying semantic relatedness between words by proposing methods to extract semantic relatedness from web navigation, to learn high-quality and low-dimensional word representations from tagging data, and to learn semantic relatedness from any kind of vector representation by exploiting human feedback. Applications lie first and foremost in ontology learning for the Semantic Web, but also in semantic search and query expansion.}, subject = {Semantik}, language = {en} }
@phdthesis{Loeffler2021, author = {L{\"o}ffler, Andre}, title = {Constrained Graph Layouts: Vertices on the Outer Face and on the Integer Grid}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-146-4}, doi = {10.25972/WUP-978-3-95826-147-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215746}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {viii, 161}, year = {2021}, abstract = {Constraining graph layouts - that is, restricting the placement of vertices and the routing of edges to obey certain constraints - is common practice in graph drawing. In this book, we discuss algorithmic results on two different restriction types: placing vertices on the outer face and on the integer grid. For the first type, we look into the outer k-planar and outer k-quasi-planar graphs, as well as giving a linear-time algorithm to recognize full and closed outer k-planar graphs using Monadic Second-order Logic. For the second type, we consider the problem of transferring a given planar drawing onto the integer grid while preserving the original drawing's topology; we also generalize a variant of Cauchy's rigidity theorem for orthogonal polyhedra of genus 0 to those of arbitrary genus.}, subject = {Graphenzeichnen}, language = {en} }
@phdthesis{Geissler2022, author = {Geißler, Stefan}, title = {Performance Evaluation of Next-Generation Data Plane Architectures and their Components}, issn = {1432-8801}, doi = {10.25972/OPUS-26015}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260157}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {In this doctoral thesis, we cover the performance evaluation of next-generation data plane architectures, comprising complex software as well as programmable hardware components that allow fine-grained configuration. In the scope of the thesis, we propose mechanisms to monitor the performance of individual components and model key performance indicators of software-based packet processing solutions. We present novel approaches towards network abstraction that allow the integration of heterogeneous data plane technologies into a single network while maintaining total transparency between control and data plane. Finally, we investigate a full, complex system consisting of multiple software-based solutions and perform a detailed performance analysis. We employ simulative approaches to investigate overload control mechanisms that allow efficient operation under adverse conditions.
The contributions of this work build the foundation for future research in the areas of network softwarization and network function virtualization.}, subject = {Leistungsbewertung}, language = {en} }
@phdthesis{Somody2023, author = {Somody, Joseph Christian Campbell}, title = {Leveraging deep learning for identification and structural determination of novel protein complexes from \(in\) \(situ\) electron cryotomography of \(Mycoplasma\) \(pneumoniae\)}, doi = {10.25972/OPUS-31344}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313447}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The holy grail of structural biology is to study a protein in situ, and this goal has been fast approaching since the resolution revolution and the achievement of atomic resolution. A cell's interior is not a dilute environment, and proteins have evolved to fold and function as needed in that environment; as such, an investigation of a cellular component should ideally include the full complexity of the cellular environment. Imaging whole cells in three dimensions using electron cryotomography is the best method to accomplish this goal, but it comes with a limitation on sample thickness and produces noisy data that are not amenable to direct analysis. This thesis establishes a novel workflow to systematically analyse whole-cell electron cryotomography data in three dimensions and to find and identify instances of protein complexes in the data, laying the groundwork for successfully determining their structure and identity. Mycoplasma pneumoniae is a very small parasitic bacterium with fewer than 700 protein-coding genes; it is thin enough and small enough to be imaged in large quantities by electron cryotomography, and it can grow directly on the grids used for imaging, making it ideal for exploratory studies in structural proteomics. As part of the workflow, a methodology for training deep-learning-based particle-picking models is established. As a proof of principle, a dataset of whole-cell Mycoplasma pneumoniae tomograms is used with this workflow to characterize a novel membrane-associated complex observed in the data. Ultimately, 25431 such particles are picked from 353 tomograms and refined to a density map with a resolution of 11 {\AA}. Making good use of orthogonal datasets to filter the search space and verify results, structures were predicted for candidate proteins and checked for a suitable fit in the density map. In the end, with this approach, nine proteins were found to be part of the complex, which appears to be associated with chaperone activity and to interact with translocon machinery. Visual proteomics refers to the ultimate potential of in situ electron cryotomography: the comprehensive interpretation of tomograms. The workflow presented here is demonstrated to help in reaching that potential.}, subject = {Kryoelektronenmikroskopie}, language = {en} }
@phdthesis{Huber2023, author = {Huber, Stephan}, title = {Proxemo: Documenting Observed Emotions in HCI}, doi = {10.25972/OPUS-30573}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305730}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {For formative evaluations of user experience (UX), a variety of methods have been developed over the years. However, most techniques require the users to interact with the study as a secondary task. This active involvement in the evaluation is not inclusive of all users and potentially biases the experience currently being studied.
Yet there is a lack of methods for situations in which the user has no spare cognitive resources. This condition occurs when 1) users' cognitive abilities are impaired (e.g., people with dementia) or 2) users are confronted with very demanding tasks (e.g., air traffic controllers). In this work, we focus on emotions as a key component of UX and propose the new structured observation method Proxemo for formative UX evaluations. Proxemo allows qualified observers to document users' emotions by proxy in real time and then directly link them to triggers. Technically, this is achieved by synchronising the timestamps of emotions documented by observers with a video recording of the interaction. In order to facilitate the documentation of observed emotions in highly diverse contexts, we conceptualise and implement two separate versions of a documentation aid named Proxemo App. For formative UX evaluations of technology-supported reminiscence sessions with people with dementia, we create a smartwatch app to discreetly document emotions from the categories anger, general alertness, pleasure, wistfulness and pride. For formative UX evaluations of prototypical user interfaces with air traffic controllers, we create a smartphone app to efficiently document emotions from the categories anger, boredom, surprise, stress and pride. Descriptive case studies in both application domains indicate the feasibility and utility of the method Proxemo and the appropriateness of the respectively adapted design of the Proxemo App. The third part of this work is a series of meta-evaluation studies to determine quality criteria of Proxemo. We evaluate Proxemo regarding its reliability, validity, thoroughness and effectiveness, and compare Proxemo's efficiency and the observers' experience to documentation with pen and paper. Proxemo is reliable; it is also more efficient, thorough and effective than handwritten notes and provides a better UX to observers. Proxemo compares well with existing methods where benchmarks are available. With Proxemo we contribute a validated structured observation method that has been shown to meet the requirements of formative UX evaluations in the extreme contexts of users with cognitive impairments or high task demands. Proxemo is agnostic regarding researchers' theoretical approaches and unites reductionist and holistic perspectives within one method. Future work should explore the applicability of Proxemo in further domains and extend the list of audited quality criteria to include, for instance, downstream utility. With respect to basic research, we strive to better understand the sources leading observers to empathic judgments and propose reminiscence and older adults as a model environment for investigating mixed emotions.}, subject = {Gef{\"u}hl}, language = {en} }
@phdthesis{Nogatz2023, author = {Nogatz, Falco}, title = {Defining and Implementing Domain-Specific Languages with Prolog}, doi = {10.25972/OPUS-30187}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301872}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The landscape of today's programming languages is manifold. With the diversity of applications, it becomes increasingly difficult to address and specify the required programs adequately. This often leads to newly designed and implemented domain-specific languages. They enable domain experts to express knowledge in their preferred format, resulting in more readable and concise programs.
Due to its flexible and declarative syntax without reserved keywords, the logic programming language Prolog is particularly suitable for defining and embedding domain-specific languages. This thesis addresses the questions and challenges that arise when integrating domain-specific languages into Prolog. We compare the two approaches of defining them either externally or internally, and provide assisting tools for each. The grammar of a formal language is usually defined in the extended Backus-Naur form. In this work, we handle this formalism as a domain-specific language in Prolog, and define term expansions that allow translating it into equivalent definite clause grammars. We present the package library(dcg4pt) for SWI-Prolog, which enriches them with an additional argument to automatically process the term's corresponding parse tree. To simplify the work with definite clause grammars, we visualise their application with a web-based tracer. The external integration of domain-specific languages requires the programmer to keep the grammar, parser, and interpreter in sync. In many cases, domain-specific languages can instead be directly embedded into Prolog by providing appropriate operator definitions. In addition, we propose syntactic extensions for Prolog to expand its expressiveness, for instance to state logic formulas with their connectives verbatim. This makes it possible to use all tools that were originally written for Prolog, for instance code linters and editors with syntax highlighting. We present the package library(plammar), a standard-compliant parser for Prolog source code, written in Prolog. It is able to automatically infer from example sentences the required operator definitions with their classes and precedences as well as the required Prolog language extensions. As a result, we can automatically answer the question: Is it possible to model these example sentences as valid Prolog clauses, and how? We discuss and apply the two approaches of internal and external integration to several domain-specific languages, namely the extended Backus-Naur form, GraphQL, XPath, and a controlled natural language to represent expert rules in if-then form. The created toolchain with library(dcg4pt) and library(plammar) yields new application opportunities for static Prolog source code analysis, which we also present.}, subject = {PROLOG}, language = {en} }
@phdthesis{Bleier2023, author = {Bleier, Michael}, title = {Underwater Laser Scanning - Refractive Calibration, Self-calibration and Mapping for 3D Reconstruction}, isbn = {978-3-945459-45-4}, doi = {10.25972/OPUS-32269}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322693}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {There is great interest in affordable, precise and reliable metrology underwater: Archaeologists want to document artifacts in situ in high detail. In marine research, biologists require tools to monitor coral growth, and geologists need recordings to model sediment transport. Furthermore, for offshore construction projects, maintenance, and inspection, millimeter-accurate measurements of defects and offshore structures are essential. While the process of digitizing individual objects and complete sites on land is well understood and standard methods, such as Structure from Motion or terrestrial laser scanning, are regularly applied, precise underwater surveying with high resolution is still a complex and difficult task.
Applying optical scanning techniques in water is challenging due to reduced visibility caused by turbidity and light absorption. However, optical underwater scanners provide significant advantages in terms of achievable resolution and accuracy compared to acoustic systems. This thesis proposes an underwater laser scanning system and the algorithms for creating dense and accurate 3D scans in water. It is based on laser triangulation, and its main optical components are an underwater camera and a cross-line laser projector. The prototype is configured with a motorized yaw axis for capturing scans from a tripod. Alternatively, it is mounted to a moving platform for mobile mapping. The main focus lies on the refractive calibration of the underwater camera and laser projector, the image processing, and the 3D reconstruction. For highest accuracy, the refraction at the individual media interfaces must be taken into account. This is addressed by an optimization-based calibration framework using a physical-geometric camera model derived from an analytical formulation of a ray-tracing projection model. In addition to scanning underwater structures, this work presents the 3D acquisition of semi-submerged structures and the correction of refraction effects. As in-situ calibration in water is complex and time-consuming, the challenge of transferring an in-air scanner calibration to water without re-calibration is investigated, as well as self-calibration techniques for structured light. The system was successfully deployed in various configurations for both static scanning and mobile mapping. An evaluation of the calibration and 3D reconstruction using reference objects and a comparison of free-form surfaces in clear water demonstrate the high accuracy potential in the range of one millimeter to less than one centimeter, depending on the measurement distance. Mobile underwater mapping and motion compensation based on visual-inertial odometry are demonstrated using a new optical underwater scanner based on fringe projection. Continuous registration of individual scans allows the acquisition of 3D models from an underwater vehicle. RGB images captured in parallel are used to create 3D point clouds of underwater scenes in full color. 3D maps are useful to the operator during the remote control of underwater vehicles and provide the building blocks to enable offshore inspection and surveying tasks. The advancing automation of the measurement technology will allow non-experts to use it, significantly reduce acquisition time and increase accuracy, making underwater metrology more cost-effective.}, subject = {Selbstkalibrierung}, language = {en} }
@phdthesis{Krenzer2023, author = {Krenzer, Adrian}, title = {Machine learning to support physicians in endoscopic examinations with a focus on automatic polyp detection in images and videos}, doi = {10.25972/OPUS-31911}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319119}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Deep learning enables enormous progress in many computer vision-related tasks. Artificial Intelligence (AI) steadily yields new state-of-the-art results in the field of detection and classification, and in these tasks AI performance equals or exceeds human performance. These achievements have impacted many domains, including medical applications. One particular field of medical applications is gastroenterology. In gastroenterology, machine learning algorithms are used to assist examiners during interventions.
One of the most critical concerns for gastroenterologists is the development of Colorectal Cancer (CRC), which is one of the leading causes of cancer-related deaths worldwide. Detecting polyps in screening colonoscopies is the essential procedure to prevent CRC. During a colonoscopy, the gastroenterologist uses an endoscope to screen the whole colon for polyps. Polyps are mucosal growths that can vary in severity. This thesis supports gastroenterologists in their examinations with automated detection and classification systems for polyps. The main contribution is a real-time polyp detection system. This system is ready to be installed in any gastroenterology practice worldwide using open-source software. The system achieves state-of-the-art detection results and is currently being evaluated in a clinical trial in four different centers in Germany. The thesis presents two additional key contributions: The first is a polyp detection system with extended vision tested in an animal trial. Polyps often hide behind folds or in uninvestigated areas. Therefore, the polyp detection system with extended vision uses an endoscope assisted by two additional cameras to see behind those folds. If a polyp is detected, the endoscopist receives a visual signal. While the detection system handles the two additional camera inputs, the endoscopist focuses on the main camera as usual. The second comprises two polyp classification models, one for classification based on shape (Paris classification) and the other based on surface and texture (NBI International Colorectal Endoscopic (NICE) classification). Both classifications help the endoscopist with treatment decisions for the detected polyp. The key algorithms of the thesis achieve state-of-the-art performance. Notably, the polyp detection system shows an F1 score of 90.25 \% on a highly demanding video data set while working in real time. These results exceed those of all real-time systems in the literature. Furthermore, the first preliminary results of the clinical trial of the polyp detection system suggest a high Adenoma Detection Rate (ADR). In the preliminary study, all polyps were detected by the polyp detection system, and the system achieved a high usability score of 96.3 (max 100). The Paris classification model achieved an F1 score of 89.35 \%, which is state-of-the-art. The NICE classification model achieved an F1 score of 81.13 \%. Furthermore, a large data set for polyp detection and classification was created during this thesis. For this purpose, a fast and robust annotation system called Fast Colonoscopy Annotation Tool (FastCAT) was developed. The system simplifies the annotation process for gastroenterologists: the gastroenterologists only annotate key parts of the endoscopic video. Afterward, those video parts are pre-labeled by a polyp detection AI to speed up the process. After the AI has pre-labeled the frames, non-experts correct and finish the annotation. This annotation process is fast and ensures high quality. FastCAT reduces the overall workload of the gastroenterologist on average by a factor of 20 compared to an open-source state-of-the-art annotation tool.}, subject = {Deep Learning}, language = {en} }