@phdthesis{Somody2023, author = {Somody, Joseph Christian Campbell}, title = {Leveraging deep learning for identification and structural determination of novel protein complexes from \(in\) \(situ\) electron cryotomography of \(Mycoplasma\) \(pneumoniae\)}, doi = {10.25972/OPUS-31344}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313447}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The holy grail of structural biology is to study a protein in situ, and this goal has been fast approaching since the resolution revolution and the achievement of atomic resolution. A cell's interior is not a dilute environment, and proteins have evolved to fold and function as needed in that environment; as such, an investigation of a cellular component should ideally include the full complexity of the cellular environment. Imaging whole cells in three dimensions using electron cryotomography is the best method to accomplish this goal, but it comes with a limitation on sample thickness and produces noisy data unamenable to direct analysis. This thesis establishes a novel workflow to systematically analyse whole-cell electron cryotomography data in three dimensions and to find and identify instances of protein complexes in the data, setting up the determination of their structure and identity for success. Mycoplasma pneumoniae is a very small parasitic bacterium with fewer than 700 protein-coding genes, is thin enough and small enough to be imaged in large quantities by electron cryotomography, and can grow directly on the grids used for imaging, making it ideal for exploratory studies in structural proteomics. As part of the workflow, a methodology for training deep-learning-based particle-picking models is established. As a proof of principle, a dataset of whole-cell Mycoplasma pneumoniae tomograms is used with this workflow to characterize a novel membrane-associated complex observed in the data. Ultimately, 25431 such particles are picked from 353 tomograms and refined to a density map with a resolution of 11 {\AA}. Making good use of orthogonal datasets to filter the search space and verify results, structures were predicted for candidate proteins and checked for suitable fit in the density map. In the end, with this approach, nine proteins were found to be part of the complex, which appears to be associated with chaperone activity and to interact with translocon machinery. Visual proteomics refers to the ultimate potential of in situ electron cryotomography: the comprehensive interpretation of tomograms. The workflow presented here is demonstrated to help in reaching that potential.}, subject = {Kryoelektronenmikroskopie}, language = {en} } @phdthesis{Huber2023, author = {Huber, Stephan}, title = {Proxemo: Documenting Observed Emotions in HCI}, doi = {10.25972/OPUS-30573}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305730}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {For formative evaluations of user experience (UX), a variety of methods have been developed over the years. However, most techniques require the users to interact with the study as a secondary task. This active involvement in the evaluation is not inclusive of all users and potentially biases the experience currently being studied. Yet there is a lack of methods for situations in which the user has no spare cognitive resources.
This condition occurs when 1) users' cognitive abilities are impaired (e.g., people with dementia) or 2) users are confronted with very demanding tasks (e.g., air traffic controllers). In this work we focus on emotions as a key component of UX and propose the new structured observation method Proxemo for formative UX evaluations. Proxemo allows qualified observers to document users' emotions by proxy in real time and then directly link them to triggers. Technically this is achieved by synchronising the timestamps of emotions documented by observers with a video recording of the interaction. In order to facilitate the documentation of observed emotions in highly diverse contexts we conceptualise and implement two separate versions of a documentation aid named Proxemo App. For formative UX evaluations of technology-supported reminiscence sessions with people with dementia, we create a smartwatch app to discreetly document emotions from the categories anger, general alertness, pleasure, wistfulness and pride. For formative UX evaluations of prototypical user interfaces with air traffic controllers we create a smartphone app to efficiently document emotions from the categories anger, boredom, surprise, stress and pride. Descriptive case studies in both application domains indicate the feasibility and utility of the method Proxemo and the appropriateness of the respectively adapted design of the Proxemo App. The third part of this work is a series of meta-evaluation studies to determine quality criteria of Proxemo. We evaluate Proxemo regarding its reliability, validity, thoroughness and effectiveness, and compare Proxemo's efficiency and the observers' experience to documentation with pen and paper. Proxemo is reliable, as well as more efficient, thorough and effective than handwritten notes and provides a better UX to observers. Proxemo compares well with existing methods where benchmarks are available. With Proxemo we contribute a validated structured observation method that has been shown to meet the requirements of formative UX evaluations in the extreme contexts of users with cognitive impairments or high task demands. Proxemo is agnostic regarding researchers' theoretical approaches and unites reductionist and holistic perspectives within one method. Future work should explore the applicability of Proxemo for further domains and extend the list of audited quality criteria to include, for instance, downstream utility. With respect to basic research we strive to better understand the sources leading observers to empathic judgments and propose reminiscence and older adults as a model environment for investigating mixed emotions.}, subject = {Gef{\"u}hl}, language = {en} } @phdthesis{Nogatz2023, author = {Nogatz, Falco}, title = {Defining and Implementing Domain-Specific Languages with Prolog}, doi = {10.25972/OPUS-30187}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301872}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The landscape of today's programming languages is manifold. With the diversity of applications, the difficulty of adequately addressing and specifying the used programs increases. This often leads to newly designed and implemented domain-specific languages. They enable domain experts to express knowledge in their preferred format, resulting in more readable and concise programs.
Due to its flexible and declarative syntax without reserved keywords, the logic programming language Prolog is particularly suitable for defining and embedding domain-specific languages. This thesis addresses the questions and challenges that arise when integrating domain-specific languages into Prolog. We compare the two approaches to define them either externally or internally, and provide assisting tools for each. The grammar of a formal language is usually defined in the extended Backus-Naur form. In this work, we handle this formalism as a domain-specific language in Prolog, and define term expansions that allow it to be translated into equivalent definite clause grammars. We present the package library(dcg4pt) for SWI-Prolog, which enriches them by an additional argument to automatically process the term's corresponding parse tree. To simplify the work with definite clause grammars, we visualise their application by a web-based tracer. The external integration of domain-specific languages requires the programmer to keep the grammar, parser, and interpreter in sync. In many cases, domain-specific languages can instead be directly embedded into Prolog by providing appropriate operator definitions. In addition, we propose syntactic extensions for Prolog to expand its expressiveness, for instance to state logic formulas with their connectives verbatim. This allows the use of all tools that were originally written for Prolog, for instance code linters and editors with syntax highlighting. We present the package library(plammar), a standard-compliant parser for Prolog source code, written in Prolog. It is able to automatically infer from example sentences the required operator definitions with their classes and precedences as well as the required Prolog language extensions. As a result, we can automatically answer the question: Is it possible to model these example sentences as valid Prolog clauses, and how? We discuss and apply the two approaches to internal and external integrations for several domain-specific languages, namely the extended Backus-Naur form, GraphQL, XPath, and a controlled natural language to represent expert rules in if-then form. The created toolchain with library(dcg4pt) and library(plammar) yields new application opportunities for static Prolog source code analysis, which we also present.}, subject = {PROLOG }, language = {en} } @phdthesis{Bleier2023, author = {Bleier, Michael}, title = {Underwater Laser Scanning - Refractive Calibration, Self-calibration and Mapping for 3D Reconstruction}, isbn = {978-3-945459-45-4}, doi = {10.25972/OPUS-32269}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322693}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {There is great interest in affordable, precise and reliable metrology underwater: Archaeologists want to document artifacts in situ with high detail. In marine research, biologists require the tools to monitor coral growth and geologists need recordings to model sediment transport. Furthermore, for offshore construction projects, maintenance and inspection, millimeter-accurate measurements of defects and offshore structures are essential. While the process of digitizing individual objects and complete sites on land is well understood and standard methods, such as Structure from Motion or terrestrial laser scanning, are regularly applied, precise underwater surveying with high resolution is still a complex and difficult task.
Applying optical scanning techniques in water is challenging due to reduced visibility caused by turbidity and light absorption. However, optical underwater scanners provide significant advantages in terms of achievable resolution and accuracy compared to acoustic systems. This thesis proposes an underwater laser scanning system and the algorithms for creating dense and accurate 3D scans in water. It is based on laser triangulation and the main optical components are an underwater camera and a cross-line laser projector. The prototype is configured with a motorized yaw axis for capturing scans from a tripod. Alternatively, it is mounted to a moving platform for mobile mapping. The main focus lies on the refractive calibration of the underwater camera and laser projector, the image processing and 3D reconstruction. For highest accuracy, the refraction at the individual media interfaces must be taken into account. This is addressed by an optimization-based calibration framework using a physical-geometric camera model derived from an analytical formulation of a ray-tracing projection model. In addition to scanning underwater structures, this work presents the 3D acquisition of semi-submerged structures and the correction of refraction effects. As in-situ calibration in water is complex and time-consuming, the challenge of transferring an in-air scanner calibration to water without re-calibration is investigated, as well as self-calibration techniques for structured light. The system was successfully deployed in various configurations for both static scanning and mobile mapping. An evaluation of the calibration and 3D reconstruction using reference objects and a comparison of free-form surfaces in clear water demonstrate the high accuracy potential in the range of one millimeter to less than one centimeter, depending on the measurement distance. Mobile underwater mapping and motion compensation based on visual-inertial odometry is demonstrated using a new optical underwater scanner based on fringe projection. Continuous registration of individual scans allows the acquisition of 3D models from an underwater vehicle. RGB images captured in parallel are used to create 3D point clouds of underwater scenes in full color. 3D maps are useful to the operator during the remote control of underwater vehicles and provide the building blocks to enable offshore inspection and surveying tasks. The advancing automation of the measurement technology will allow non-experts to use it, significantly reduce acquisition time and increase accuracy, making underwater metrology more cost-effective.}, subject = {Selbstkalibrierung}, language = {en} } @phdthesis{Krenzer2023, author = {Krenzer, Adrian}, title = {Machine learning to support physicians in endoscopic examinations with a focus on automatic polyp detection in images and videos}, doi = {10.25972/OPUS-31911}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319119}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Deep learning enables enormous progress in many computer vision-related tasks. Artificial Intelligence (AI) steadily yields new state-of-the-art results in the field of detection and classification. Thereby AI performance equals or exceeds human performance. Those achievements impacted many domains, including medical applications. One particular field of medical applications is gastroenterology. In gastroenterology, machine learning algorithms are used to assist examiners during interventions.
One of the most critical concerns for gastroenterologists is the development of Colorectal Cancer (CRC), which is one of the leading causes of cancer-related deaths worldwide. Detecting polyps in screening colonoscopies is the essential procedure to prevent CRC. Thereby, the gastroenterologist uses an endoscope to screen the whole colon to find polyps during a colonoscopy. Polyps are mucosal growths that can vary in severity. This thesis supports gastroenterologists in their examinations with automated detection and classification systems for polyps. The main contribution is a real-time polyp detection system. This system is ready to be installed in any gastroenterology practice worldwide using open-source software. The system achieves state-of-the-art detection results and is currently evaluated in a clinical trial in four different centers in Germany. The thesis presents two additional key contributions: One is a polyp detection system with extended vision tested in an animal trial. Polyps often hide behind folds or in uninvestigated areas. Therefore, the polyp detection system with extended vision uses an endoscope assisted by two additional cameras to see behind those folds. If a polyp is detected, the endoscopist receives a visual signal. While the detection system handles the additional two camera inputs, the endoscopist focuses on the main camera as usual. The second consists of two polyp classification models, one for the classification based on shape (Paris) and the other on surface and texture (NBI International Colorectal Endoscopic (NICE) classification). Both classifications help the endoscopist with the treatment of and the decisions about the detected polyp. The key algorithms of the thesis achieve state-of-the-art performance. Notably, the polyp detection system tested on a highly demanding video data set shows an F1 score of 90.25 \% while working in real time. These results exceed those of all real-time systems in the literature. Furthermore, the first preliminary results of the clinical trial of the polyp detection system suggest a high Adenoma Detection Rate (ADR). In the preliminary study, all polyps were detected by the polyp detection system, and the system achieved a high usability score of 96.3 (max 100). The Paris classification model achieved an F1 score of 89.35 \% which is state-of-the-art. The NICE classification model achieved an F1 score of 81.13 \%. Furthermore, a large data set for polyp detection and classification was created during this thesis. Therefore a fast and robust annotation system called Fast Colonoscopy Annotation Tool (FastCAT) was developed. The system simplifies the annotation process for gastroenterologists. Thereby the gastroenterologists only annotate key parts of the endoscopic video. Afterward, those video parts are pre-labeled by a polyp detection AI to speed up the process. After the AI has pre-labeled the frames, non-experts correct and finish the annotation. This annotation process is fast and ensures high quality.
FastCAT reduces the overall workload of the gastroenterologist on average by a factor of 20 compared to an open-source state-of-the-art annotation tool.}, subject = {Deep Learning}, language = {en} } @phdthesis{Geissler2022, author = {Geißler, Stefan}, title = {Performance Evaluation of Next-Generation Data Plane Architectures and their Components}, issn = {1432-8801}, doi = {10.25972/OPUS-26015}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260157}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {In this doctoral thesis we cover the performance evaluation of next generation data plane architectures, comprised of complex software as well as programmable hardware components that allow fine granular configuration. In the scope of the thesis we propose mechanisms to monitor the performance of singular components and model key performance indicators of software-based packet processing solutions. We present novel approaches towards network abstraction that allow the integration of heterogeneous data plane technologies into a singular network while maintaining total transparency between control and data plane. Finally, we investigate a full, complex system consisting of multiple software-based solutions and perform a detailed performance analysis. We employ simulative approaches to investigate overload control mechanisms that allow efficient operation under adverse conditions. The contributions of this work build the foundation for future research in the areas of network softwarization and network function virtualization.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Loeffler2021, author = {L{\"o}ffler, Andre}, title = {Constrained Graph Layouts: Vertices on the Outer Face and on the Integer Grid}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-146-4}, doi = {10.25972/WUP-978-3-95826-147-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215746}, school = {W{\"u}rzburg University Press}, pages = {viii, 161}, year = {2021}, abstract = {Constraining graph layouts - that is, restricting the placement of vertices and the routing of edges to obey certain constraints - is common practice in graph drawing. In this book, we discuss algorithmic results on two different restriction types: placing vertices on the outer face and on the integer grid. For the first type, we look into the outer k-planar and outer k-quasi-planar graphs, as well as giving a linear-time algorithm to recognize full and closed outer k-planar graphs using Monadic Second-order Logic. For the second type, we consider the problem of transferring a given planar drawing onto the integer grid while preserving the original drawing's topology; we also generalize a variant of Cauchy's rigidity theorem for orthogonal polyhedra of genus 0 to those of arbitrary genus.}, subject = {Graphenzeichnen}, language = {en} } @phdthesis{Peng2019, author = {Peng, Dongliang}, title = {An Optimization-Based Approach for Continuous Map Generalization}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-104-4}, doi = {10.25972/WUP-978-3-95826-105-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174427}, school = {W{\"u}rzburg University Press}, pages = {xv, 132}, year = {2019}, abstract = {Maps are the main tool to represent geographical information. Geographical information is usually scale-dependent, so users need to have access to maps at different scales.
In our digital age, the access is realized by zooming. As discrete changes during the zooming tend to distract users, smooth changes are preferred. This is why some digital maps are trying to make the zooming as continuous as they can. The process of producing maps at different scales with smooth changes is called continuous map generalization. In order to produce maps of high quality, cartographers often take into account additional requirements. These requirements are transferred to models in map generalization. Optimization for map generalization is important not only because it finds optimal solutions in the sense of the models, but also because it helps us to evaluate the quality of the models. Optimization, however, becomes more delicate when we deal with continuous map generalization. In this area, there are requirements not only for a specific map but also for relations between maps at different scales. This thesis is about continuous map generalization based on optimization. First, we show the background of our research topics. Second, we find optimal sequences for aggregating land-cover areas. We compare the A\$^{\!\star}\$ algorithm and integer linear programming in completing this task. Third, we continuously generalize county boundaries to provincial boundaries based on compatible triangulations. We morph between the two sets of boundaries, using dynamic programming to compute the correspondence. Fourth, we continuously generalize buildings to built-up areas by aggregating and growing. In this work, we group buildings with the help of a minimum spanning tree. Fifth, we define vertex trajectories that allow us to morph between polylines. We require that both the angles and the edge lengths change linearly over time. As it is impossible to fulfill all of these requirements simultaneously, we mediate between them using least-squares adjustment. Sixth, we discuss the performance of some commonly used data structures for a specific spatial problem. Seventh, we conclude this thesis and present open problems.}, subject = {Generalisierung }, language = {en} } @phdthesis{Niebler2019, author = {Niebler, Thomas}, title = {Extracting and Learning Semantics from Social Web Data}, doi = {10.25972/OPUS-17866}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178666}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Making machines understand natural language is a dream of mankind that has existed for a very long time. Early attempts at programming machines to converse with humans in a supposedly intelligent way relied on phrase lists and simple keyword matching. However, such approaches cannot provide semantically adequate answers, as they do not consider the specific meaning of the conversation. Thus, if we want to enable machines to actually understand language, we need to be able to access semantically relevant background knowledge. For this, it is possible to query so-called ontologies, which are large networks containing knowledge about real-world entities and their semantic relations. However, creating such ontologies is a tedious task, as often extensive expert knowledge is required. Thus, we need to find ways to automatically construct and update ontologies that fit human intuition of semantics and semantic relations. More specifically, we need to determine semantic entities and find relations between them.
While this is usually done on large corpora of unstructured text, previous work has shown that we can at least facilitate the first issue of extracting entities by considering special data such as tagging data or human navigational paths. Here, we do not need to detect the actual semantic entities, as they are already provided because of the way those data are collected. Thus we can mainly focus on the problem of assessing the degree of semantic relatedness between tags or web pages. However, there exist several issues which need to be overcome, if we want to approximate human intuition of semantic relatedness. For this, it is necessary to represent words and concepts in a way that allows easy and highly precise semantic characterization. This also largely depends on the quality of data from which these representations are constructed. In this thesis, we extract semantic information from both tagging data created by users of social tagging systems and human navigation data in different semantic-driven social web systems. Our main goal is to construct high-quality and robust vector representations of words which can then be used to measure the relatedness of semantic concepts. First, we show that navigation in the social media systems Wikipedia and BibSonomy is driven by a semantic component. After this, we discuss and extend methods to model the semantic information in tagging data as low-dimensional vectors. Furthermore, we show that tagging pragmatics influences different facets of tagging semantics. We then investigate the usefulness of human navigational paths in several different settings on Wikipedia and BibSonomy for measuring semantic relatedness. Finally, we propose a metric-learning-based algorithm to adapt pre-trained word embeddings to datasets containing human judgment of semantic relatedness. This work contributes to the field of studying semantic relatedness between words by proposing methods to extract semantic relatedness from web navigation, learn high-quality and low-dimensional word representations from tagging data, and to learn semantic relatedness from any kind of vector representation by exploiting human feedback. Applications first and foremost lie in ontology learning for the Semantic Web, but also in semantic search or query expansion.}, subject = {Semantik}, language = {en} } @phdthesis{Borrmann2018, author = {Borrmann, Dorit}, title = {Multi-modal 3D mapping - Combining 3D point clouds with thermal and color information}, isbn = {978-3-945459-20-1}, issn = {1868-7474}, doi = {10.25972/OPUS-15708}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-157085}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Imagine a technology that automatically creates a full 3D thermal model of an environment and detects temperature peaks in it. For better orientation in the model it is enhanced with color information. The current state of the art for analyzing temperature related issues is thermal imaging. It is relevant for energy efficiency but also for securing important infrastructure such as power supplies and temperature regulation systems. Monitoring and analysis of the data for a large building is tedious as stable conditions need to be guaranteed for several hours and detailed notes about the pose and the environment conditions for each image must be taken. For some applications repeated measurements are necessary to monitor changes over time. The analysis of the scene is only possible through expertise and experience.
This thesis proposes a robotic system that creates a full 3D model of the environment with color and thermal information by combining thermal imaging with the technology of terrestrial laser scanning. The addition of a color camera facilitates the interpretation of the data and allows for other application areas. The data from all sensors collected at different positions is joined in one common reference frame using calibration and scan matching. The first part of the thesis deals with 3D point cloud processing with the emphasis on accessing point cloud data efficiently, detecting planar structures in the data and registering multiple point clouds into one common coordinate system. The second part covers the autonomous exploration and data acquisition with a mobile robot with the objective to minimize the unseen area in 3D space. Furthermore, the combination of different modalities, color images, thermal images and point cloud data through calibration is elaborated. The last part presents applications for the collected data. Among these are methods to detect the structure of building interiors for reconstruction purposes and subsequent detection and classification of windows. A system to project the gathered thermal information back into the scene is presented as well as methods to improve the color information and to join separately acquired point clouds and photo series. A full multi-modal 3D model contains all the relevant geometric information about the recorded scene and enables an expert to fully analyze it off-site. The technology clears the path for automatically detecting points of interest, thereby helping the expert to analyze the heat flow as well as localize and identify heat leaks. The concept is modular and neither limited to achieving energy efficiency nor restricted to the use in combination with a mobile platform. It also finds its application in fields such as archaeology and geology and can be extended by further sensors.}, subject = {Punktwolke}, language = {en} } @phdthesis{Fleszar2018, author = {Fleszar, Krzysztof}, title = {Network-Design Problems in Graphs and on the Plane}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-076-4 (Print)}, doi = {10.25972/WUP-978-3-95826-077-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-154904}, school = {W{\"u}rzburg University Press}, pages = {xi, 204}, year = {2018}, abstract = {A network design problem defines an infinite set whose elements, called instances, describe relationships and network constraints. It asks for an algorithm that, given an instance of this set, designs a network that respects the given constraints and at the same time optimizes some given criterion. In my thesis, I develop algorithms whose solutions are optimum or close to an optimum value within some guaranteed bound. I also examine the computational complexity of these problems. Problems from two vast areas are considered: graphs and the Euclidean plane. In the Maximum Edge Disjoint Paths problem, we are given a graph and a subset of vertex pairs that are called terminal pairs. We are asked for a set of paths where the endpoints of each path form a terminal pair. The constraint is that any two paths share at most one inner vertex. The optimization criterion is to maximize the cardinality of the set.
In the hard-capacitated k-Facility Location problem, we are given an integer k and a complete graph where the distances obey a given metric and where each node has two numerical values: a capacity and an opening cost. We are asked for a subset of k nodes, called facilities, and an assignment of all the nodes, called clients, to the facilities. The constraint is that the number of clients assigned to a facility cannot exceed the facility's capacity value. The optimization criterion is to minimize the total cost which consists of the total opening cost of the facilities and the total distance between the clients and the facilities they are assigned to. In the Stabbing problem, we are given a set of axis-aligned rectangles in the plane. We are asked for a set of horizontal line segments such that, for every rectangle, there is a line segment crossing its left and right edge. The optimization criterion is to minimize the total length of the line segments. In the k-Colored Non-Crossing Euclidean Steiner Forest problem, we are given an integer k and a finite set of points in the plane where each point has one of k colors. For every color, we are asked for a drawing that connects all the points of the same color. The constraint is that drawings of different colors are not allowed to cross each other. The optimization criterion is to minimize the total length of the drawings. In the Minimum Rectilinear Polygon for Given Angle Sequence problem, we are given an angle sequence of left (+90°) turns and right (-90°) turns. We are asked for an axis-parallel simple polygon where the angles of the vertices yield the given sequence when walking around the polygon in a counter-clockwise manner. The optimization criteria considered are to minimize the perimeter, the area, and the size of the axis-parallel bounding box of the polygon.}, subject = {Euklidische Ebene}, language = {en} } @phdthesis{Wojtkowiak2018, author = {Wojtkowiak, Harald}, title = {Planungssystem zur Steigerung der Autonomie von Kleinstsatelliten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-163569}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {The operation of satellites will change drastically in the future. The conventional approach practiced so far, in which the planning of the activities to be executed by the satellite as well as the control over them take place exclusively from the ground, reaches its limits with today's applications. In the worst case, this circumstance even prevents the exploitation of so far untapped opportunities. The return of a satellite, be it in the form of scientific data or the marketing of satellite-based services, is therefore not fully realized. The cause of this problem can essentially be traced back to one decisive fact: conventional satellites cannot adapt their behaviour, i.e. the sequence of their activities, on their own. Instead, the operations personnel on the ground - above all the operators - use planning software to create fixed schedules, which are then uploaded to the respective satellites from the ground stations in the form of command sequences. There, the commands are merely checked, interpreted and strictly executed. The execution is linear.
Situationsbedingte {\"A}nderungen, wie sie vergleichsweise bei der Codeausf{\"u}hrung von Softwareprogrammen durch Kontrollkonstrukte, zum Beispiel Schleifen und Verzweigungen, {\"u}blich sind, sind typischerweise nicht vorgesehen. Der Operator ist daher die einzige Instanz, die das Verhalten des Satelliten mittels Kommandierung, per Upload, beeinflussen kann, und auch nur dann, wenn ein direkter Funkkontakt zwischen Satellit und Bodenstation besteht. Die dadurch m{\"o}glichen Reaktionszeiten des Satelliten liegen bestenfalls bei einigen Sekunden, falls er sich im Wirkungsbereich der Bodenstation befindet. Außerhalb des Kontaktfensters kann sich die Zeitschranke, gegeben durch den Orbit und die aktuelle Position des Satelliten, von einigen Minuten bis hin zu einigen Stunden erstrecken. Die Signallaufzeiten der Funk{\"u}bertragung verl{\"a}ngern die Reaktionszeiten um weitere Sekunden im erdnahen Bereich. Im interplanetaren Raum erstrecken sich die Zeitspannen aufgrund der immensen Entfernungen sogar auf mehrere Minuten. Dadurch bedingt liegt die derzeit technologisch m{\"o}gliche, bodengest{\"u}tzte, Reaktionszeit von Satelliten bestenfalls im Bereich von einigen Sekunden. Diese Einschr{\"a}nkung stellt ein schweres Hindernis f{\"u}r neuartige Satellitenmissionen, bei denen insbesondere nichtdeterministische und kurzzeitige Ph{\"a}nomene (z.B. Blitze und Meteoreintritte in die Erdatmosph{\"a}re) Gegenstand der Beobachtungen sind, dar. Die langen Reaktionszeiten des konventionellen Satellitenbetriebs verhindern die Realisierung solcher Missionen, da die verz{\"o}gerte Reaktion erst erfolgt, nachdem das zu beobachtende Ereignis bereits abgeschlossen ist. Die vorliegende Dissertation zeigt eine M{\"o}glichkeit, das durch die langen Reaktionszeiten entstandene Problem zu l{\"o}sen, auf. Im Zentrum des L{\"o}sungsansatzes steht dabei die Autonomie. Im Wesentlichen geht es dabei darum, den Satelliten mit der F{\"a}higkeit auszustatten, sein Verhalten, d.h. die Folge seiner T{\"a}tigkeiten, eigenst{\"a}ndig zu bestimmen bzw. zu {\"a}ndern. Dadurch wird die direkte Abh{\"a}ngigkeit des Satelliten vom Operator bei Reaktionen aufgehoben. Im Grunde wird der Satellit in die Lage versetzt, sich selbst zu kommandieren. Die Idee der Autonomie wurde im Rahmen der zugrunde liegenden Forschungsarbeiten umgesetzt. Das Ergebnis ist ein autonomes Planungssystem. Dabei handelt es sich um ein Softwaresystem, mit dem sich autonomes Verhalten im Satelliten realisieren l{\"a}sst. Es kann an unterschiedliche Satellitenmissionen angepasst werden. Ferner deckt es verschiedene Aspekte des autonomen Satellitenbetriebs, angefangen bei der generellen Entscheidungsfindung der T{\"a}tigkeiten, {\"u}ber die zeitliche Ablaufplanung unter Einbeziehung von Randbedingungen (z.B. Ressourcen) bis hin zur eigentlichen Ausf{\"u}hrung, d.h. Kommandierung, ab. Das Planungssystem kommt als Anwendung in ASAP, einer autonomen Sensorplattform, zum Einsatz. Es ist ein optisches System und dient der Detektion von kurzzeitigen Ph{\"a}nomenen und Ereignissen in der Erdatmosph{\"a}re. 
The research work on the autonomous planning system, on ASAP and on other systems related to these was carried out at the Professorship for Space Technology of the Chair of Computer Science VIII at the Julius-Maximilians-Universit{\"a}t W{\"u}rzburg.}, subject = {Planungssystem}, language = {de} } @phdthesis{Baier2018, author = {Baier, Pablo A.}, title = {Simulator for Minimally Invasive Vascular Interventions: Hardware and Software}, isbn = {978-3-945459-22-5}, doi = {10.25972/OPUS-16119}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-161190}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {118}, year = {2018}, abstract = {A complete simulation system is proposed that can be used as an educational tool by physicians in training basic skills of Minimally Invasive Vascular Interventions. In the first part, a surface model is developed to assemble arteries having a planar segmentation. It is based on Sweep Surfaces and can be extended to T- and Y-like bifurcations. A continuous force vector field is described, representing the interaction between the catheter and the surface. The computation time of the force field is almost unaffected when the resolution of the artery is increased. The mechanical properties of arteries play an essential role in the study of the circulatory system dynamics, which has been becoming increasingly important in the treatment of cardiovascular diseases. In Virtual Reality Simulators, it is crucial to have a tissue model that responds in real time. In this work, the arteries are discretized by a two-dimensional mesh and the nodes are connected by three kinds of linear springs. Three tissue layers (Intima, Media, Adventitia) are considered and, starting from the stretch-energy density, some of the elasticity tensor components are calculated. The physical model linearizes and homogenizes the material response, but it still accounts for the geometric nonlinearity. In general, if the arterial stretch varies by 1\% or less, then the agreement between the linear and nonlinear models is trustworthy. In the last part, the physical model of the wire proposed by Konings is improved. As a result, a simpler and more stable method is obtained to calculate the equilibrium configuration of the wire. In addition, a geometrical method is developed to perform relaxations. It is particularly useful when the wire is hindered in the physical method because of the boundary conditions. The physical and the geometrical methods are merged, resulting in efficient relaxations. Tests show that the shape of the virtual wire agrees with the experiment. The proposed algorithm allows real-time execution, and the hardware needed to assemble the simulator has a low cost.}, subject = {Computersimulation}, language = {en} } @phdthesis{Budig2018, author = {Budig, Benedikt}, title = {Extracting Spatial Information from Historical Maps: Algorithms and Interaction}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-092-4}, doi = {10.25972/WUP-978-3-95826-093-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-160955}, school = {W{\"u}rzburg University Press}, pages = {viii, 160}, year = {2018}, abstract = {Historical maps are fascinating documents and a valuable source of information for scientists of various disciplines. Many of these maps are available as scanned bitmap images, but in order to make them searchable in useful ways, a structured representation of the contained information is desirable.
This book deals with the extraction of spatial information from historical maps. This cannot be expected to be solved fully automatically (since it involves difficult semantics), but is also too tedious to be done manually at scale. The methodology used in this book combines the strengths of both computers and humans: it describes efficient algorithms to largely automate information extraction tasks and pairs these algorithms with smart user interactions to handle what is not understood by the algorithm. The effectiveness of this approach is shown for various kinds of spatial documents from the 16th to the early 20th century.}, subject = {Karte}, language = {en} } @phdthesis{Ostermayer2017, author = {Ostermayer, Ludwig}, title = {Integration of Prolog and Java with the Connector Architecture CAPJa}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150713}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Modern software is often realized as a modular combination of subsystems for, e. g., knowledge management, visualization, verification, or the interaction with users. As a result, software libraries from possibly different programming languages have to work together. The case is even more complex if different programming paradigms have to be combined. This type of diversification of programming languages and paradigms in just one software application can only be mastered by mechanisms for a seamless integration of the involved programming languages. However, the integration of the common logic programming language Prolog and the popular object-oriented programming language Java is complicated by various interoperability problems which stem on the one hand from the paradigmatic gap between the programming languages, and on the other hand, from the diversity of the available Prolog systems. The subject of the thesis is the investigation of novel mechanisms for the integration of logic programming in Prolog and object-oriented programming in Java. We are particularly interested in an object-oriented, uniform approach which is not specific to just one Prolog system. Therefore, we have first identified several important criteria for the seamless integration of Prolog and Java from the object-oriented perspective. The main contribution of the thesis is a novel integration framework called the Connector Architecture for Prolog and Java (CAPJa). The framework is completely implemented in Java and imposes no modifications to the Java Virtual Machine or Prolog. CAPJa provides a semi-automated mechanism for the integration of Prolog predicates into Java. For compact, readable, and object-oriented queries to Prolog, CAPJa exploits lambda expressions with conditional and relational operators in Java. The communication between Java and Prolog is based on a fully automated mapping of Java objects to Prolog terms, and vice versa.
In Java, an extensible system of gateways provides connectivity with various Prolog systems and, moreover, makes any connected Prolog system easily interchangeable, without major adaptation in Java.}, subject = {Logische Programmierung}, language = {en} } @phdthesis{Aschenbrenner2017, author = {Aschenbrenner, Doris}, title = {Human Robot Interaction Concepts for Human Supervisory Control and Telemaintenance Applications in an Industry 4.0 Environment}, isbn = {978-3-945459-18-8}, doi = {10.25972/OPUS-15052}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150520}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {While teleoperation of highly sophisticated technical systems has already been a wide field of research, especially for space and robotics applications, the automation industry has not yet benefited from its results. Besides the established fields of application, production lines with industrial robots and the surrounding plant components also need to be remotely accessible. This is especially critical for maintenance or if an unexpected problem cannot be solved by the local specialists. Special machine manufacturers, especially robotics companies, sell their technology worldwide. Some factories, for example in emerging economies, lack qualified personnel for repair and maintenance tasks. When a severe failure occurs, an expert of the manufacturer needs to fly there, which leads to long downtimes of the machine or even the whole production line. With the development of data networks, a huge part of those travels can be omitted, if appropriate teleoperation equipment is provided. This thesis describes the development of a telemaintenance system, which was established in an active production line for research purposes. The customer production site of Braun in Marktheidenfeld, a factory which belongs to Procter \& Gamble, consists of a six-axis Cartesian industrial robot by KUKA Industries, a two-component injection molding system and an assembly unit. The plant produces plastic parts for electric toothbrushes. In the research projects "MainTelRob" and "Bayern.digital", during which this plant was utilised, the Zentrum f{\"u}r Telematik e.V. (ZfT) and its project partners develop novel technical approaches and procedures for modern telemaintenance. The term "telemaintenance" hereby refers to the integration of computer science and communication technologies into the maintenance strategy. It is particularly interesting for high-grade capital-intensive goods like industrial robots. Typical telemaintenance tasks are for example the analysis of a robot failure or difficult repair operations. The service department of KUKA Industries is responsible for the worldwide distributed customers who own more than one robot. Currently such tasks are offered via phone support and service staff who travel abroad. They want to expand their service activities to telemaintenance and struggle with the high demands of teleoperation, especially regarding security infrastructure. In addition, the facility in Marktheidenfeld has to keep up with the high international standards of Procter \& Gamble and wants to minimize machine downtimes. Like 71.6 \% of all German companies, P\&G sees a huge potential for early information on their production system, but complains about the insufficient quality and the lack of currentness of data. The main research focus of this work lies on the human-machine interface for all human tasks in a telemaintenance setup.
This thesis presents original work on the use of a mobile device in the context of maintenance, describes new tools for asynchronous remote analysis, and puts all parts together in an integrated telemaintenance infrastructure. With the help of Augmented Reality, the user performance and satisfaction could be raised. Special attention is paid to the situation awareness of the remote expert, realized by different camera viewpoints. In detail, the work consists of: - Support of maintenance tasks with a mobile device - Development and evaluation of a context-aware inspection tool - Comparison of a new touch-based mobile robot programming device to the former teach pendant - Study on Augmented Reality support for repair tasks with a mobile device - Condition monitoring for a specific plant with industrial robot - Human computer interaction for remote analysis of a single plant cycle - A big data analysis tool for a multitude of cycles and similar plants - 3D process visualization for a specific plant cycle with additional virtual information - Network architecture in hardware, software and network infrastructure - Mobile device computer supported collaborative work for telemaintenance - Motor exchange telemaintenance example in running production environment - Augmented reality supported remote plant visualization for better situation awareness}, subject = {Fernwartung}, language = {en} } @phdthesis{Houshiar2017, author = {Houshiar, Hamidreza}, title = {Documentation and mapping with 3D point cloud processing}, isbn = {978-3-945459-14-0}, doi = {10.25972/OPUS-14449}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-144493}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {3D point clouds are a de facto standard for 3D documentation and modelling. The advances in laser scanning technology broaden the usability and access to 3D measurement systems. 3D point clouds are used in many disciplines such as robotics, 3D modelling, archeology and surveying. Scanners are able to acquire up to a million points per second to represent the environment with a dense point cloud. This represents the captured environment with a very high degree of detail. The combination of laser scanning technology with photography adds color information to the point clouds. Thus the environment is represented more realistically. Full 3D models of environments, without any occlusion, require multiple scans. Merging point clouds is a challenging process. This thesis presents methods for point cloud registration based on the panorama images generated from the scans. Image representation of point clouds introduces 2D image processing methods to 3D point clouds. Several projection methods for the generation of panorama maps of point clouds are presented in this thesis. Additionally, methods for point cloud reduction and compression based on the panorama maps are proposed. Due to the large amounts of data generated from the 3D measurement systems, these methods are necessary to improve the point cloud processing, transmission and archiving. This thesis introduces point cloud processing methods as a novel framework for the digitisation of archeological excavations. The framework replaces the conventional documentation methods for excavation sites. It employs point clouds for the generation of the digital documentation of an excavation with the help of an archeologist on-site. The 3D point cloud is used not only for data representation but also for analysis and knowledge generation.
Finally, this thesis presents an autonomous indoor mobile mapping system. The mapping system focuses on the sensor placement planning method. Capturing a complete environment requires several scans. The sensor placement planning method solves for the minimum number of scans required to digitise large environments. Combining this method with a navigation system on a mobile robot platform enables it to acquire data fully autonomously. This thesis introduces a novel hole detection method for point clouds to detect obscured parts of a captured environment. The sensor placement planning method selects the next scan position with the most coverage of the obscured environment. This reduces the required number of scans. The navigation system on the robot platform consists of path planning, path following and obstacle avoidance. This guarantees the safe navigation of the mobile robot platform between the scan positions. The sensor placement planning method is designed as a stand-alone process that could be used with a mobile robot platform for autonomous mapping of an environment or as an assistant tool for the surveyor on scanning projects.}, subject = {3D Punktwolke}, language = {en} } @phdthesis{Kindermann2016, author = {Kindermann, Philipp}, title = {Angular Schematization in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-020-7 (print)}, doi = {10.25972/WUP-978-3-95826-021-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112549}, school = {W{\"u}rzburg University Press}, pages = {184}, year = {2016}, abstract = {Graphs are a frequently used tool to model relationships among entities. A graph is a binary relation between objects, that is, it consists of a set of objects (vertices) and a set of pairs of objects (edges). Networks are common examples of modeling data as a graph. For example, relationships between persons in a social network, or network links between computers in a telecommunication network can be represented by a graph. The clearest way to illustrate the modeled data is to visualize the graphs. The field of Graph Drawing deals with the problem of finding algorithms to automatically generate graph visualizations. The task is to find a "good" drawing, which can be measured by different criteria such as the number of crossings between edges or the used area. In this thesis, we study Angular Schematization in Graph Drawing. By this, we mean drawings with large angles (for example, between the edges at common vertices or at crossing points). The thesis consists of three parts. First, we deal with the placement of boxes. Boxes are axis-parallel rectangles that can, for example, contain text. They can be placed on a map to label important sites, or can be used to describe semantic relationships between words in a word network. In the second part of the thesis, we consider graph drawings that visually guide the viewer. These drawings generally induce large angles between edges that meet at a vertex. Furthermore, the edges are drawn crossing-free and in a way that makes them easy to follow for the human eye. The third and final part is devoted to crossings with large angles.
In drawings with crossings, it is important to have large angles between edges at their crossing point, preferably right angles.}, language = {en} } @phdthesis{Busch2016, author = {Busch, Stephan}, title = {Robust, Flexible and Efficient Design for Miniature Satellite Systems}, isbn = {978-3-945459-10-2}, doi = {10.25972/OPUS-13652}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-136523}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Small satellites contribute significantly to the rapidly evolving innovation in space engineering, in particular in distributed space systems for global Earth observation and communication services. Significant mass reduction by miniaturization, increased utilization of commercial high-tech components, and in particular standardization are the key drivers for modern miniature space technology. This thesis addresses key fields in research and development on miniature satellite technology regarding efficiency, flexibility, and robustness. Here, these challenges are addressed by the University of Wuerzburg's advanced pico-satellite bus, realizing a generic modular satellite architecture and standardized interfaces for all subsystems. The modular platform ensures reusability, scalability, and increased testability due to its flexible subsystem interface which allows efficient and compact integration of the entire satellite in a plug-and-play manner. Besides systematic design for testability, a high degree of operational robustness is achieved by the consistent implementation of redundancy for crucial subsystems. This is combined with efficient fault detection, isolation and recovery mechanisms. Thus, the UWE-3 platform, and in particular the on-board data handling system and the electrical power system, offers one of the most efficient pico-satellite architectures launched in recent years and provides a solid basis for future extensions. The in-orbit performance results of the pico-satellite UWE-3 are presented and summarize successful operations since its launch in 2013. Several software extensions and adaptations have been uploaded to UWE-3, increasing its capabilities. Thus, a very flexible platform for in-orbit software experiments and for evaluations of innovative concepts was provided and tested.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Ullmann2015, author = {Ullmann, Tobias}, title = {Characterization of Arctic Environment by Means of Polarimetric Synthetic Aperture Radar (PolSAR) Data and Digital Elevation Models (DEM)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115719}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The ecosystem of the high northern latitudes is affected by the recently changing environmental conditions. The Arctic has undergone a significant climatic change over the last decades. The land coverage is changing and a phenological response to the warming is apparent. Remotely sensed data can assist the monitoring and quantification of these changes. The remote sensing of the Arctic was predominantly carried out using optical sensors, but these encounter problems in the Arctic environment, e.g. the frequent cloud cover or the solar geometry. In contrast, the imaging of Synthetic Aperture Radar is not affected by the cloud cover and the acquisition of radar imagery is independent of the solar illumination.
The objective of this work was to explore how polarimetric Synthetic Aperture Radar (PolSAR) data of TerraSAR-X, TanDEM-X, Radarsat-2 and ALOS PALSAR and interferometrically derived digital elevation model data of the TanDEM-X Mission can contribute to collecting meaningful information on the actual state of the Arctic Environment. The study was conducted for Canadian sites of the Mackenzie Delta Region and Banks Island, and in situ reference data were available for the assessment. The up-to-date analysis of the PolSAR data made the application of the Non-Local Means filtering and of the decomposition of co-polarized data necessary. The Non-Local Means filter showed a high capability to preserve the image values, to keep the edges and to reduce the speckle. This supported not only the suitability for the interpretation but also for the classification. The classification accuracies of Non-Local Means filtered data were on average +10\% higher compared to unfiltered images. The correlation of the co- and quad-polarized decomposition features was high for classes with distinct surface or double bounce scattering and a usage of the co-polarized data is beneficial for regions of natural land coverage and for low vegetation formations with little volume scattering. The evaluation further revealed that the X- and C-Band were most sensitive to the generalized land cover classes. It was found that the X-Band data were sensitive to low vegetation formations with low shrub density, the C-Band data were sensitive to the shrub density and the shrub-dominated tundra. In contrast, the L-Band data were less sensitive to the land cover. Among the different dual-polarized data the HH/VV-polarized data were identified to be most meaningful for the characterization and classification, followed by the HH/HV-polarized and the VV/VH-polarized data. The quad-polarized data showed the highest sensitivity to the land cover but differences to the co-polarized data were small. The accuracy assessment showed that spectral information was required for accurate land cover classification. The best results were obtained when spectral and radar information was combined. The benefit of including radar data in the classification was up to +15\% accuracy and most significant for the classes wetland and sparse vegetated tundra. The best classifications were realized with quad-polarized C-Band and multispectral data and with co-polarized X-Band and multispectral data. The overall accuracy was up to 80\% for unsupervised and up to 90\% for supervised classifications. The results indicated that the shortwave co-polarized data show promise for the classification of tundra land cover since the polarimetric information is sensitive to low vegetation and the wetlands. Furthermore, co-polarized data provide a higher spatial resolution than the quad-polarized data. The analysis of the intermediate digital elevation model data of the TanDEM-X showed a high potential for the characterization of the surface morphology. The basic and relative topographic features were shown to be of high relevance for the quantification of the surface morphology and an area-wide application is feasible. In addition, these data were of value for the classification and delineation of landforms. Such classifications will assist the delineation of geomorphological units and have the potential to identify locations of actual and future morphologic activity.}, subject = {Mackenzie-River-Delta}, language = {en} }