@phdthesis{Kosub2001, author = {Kosub, Sven}, title = {Complexity and Partitions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2808}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Computational complexity theory usually investigates the complexity of sets, i.e., the complexity of partitions into two parts. But often it is more appropriate to represent natural problems by partitions into more than two parts. A particularly interesting class of such problems consists of classification problems for relations. For instance, a binary relation R typically defines a partitioning of the set of all pairs (x,y) into four parts, classifiable according to whether both R(x,y) and R(y,x) hold, only R(x,y) holds, only R(y,x) holds, or neither of them is true. By means of concrete classification problems such as Graph Embedding or Entailment (for propositional logic), this thesis systematically develops tools, in the shape of the Boolean hierarchy of NP-partitions and its refinements, for the qualitative analysis of the complexity of partitions generated by NP-relations. The Boolean hierarchy of NP-partitions is introduced as a generalization of the well-known and well-studied Boolean hierarchy (of sets) over NP. Whereas the latter hierarchy has a very simple structure, the situation is much more complicated for partitions into at least three parts. To get an idea of this hierarchy, alternative descriptions of the partition classes are given in terms of finite, labeled lattices. Based on these characterizations, the Embedding Conjecture is established, which provides complete information on the structure of the hierarchy. This conjecture is supported by several results. A natural extension of the Boolean hierarchy of NP-partitions emerges from the lattice characterization of its classes by considering partition classes generated by finite, labeled posets. It turns out that all significant ideas translate from the case of lattices. The induced refined Boolean hierarchy of NP-partitions enables us to capture the complexity of certain relations (such as Graph Embedding) more accurately and to describe projectively closed partition classes.}, subject = {Partition}, language = {en} } @phdthesis{Loeffler2021, author = {L{\"o}ffler, Andre}, title = {Constrained Graph Layouts: Vertices on the Outer Face and on the Integer Grid}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-146-4}, doi = {10.25972/WUP-978-3-95826-147-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215746}, school = {W{\"u}rzburg University Press}, pages = {viii, 161}, year = {2021}, abstract = {Constraining graph layouts - that is, restricting the placement of vertices and the routing of edges to obey certain constraints - is common practice in graph drawing. In this book, we discuss algorithmic results on two different restriction types: placing vertices on the outer face and on the integer grid. For the first type, we look into outer k-planar and outer k-quasi-planar graphs, as well as giving a linear-time algorithm to recognize full and closed outer k-planar graphs using Monadic Second-order Logic.
For the second type, we consider the problem of transferring a given planar drawing onto the integer grid while preserving the original drawing's topology; we also generalize a variant of Cauchy's rigidity theorem for orthogonal polyhedra of genus 0 to those of arbitrary genus.}, subject = {Graphenzeichnen}, language = {en} } @phdthesis{Fink2014, author = {Fink, Martin}, title = {Crossings, Curves, and Constraints in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, isbn = {978-3-95826-002-3 (print)}, doi = {10.25972/WUP-978-3-95826-003-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-98235}, school = {W{\"u}rzburg University Press}, pages = {222}, year = {2014}, abstract = {In many cases, problems, data, or information can be modeled as graphs. Graphs can be used as a modeling tool wherever connections between distinguishable objects occur. A graph consists of a set of objects, called vertices, and a set of connections, called edges, such that each edge connects a pair of vertices. For example, a social network can be modeled as a graph by turning the users of the network into vertices and friendship relations between users into edges. Physical networks like computer networks or transportation networks, for example the metro network of a city, can also be seen as graphs. To make graphs, and thereby the modeled data, well understandable for users, we need a visualization. Graph drawing deals with algorithms for visualizing graphs. This thesis investigates, in particular, the use of crossings and curves in graph drawing problems under additional constraints. The constraints occurring in the investigated problems mainly restrict the positions of some or all of the vertices, either as hard constraints or as optimization criteria.}, subject = {Graphenzeichnen}, language = {en} } @phdthesis{Nogatz2023, author = {Nogatz, Falco}, title = {Defining and Implementing Domain-Specific Languages with Prolog}, doi = {10.25972/OPUS-30187}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301872}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The landscape of today's programming languages is manifold. With the diversity of applications, the difficulty of adequately addressing and specifying the used programs increases. This often leads to newly designed and implemented domain-specific languages. They enable domain experts to express knowledge in their preferred format, resulting in more readable and concise programs. Due to its flexible and declarative syntax without reserved keywords, the logic programming language Prolog is particularly suitable for defining and embedding domain-specific languages. This thesis addresses the questions and challenges that arise when integrating domain-specific languages into Prolog. We compare the two approaches of defining them either externally or internally, and provide assisting tools for each. The grammar of a formal language is usually defined in the extended Backus-Naur form. In this work, we treat this formalism as a domain-specific language in Prolog and define term expansions that allow translating it into equivalent definite clause grammars. We present the package library(dcg4pt) for SWI-Prolog, which enriches them with an additional argument to automatically process the term's corresponding parse tree. To simplify working with definite clause grammars, we visualise their application by means of a web-based tracer.
The external integration of domain-specific languages requires the programmer to keep the grammar, parser, and interpreter in sync. In many cases, domain-specific languages can instead be embedded directly into Prolog by providing appropriate operator definitions. In addition, we propose syntactic extensions for Prolog to expand its expressiveness, for instance to state logic formulas with their connectives verbatim. This makes it possible to use all tools originally written for Prolog, for instance code linters and editors with syntax highlighting. We present the package library(plammar), a standard-compliant parser for Prolog source code, written in Prolog. It is able to automatically infer from example sentences the required operator definitions with their classes and precedences, as well as the required Prolog language extensions. As a result, we can automatically answer the question: Is it possible to model these example sentences as valid Prolog clauses, and how? We discuss and apply the two approaches of internal and external integration to several domain-specific languages, namely the extended Backus-Naur form, GraphQL, XPath, and a controlled natural language to represent expert rules in if-then form. The created toolchain with library(dcg4pt) and library(plammar) yields new application opportunities for static Prolog source code analysis, which we also present.}, subject = {PROLOG}, language = {en} } @phdthesis{Klein2014, author = {Klein, Dominik Werner}, title = {Design and Evaluation of Components for Future Internet Architectures}, issn = {1432-8801}, doi = {10.25972/OPUS-9313}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-93134}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {The current Internet architecture was not conceived and developed in a planned process; rather, it has undergone an evolution-like development. The triggers for the individual evolutionary steps were mostly emerging applications that placed new requirements on the underlying network architecture. To meet these requirements, novel services or protocols were frequently specified and integrated into the existing architecture. This process, however, usually involves considerable effort and is therefore very sluggish, which hampers the development and spread of innovative services. Currently discussed concepts such as Software-Defined Networking (SDN) or network virtualization (NV) are regarded as one way to resolve the legacy burdens of the existing Internet architecture. Common to both concepts is the idea of operating logical networks on top of the physical substrate. These logical networks are highly dynamic and can thus be adapted flexibly to the requirements of the respective applications. In particular, the concept of virtualization enables more intelligent network nodes, which makes innovative new use cases possible. A use case frequently discussed in this context is the mobility of both end devices and services themselves. Service mobility is exploited to reduce the access delay or the resources occupied in the network, for example by migrating services to data centers that are geographically close to the user.
Besides the pure mechanisms for service and device mobility, suitable monitoring solutions that can assess the quality of service perceived by the user are also relevant in this context. Such solutions provide important decision support for the migration, or they monitor possible effects of the migration on the quality of service experienced by the user. In the case of video streaming, such a use case enables the flexible adaptation of the streaming topology for mobile users, so that the video quality can be maintained independently of the access network. In this doctoral thesis, the described use case is analyzed in detail using the example of a video streaming application, and the arising challenges are discussed. Furthermore, solution approaches are presented and evaluated with respect to their efficiency. In detail, the thesis deals with the performance analysis of mechanisms for service mobility and develops an architecture for optimizing service mobility. In the area of device mobility, improvements are developed that reduce the latency between end device and service or that guarantee connectivity independently of the access network. In the last part, a solution for monitoring video quality in the network is developed and analyzed with respect to its accuracy.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Binder2006, author = {Binder, Andreas}, title = {Die stochastische Wissenschaft und zwei Teilsysteme eines Web-basierten Informations- und Anwendungssystems zu ihrer Etablierung}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26146}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Stochastic thinking, Bernoulli stochastics, and its information-technological realization, called Stochastikon, form the basis for understanding and successfully using a stochastic science. This work clarifies the notion of stochastic thinking, gives an illustrative presentation of Bernoulli stochastics as developed by Elart von Collani, and describes Stochastikon. Both the overall concept of Stochastikon and the goals, tasks, and realization of its two subsystems, called Mentor and Encyclopedia, are presented. Stochastic thinking permits a realistic view of things, i.e., a view that is consistent with human observation and experience and thus takes the uncertainty about future developments into account. The notion of uncertainty used in this context refers exclusively to future developments and manifests itself as variability. The sources of uncertainty are, on the one hand, human ignorance and, on the other hand, randomness. Ignorance here means man's lack of knowledge about the unknown but fixed facts that represent the initial conditions of the future development. Bernoulli stochastics provides a set of rules and enables the development of a quantitative model that describes uncertainty and explicitly incorporates its two sources, ignorance and randomness.
Das Modell tr{\"a}gt den Namen Bernoulli-Raum und bildet die Grundlage f{\"u}r die Herleitung quantitativer Verfahren, um zuverl{\"a}ssige und genaue Aussagen sowohl {\"u}ber die nicht-existente zuf{\"a}llige Zukunft (Vorhersageverfahren), als auch {\"u}ber die unbekannte feststehende Vergangenheit (Messverfahren). Das Softwaresystem Stochastikon implementiert die Bernoullische Stochastik in Form einer Reihe autarker, miteinander kommunizierender Teilsysteme. Ziel des Teilsystems Encyclopedia ist die Bereitstellung und Bewertung stochastischen Wissens. Das Teilsystem Mentor dient der Unterst{\"u}tzung des Anwenders bei der Probleml{\"o}sungsfindung durch Identifikation eines richtigen Modells bzw. eines korrekten Bernoulli-Raums. Der L{\"o}sungsfindungsprozess selber enth{\"a}lt keinerlei Unsicherheit. Die ganze Unsicherheit steckt in der L{\"o}sung, d.h. im Bernoulli-Raum, der explizit die vorhandene Unwissenheit (Ignoranz) und den vorliegenden Zufall abdeckend enth{\"a}lt.}, subject = {Stochastik}, language = {de} } @phdthesis{Houshiar2017, author = {Houshiar, Hamidreza}, title = {Documentation and mapping with 3D point cloud processing}, isbn = {978-3-945459-14-0}, doi = {10.25972/OPUS-14449}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-144493}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {3D point clouds are a de facto standard for 3D documentation and modelling. The advances in laser scanning technology broadens the usability and access to 3D measurement systems. 3D point clouds are used in many disciplines such as robotics, 3D modelling, archeology and surveying. Scanners are able to acquire up to a million of points per second to represent the environment with a dense point cloud. This represents the captured environment with a very high degree of detail. The combination of laser scanning technology with photography adds color information to the point clouds. Thus the environment is represented more realistically. Full 3D models of environments, without any occlusion, require multiple scans. Merging point clouds is a challenging process. This thesis presents methods for point cloud registration based on the panorama images generated from the scans. Image representation of point clouds introduces 2D image processing methods to 3D point clouds. Several projection methods for the generation of panorama maps of point clouds are presented in this thesis. Additionally, methods for point cloud reduction and compression based on the panorama maps are proposed. Due to the large amounts of data generated from the 3D measurement systems these methods are necessary to improve the point cloud processing, transmission and archiving. This thesis introduces point cloud processing methods as a novel framework for the digitisation of archeological excavations. The framework replaces the conventional documentation methods for excavation sites. It employs point clouds for the generation of the digital documentation of an excavation with the help of an archeologist on-site. The 3D point cloud is used not only for data representation but also for analysis and knowledge generation. Finally, this thesis presents an autonomous indoor mobile mapping system. The mapping system focuses on the sensor placement planning method. Capturing a complete environment requires several scans. The sensor placement planning method solves for the minimum required scans to digitise large environments. 
Combining this method with a navigation system on a mobile robot platform enables the platform to acquire data fully autonomously. This thesis introduces a novel hole detection method for point clouds to detect obscured parts of a captured environment. The sensor placement planning method selects the next scan position with the largest coverage of the obscured environment. This reduces the required number of scans. The navigation system on the robot platform consists of path planning, path following and obstacle avoidance. This guarantees the safe navigation of the mobile robot platform between the scan positions. The sensor placement planning method is designed as a stand-alone process that can be used with a mobile robot platform for the autonomous mapping of an environment, or as an assisting tool for surveyors in scanning projects.}, subject = {3D Punktwolke}, language = {en} } @phdthesis{Kaussner2003, author = {Kaußner, Armin}, title = {Dynamische Szenerien in der Fahrsimulation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8286}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {This thesis presents a new concept for driving simulator databases. Using a simple scripting language, the user designs a database tailored to the research question at hand. The road network is represented on a topological level. In every simulation step, the geometric representation within the driver's field of view is computed from it. The parts of the road network that are invisible to the driver can be modified during the simulation. These modifications can depend on the driver's route or on the measurements collected during the simulation. In addition, the user can modify the road network interactively. The presented concept offers numerous possibilities for creating reproducible scenarios for driving simulator experiments.}, subject = {Straßenverkehr}, language = {de} } @phdthesis{Menth2004, author = {Menth, Michael}, title = {Efficient admission control and routing for resilient communication networks}, doi = {10.25972/OPUS-846}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9949}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {This work is subdivided into two main areas: resilient admission control and resilient routing. It gives an overview of the state of the art of quality-of-service mechanisms in communication networks and proposes a categorization of admission control (AC) methods. These approaches are investigated regarding their performance, more precisely, regarding the potential resource utilization, by dimensioning the capacity for a network with a given topology, a given traffic matrix, and a required flow blocking probability. In case of a failure, the affected traffic is rerouted over backup paths, which increases the traffic rate on the respective links. To guarantee the effectiveness of admission control also in failure scenarios, the increased traffic rate must be taken into account in capacity dimensioning; this leads to resilient AC. Capacity dimensioning is not feasible for existing networks whose link capacities are already given.
To apply resilient AC in this case, the size of the distributed AC budgets must be adapted to the traffic matrix in such a way that the maximum blocking probability over all flows is minimized and that the capacity of no link is exceeded by the admissible traffic rate in any failure scenario. Several algorithms for the solution of this problem are presented and compared regarding their efficiency and fairness. A prototype for resilient AC was implemented in the laboratories of Siemens AG in Munich within the scope of the project KING. Resilience requires additional capacity on the backup paths for failure scenarios. The amount of this backup capacity depends on the routing and can be minimized by routing optimization. New protection switching mechanisms are presented that quickly divert the traffic around outage locations. They are simple and can be implemented, e.g., with MPLS technology. The Self-Protecting Multi-Path (SPM) is a multi-path consisting of disjoint partial paths. The traffic is distributed over all faultless partial paths according to an optimized load balancing function, both in the working case and in failure scenarios. Performance studies show that the network topology and the traffic matrix also significantly influence the amount of required backup capacity. The example of the COST-239 network illustrates that conventional shortest path routing may need 50\% more capacity than the optimized SPM if all single link and node failures are protected against.}, subject = {Kommunikation}, language = {en} } @phdthesis{Wolz2003, author = {Wolz, Frank}, title = {Ein generisches Konzept zur Modellierung und Bewertung feldprogrammierbarer Architekturen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-7944}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {This thesis presents a first cross-architecture study of field-programmable logic devices for implementing synchronous circuits. First, a model for general field-programmable architectures based on periodic graphs is defined. Then, evaluation measures for architectures and circuit layouts are given that characterize structural properties with respect to chip area consumption and signal delay. Furthermore, a generic layout tool is developed that can compute and evaluate implementations for arbitrary architectures and circuits. Finally, nine resource-minimal architectures with mesh and island structures are compared with each other.}, subject = {Gate-Array-Bauelement}, language = {de} } @phdthesis{Niebler2019, author = {Niebler, Thomas}, title = {Extracting and Learning Semantics from Social Web Data}, doi = {10.25972/OPUS-17866}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178666}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Making machines understand natural language is a dream of mankind that has existed for a very long time. Early attempts at programming machines to converse with humans in a supposedly intelligent way relied on phrase lists and simple keyword matching. However, such approaches cannot provide semantically adequate answers, as they do not consider the specific meaning of the conversation. Thus, if we want to enable machines to actually understand language, we need to be able to access semantically relevant background knowledge.
For this, it is possible to query so-called ontologies, which are large networks containing knowledge about real-world entities and their semantic relations. However, creating such ontologies is a tedious task, as it often requires extensive expert knowledge. Thus, we need to find ways to automatically construct and update ontologies that fit human intuition of semantics and semantic relations. While this is usually done on large corpora of unstructured text, previous work has shown that we can at least ease the first task, extracting entities, by considering special data such as tagging data or human navigational paths. Here, we do not need to detect the actual semantic entities, as they are already provided because of the way those data are collected. Thus, we can mainly focus on the problem of assessing the degree of semantic relatedness between tags or web pages. However, several issues need to be overcome if we want to approximate human intuition of semantic relatedness. For this, it is necessary to represent words and concepts in a way that allows easy and highly precise semantic characterization. This also largely depends on the quality of the data from which these representations are constructed. In this thesis, we extract semantic information both from tagging data created by users of social tagging systems and from human navigation data in different semantically driven social web systems. Our main goal is to construct high-quality and robust vector representations of words which can then be used to measure the relatedness of semantic concepts. First, we show that navigation in the social media systems Wikipedia and BibSonomy is driven by a semantic component. After this, we discuss and extend methods to model the semantic information in tagging data as low-dimensional vectors. Furthermore, we show that tagging pragmatics influences different facets of tagging semantics. We then investigate the usefulness of human navigational paths for measuring semantic relatedness in several different settings on Wikipedia and BibSonomy. Finally, we propose a metric-learning-based algorithm to adapt pre-trained word embeddings to datasets containing human judgments of semantic relatedness. This work contributes to the field of studying semantic relatedness between words by proposing methods to extract semantic relatedness from web navigation, to learn high-quality and low-dimensional word representations from tagging data, and to learn semantic relatedness from any kind of vector representation by exploiting human feedback. Applications lie first and foremost in ontology learning for the Semantic Web, but also in semantic search and query expansion.}, subject = {Semantik}, language = {en} } @phdthesis{Budig2018, author = {Budig, Benedikt}, title = {Extracting Spatial Information from Historical Maps: Algorithms and Interaction}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-092-4}, doi = {10.25972/WUP-978-3-95826-093-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-160955}, school = {W{\"u}rzburg University Press}, pages = {viii, 160}, year = {2018}, abstract = {Historical maps are fascinating documents and a valuable source of information for scientists of various disciplines.
Many of these maps are available as scanned bitmap images, but in order to make them searchable in useful ways, a structured representation of the contained information is desirable. This book deals with the extraction of spatial information from historical maps. This task cannot be expected to be solved fully automatically (since it involves difficult semantics), but it is also too tedious to be done manually at scale. The methodology used in this book combines the strengths of both computers and humans: it describes efficient algorithms to largely automate information extraction tasks and pairs these algorithms with smart user interactions to handle what is not understood by the algorithm. The effectiveness of this approach is shown for various kinds of spatial documents from the 16th to the early 20th century.}, subject = {Karte}, language = {en} } @phdthesis{Glasser2001, author = {Glaßer, Christian}, title = {Forbidden-Patterns and Word Extensions for Concatenation Hierarchies}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-1179153}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Star-free regular languages can be built up from alphabet letters by using only Boolean operations and concatenation. The complexity of these languages can be measured with the so-called dot-depth. This measure leads to concatenation hierarchies like the dot-depth hierarchy (DDH) and the closely related Straubing-Th{\'e}rien hierarchy (STH). The question of whether the single levels of these hierarchies are decidable is still open and is known as the dot-depth problem. In this thesis we prove/reprove the decidability of some lower levels of both hierarchies. More precisely, we characterize these levels in terms of patterns in finite automata (subgraphs in the transition graph) that are not allowed; such characterizations are therefore called forbidden-pattern characterizations. The main results of the thesis are as follows: (i) a forbidden-pattern characterization for level 3/2 of the DDH, which implies the decidability of this level; (ii) the decidability of the Boolean hierarchy over level 1/2 of the DDH; and (iii) the definition of decidable hierarchies that are closely related to the DDH and STH. Moreover, we prove/reprove the decidability of the levels 1/2 and 3/2 of both hierarchies in terms of forbidden-pattern characterizations. We show the decidability of the Boolean hierarchies over level 1/2 of the DDH and over level 1/2 of the STH. A technique which uses word extensions plays the central role in the proofs of these results. With this technique it is possible to treat the levels 1/2 and 3/2 of both hierarchies in a uniform way. Furthermore, it can be used to prove the decidability of the mentioned Boolean hierarchies. Among other things, we provide a combinatorial tool that allows partitioning words of arbitrary length into factors of bounded length such that every second factor u leads to a loop with label u in a given finite automaton.}, subject = {Automatentheorie}, language = {en} } @phdthesis{Reith2001, author = {Reith, Steffen}, title = {Generalized Satisfiability Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-74}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {In the last 40 years, complexity theory has grown into a rich and powerful field of theoretical computer science. The main task of complexity theory is the classification of problems with respect to their consumption of resources (e.g., running time or required memory).
To study the computational complexity (i.e., the consumption of resources) of problems, similar problems are grouped into so-called complexity classes. During the systematic study of numerous problems of practical relevance, no efficient algorithms were found for a great number of them, and it was unclear whether such algorithms exist at all. A major breakthrough in this situation was the introduction of the complexity classes P and NP and the identification of hardest problems in NP. These hardest problems of NP are nowadays known as NP-complete problems. One prominent example of an NP-complete problem is the satisfiability problem of propositional formulas (SAT). Here, a propositional formula is given as input, and it must be decided whether there exists an assignment to the propositional variables that satisfies the formula. The intensive study of NP led to numerous related classes, e.g., the classes of the polynomial-time hierarchy PH, P, \#P, PP, NL, L and \#L. During the study of these classes, problems related to propositional formulas were often identified as complete problems for these classes. Hence some questions arise: Why is SAT so hard to solve? Are there modifications of SAT which are complete for other well-known complexity classes? In the context of these questions, a result by E. Post is extremely useful: he identified and characterized all classes of Boolean functions that are closed under superposition. Using this result, it is possible to study problems connected to generalized propositional logic, which is done in this thesis. Many different problems connected to propositional logic are thus studied and classified with respect to their computational complexity, clarifying the borderline between easy and hard problems.}, subject = {Erf{\"u}llbarkeitsproblem}, language = {en} } @phdthesis{Schaefer2003, author = {Sch{\"a}fer, Dirk}, title = {Globale Selbstlokalisation autonomer mobiler Roboter - Ein Schl{\"u}sselproblem der Service-Robotik}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-7601}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {This dissertation addresses the problem of global self-localization of autonomous mobile robots, which can be described as follows: a mobile robot operating inside a building may, under certain circumstances, lose the knowledge of its own location. It is assumed that a map of the building is available to the robot as a model. Using a laser range finder, the mobile device can acquire new information and, given a correct matching to the model map, determine suitable hypothetical locations. In general, however, these positions will be ambiguous. By moving intelligently through its environment, the robot can verify the original sensor data and, in the best case, determine its actual position. For this problem, a new solution approach is presented in theory and practice; it maps the current local map, and thus all sensor data, onto the model of the environment by means of feature-based matching methods. An exploration algorithm autonomously drives the robot to sensing positions that provide new information.
W{\"a}hrend der Bewegungsphase werden dabei die bisherigen hypothetischen Positionen best{\"a}rkt oder geschw{\"a}cht, sodaß nach kurzer Zeit eine dominante Position, die tats{\"a}chliche Roboterposition,{\"u}brigbleibt.}, subject = {Mobiler Roboter}, language = {de} } @phdthesis{Schmidt2011, author = {Schmidt, Marco}, title = {Ground Station Networks for Efficient Operation of Distributed Small Satellite Systems}, isbn = {978-3-923959-77-8}, doi = {10.25972/OPUS-4984}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64999}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The field of small satellite formations and constellations attracted growing attention, based on recent advances in small satellite engineering. The utilization of distributed space systems allows the realization of innovative applications and will enable improved temporal and spatial resolution in observation scenarios. On the other side, this new paradigm imposes a variety of research challenges. In this monograph new networking concepts for space missions are presented, using networks of ground stations. The developed approaches combine ground station resources in a coordinated way to achieve more robust and efficient communication links. Within this thesis, the following topics were elaborated to improve the performance in distributed space missions: Appropriate scheduling of contact windows in a distributed ground system is a necessary process to avoid low utilization of ground stations. The theoretical basis for the novel concept of redundant scheduling was elaborated in detail. Additionally to the presented algorithm was a scheduling system implemented, its performance was tested extensively with real world scheduling problems. In the scope of data management, a system was developed which autonomously synchronizes data frames in ground station networks and uses this information to detect and correct transmission errors. The system was validated with hardware in the loop experiments, demonstrating the benefits of the developed approach.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Duelli2012, author = {Duelli, Michael}, title = {Heuristic Design and Provisioning of Resilient Multi-Layer Networks}, doi = {10.25972/OPUS-5600}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69433}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {To jointly provide different services/technologies, like IP and Ethernet or IP and SDH/SONET, in a single network, equipment of multiple technologies needs to be deployed to the sites/Points of Presence (PoP) and interconnected with each other. Therein, a technology may provide transport functionality to other technologies and increase the number of available resources by using multiplexing techniques. By providing its own switching functionality, each technology creates connections in a logical layer which leads to the notion of multi-layer networks. The design of such networks comprises the deployment and interconnection of components to suit to given traffic demands. To prevent traffic loss due to failures of networking equipment, protection mechanisms need to be established. In multi-layer networks, protection usually can be applied in any of the considered layers. In turn, the hierarchical structure of multi-layer networks also bears shared risk groups (SRG). To achieve a cost-optimal resilient network, an appropriate combination of multiplexing techniques, technologies, and their interconnections needs to be found. 
Thus, network design is a combinatorial problem with a large parameter and solution space. After the design stage, the resources of a multi-layer network can be provided to traffic demands. In particular, dynamic capacity provisioning requires interaction between sites and layers, as well as accurate retrieval of constraint information. In recent years, generalized multiprotocol label switching (GMPLS) and path computation elements (PCE) have emerged as possible approaches to these challenges. Like the design, the provisioning of multi-layer networks comprises a variety of optimization parameters, such as blocking probability, resilience, and energy efficiency. In this work, we introduce several efficient heuristics to approach the considered optimization problems. We perform capital expenditure (CAPEX)-aware design of multi-layer networks from scratch, based on the cost and equipment data of the IST NOBEL phase 2 project. We take into account traffic and resilience requirements in different and multiple layers, as well as different network architectures. On top of the designed networks, we consider the dynamic provisioning of multi-layer traffic based on the GMPLS and PCE architecture. We evaluate different PCE deployments, information retrieval strategies, and re-optimization. Finally, we show how information about provisioning utilization can be used to provide feedback for network design.}, subject = {Mehrschichtsystem}, language = {en} } @phdthesis{Aschenbrenner2017, author = {Aschenbrenner, Doris}, title = {Human Robot Interaction Concepts for Human Supervisory Control and Telemaintenance Applications in an Industry 4.0 Environment}, isbn = {978-3-945459-18-8}, doi = {10.25972/OPUS-15052}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150520}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {While the teleoperation of highly sophisticated technical systems has long been a wide field of research, especially for space and robotics applications, the automation industry has not yet benefited from its results. Besides the established fields of application, production lines with industrial robots and the surrounding plant components also need to be remotely accessible. This is especially critical for maintenance, or if an unexpected problem cannot be solved by the local specialists. Special machine manufacturers, especially robotics companies, sell their technology worldwide. Some factories, for example in emerging economies, lack qualified personnel for repair and maintenance tasks. When a severe failure occurs, an expert from the manufacturer needs to fly in, which leads to long downtimes of the machine or even of the whole production line. With the development of data networks, a large part of this travel can be avoided if appropriate teleoperation equipment is provided. This thesis describes the development of a telemaintenance system which was established in an active production line for research purposes. The customer production site of Braun in Marktheidenfeld, a factory belonging to Procter \& Gamble, consists of a six-axis Cartesian industrial robot by KUKA Industries, a two-component injection molding system and an assembly unit. The plant produces plastic parts for electric toothbrushes. In the research projects "MainTelRob" and "Bayern.digital", during which this plant was utilised, the Zentrum f{\"u}r Telematik e.V. (ZfT) and its project partners developed novel technical approaches and procedures for modern telemaintenance.
The term "telemaintenance" hereby refers to the integration of computer science and communication technologies into the maintenance strategy. It is particularly interesting for high-grade capital-intensive goods like industrial robots. Typical telemaintenance tasks are for example the analysis of a robot failure or difficult repair operations. The service department of KUKA Industries is responsible for the worldwide distributed customers who own more than one robot. Currently such tasks are offered via phone support and service staff which travels abroad. They want to expand their service activities on telemaintenance and struggle with the high demands of teleoperation especially regarding security infrastructure. In addition, the facility in Marktheidenfeld has to keep up with the high international standards of Procter \& Gamble and wants to minimize machine downtimes. Like 71.6 \% of all German companies, P\&G sees a huge potential for early information on their production system, but complains about the insufficient quality and the lack of currentness of data. The main research focus of this work lies on the human machine interface for all human tasks in a telemaintenance setup. This thesis provides own work in the use of a mobile device in context of maintenance, describes new tools on asynchronous remote analysis and puts all parts together in an integrated telemaintenance infrastructure. With the help of Augmented Reality, the user performance and satisfaction could be raised. A special regard is put upon the situation awareness of the remote expert realized by different camera viewpoints. In detail the work consists of: - Support of maintenance tasks with a mobile device - Development and evaluation of a context-aware inspection tool - Comparison of a new touch-based mobile robot programming device to the former teach pendant - Study on Augmented Reality support for repair tasks with a mobile device - Condition monitoring for a specific plant with industrial robot - Human computer interaction for remote analysis of a single plant cycle - A big data analysis tool for a multitude of cycles and similar plants - 3D process visualization for a specific plant cycle with additional virtual information - Network architecture in hardware, software and network infrastructure - Mobile device computer supported collaborative work for telemaintenance - Motor exchange telemaintenance example in running production environment - Augmented reality supported remote plant visualization for better situation awareness}, subject = {Fernwartung}, language = {en} } @phdthesis{Selbach2011, author = {Selbach, Stefan}, title = {Hybride bitparallele Volltextsuche}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66476}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Der große Vorteil eines q-Gramm Indexes liegt darin, dass es m{\"o}glich ist beliebige Zeichenketten in einer Dokumentensammlung zu suchen. Ein Nachteil jedoch liegt darin, dass bei gr{\"o}ßer werdenden Datenmengen dieser Index dazu neigt, sehr groß zu werden, was mit einem deutlichem Leistungsabfall verbunden ist. In dieser Arbeit wird eine neuartige Technik vorgestellt, die die Leistung eines q-Gramm Indexes mithilfe zus{\"a}tzlicher M-Matrizen f{\"u}r jedes q-Gramm und durch die Kombination mit einem invertierten Index erh{\"o}ht. Eine M-Matrix ist eine Bit-Matrix, die Informationen {\"u}ber die Positionen eines q-Gramms enth{\"a}lt. 
Even for combinations of two or more q-grams, these M-matrices provide information about the positions of the combination. This can be used to reduce the complexity of merging the q-gram hit lists for a given search query, and it improves the performance of the n-gram inverted index. The combination with a term-based inverted index additionally speeds up the average search time and unites the advantages of both index formats. Redundant information in the q-gram index is reduced, and further functionality is added, such as ranking hits by relevance, the possibility to search for concepts, or creating index partitions according to the importance of the contained terms.}, subject = {Information Retrieval}, language = {de} } @phdthesis{Ostermayer2017, author = {Ostermayer, Ludwig}, title = {Integration of Prolog and Java with the Connector Architecture CAPJa}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150713}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Modern software is often realized as a modular combination of subsystems for, e.g., knowledge management, visualization, verification, or the interaction with users. As a result, software libraries from possibly different programming languages have to work together. The case is even more complex if different programming paradigms have to be combined. This type of diversification of programming languages and paradigms within a single software application can only be mastered by mechanisms for a seamless integration of the involved programming languages. However, the integration of the common logic programming language Prolog and the popular object-oriented programming language Java is complicated by various interoperability problems, which stem, on the one hand, from the paradigmatic gap between the two programming languages and, on the other hand, from the diversity of the available Prolog systems. The subject of this thesis is the investigation of novel mechanisms for the integration of logic programming in Prolog and object-oriented programming in Java. We are particularly interested in an object-oriented, uniform approach that is not specific to just one Prolog system. Therefore, we first identify several important criteria for the seamless integration of Prolog and Java from the object-oriented perspective. The main contribution of the thesis is a novel integration framework called the Connector Architecture for Prolog and Java (CAPJa). The framework is completely implemented in Java and requires no modifications to the Java Virtual Machine or to Prolog. CAPJa provides a semi-automated mechanism for the integration of Prolog predicates into Java. For compact, readable, and object-oriented queries to Prolog, CAPJa exploits lambda expressions with conditional and relational operators in Java. The communication between Java and Prolog is based on a fully automated mapping of Java objects to Prolog terms, and vice versa. In Java, an extensible system of gateways provides connectivity with various Prolog systems and, moreover, makes any connected Prolog system easily interchangeable without major adaptation in Java.}, subject = {Logische Programmierung}, language = {en} }