@article{SchittkowskiZilloberZotemantel1994, author = {Schittkowski, K. and Zillober, Christian and Zotemantel, R.}, title = {Numerical comparison of nonlinear programming algorithms for structural optimization}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-31971}, year = {1994}, abstract = {No abstract available}, language = {en} } @phdthesis{Baier1998, author = {Baier, Herbert}, title = {Operators of Higher Order}, publisher = {Shaker Verlag}, isbn = {3-8265-4008-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-140799}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {V, 95}, year = {1998}, abstract = {Motivated by results on interactive proof systems we investigate the computational power of quantifiers applied to well-known complexity classes. In particular, we are interested in existential, universal and probabilistic bounded error quantifiers ranging over words and sets of words, i.e., oracles if we think in terms of a Turing machine model. In addition to the standard oracle access mechanism, we also consider quantifiers ranging over oracles to which access is restricted in a certain way.}, subject = {Komplexit{\"a}tstheorie}, language = {en} } @phdthesis{Schmitz2000, author = {Schmitz, Heinz}, title = {The Forbidden Pattern Approach to Concatenation Hierarchies}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2832}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2000}, abstract = {The thesis addresses the question of whether the dot-depth of star-free regular languages is computable. Here one has to determine for a given star-free regular language the minimal number of alternations between concatenation on the one hand, and intersection, union, complement on the other hand. This question was first raised in 1971 (Brzozowski/Cohen) and is, besides the extended star-height problem, usually referred to as one of the most difficult open questions on regular languages. The dot-depth problem can be captured formally by hierarchies of classes of star-free regular languages B(0), B(1/2), B(1), B(3/2), ... and L(0), L(1/2), L(1), L(3/2), ..., which are defined via alternating the closure under concatenation and Boolean operations, beginning with single alphabet letters. The question of dot-depth is then the question whether these hierarchy classes have decidable membership problems. The thesis makes progress on this question using the so-called forbidden pattern approach: Classes of regular languages are characterized in terms of patterns in finite automata (subgraphs in the transition graph) that are not allowed. Such a characterization immediately implies the decidability of the respective class, since the absence of a certain pattern in a given automaton can be effectively verified. Before this work, the decidability of B(0), B(1/2), B(1) and L(0), L(1/2), L(1), L(3/2) was known. Here a detailed study of these classes with the help of forbidden patterns is given, which leads to new insights into their inner structure. Furthermore, the decidability of B(3/2) is proven. Based on these results a theory of pattern iteration is developed which leads to the introduction of two new hierarchies of star-free regular languages. These hierarchies are decidable on the one hand; on the other hand, they are in close connection to the classes B(n) and L(n). It remains an open question here whether they may in fact coincide. Some evidence is given in favour of this conjecture, which opens a new way to attack the dot-depth problem.
Moreover, it is shown that the class L(5/2) is decidable in the restricted case of a two-letter alphabet.}, subject = {Sternfreie Sprache}, language = {en} } @phdthesis{Kluegl2000, author = {Kl{\"u}gl, Franziska}, title = {Aktivit{\"a}tsbasierte Verhaltensmodellierung und ihre Unterst{\"u}tzung bei Multiagentensimulationen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2874}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2000}, abstract = {Durch Zusammenf{\"u}hrung traditioneller Methoden zur individuenbasierten Simulation und des Konzepts der Multiagentensysteme steht mit der Multiagentensimulation eine Methodik zur Verf{\"u}gung, die es erm{\"o}glicht, sowohl technisch als auch konzeptionell eine neue Ebene an Detaillierung bei Modellbildung und Simulation zu erreichen. Ein Modell beruht dabei auf dem Konzept einer Gesellschaft: Es besteht aus einer Menge interagierender, aber in ihren Entscheidungen autonomer Einheiten, den Agenten. Diese {\"a}ndern durch ihre Aktionen ihre Umwelt und reagieren ebenso auf die f{\"u}r sie wahrnehmbaren {\"A}nderungen in der Umwelt. Durch die Simulation jedes Agenten zusammen mit der Umwelt, in der er "lebt", wird die Dynamik im Gesamtsystem beobachtbar. In der vorliegenden Dissertation wurde ein Repr{\"a}sentationsschema f{\"u}r Multiagentensimulationen entwickelt, das es Fachexperten, wie zum Beispiel Biologen, erm{\"o}glicht, selbst{\"a}ndig ohne traditionelles Programmieren Multiagentenmodelle zu implementieren und mit diesen Experimente durchzuf{\"u}hren. Dieses deklarative Schema beruht auf zwei Basiskonzepten: Der K{\"o}rper eines Agenten besteht aus Zustandsvariablen. Das Verhalten des Agenten kann mit Regeln beschrieben werden. Ausgehend davon werden verschiedene Strukturierungsans{\"a}tze behandelt. Das wichtigste Konzept ist das der "Aktivit{\"a}t", einer Art "Verhaltenszustand": W{\"a}hrend der Agent in einer Aktivit{\"a}t A verweilt, f{\"u}hrt er die zugeh{\"o}rigen Aktionen aus und dies solange, bis eine Regel feuert, die diese Aktivit{\"a}t beendet und eine neue Aktivit{\"a}t ausw{\"a}hlt. Durch Indizierung dieser Regeln bei den zugeh{\"o}rigen Aktivit{\"a}ten und Einf{\"u}hrung von abstrakten Aktivit{\"a}ten entsteht ein Schema f{\"u}r eine vielf{\"a}ltig strukturierbare Verhaltensbeschreibung. Zu diesem Schema wurde ein Interpreter entwickelt, der ein derartig repr{\"a}sentiertes Modell ausf{\"u}hrt und so Simulationsexperimente mit dem Multiagentenmodell erlaubt. Auf dieser Basis wurde die Modellierungs- und Experimentierumgebung SeSAm ("Shell f{\"u}r Simulierte Agentensysteme") entwickelt. Sie verwendet vorhandene Konzepte aus dem visuellen Programmieren. Mit dieser Umgebung wurden Anwendungsmodelle aus verschiedenen Dom{\"a}nen realisiert: Neben abstrakten Spielbeispielen waren dies vor allem Fragestellungen zu sozialen Insekten, z.B. zum Verhalten von Ameisen, Bienen oder der Interaktion zwischen Bienenv{\"o}lkern und Milbenpopulationen.}, subject = {Agent }, language = {de} } @phdthesis{Reith2001, author = {Reith, Steffen}, title = {Generalized Satisfiability Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-74}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {In the last 40 years, complexity theory has grown into a rich and powerful field of theoretical computer science. The main task of complexity theory is the classification of problems with respect to their consumption of resources (e.g., running time or required memory).
To study the computational complexity (i.e., consumption of resources) of problems, similar problems are grouped into so called complexity classes. During the systematic study of numerous problems of practical relevance, no efficient algorithm for a great number of studied problems was found. Moreover, it was unclear whether such algorithms exist. A major breakthrough in this situation was the introduction of the complexity classes P and NP and the identification of hardest problems in NP. These hardest problems of NP are nowadays known as NP-complete problems. One prominent example of an NP-complete problem is the satisfiability problem of propositional formulas (SAT). Here we get a propositional formula as an input and it must be decided whether an assignment for the propositional variables exists, such that this assignment satisfies the given formula. The intensive study of NP led to numerous related classes, e.g., the classes of the polynomial-time hierarchy PH, P, \#P, PP, NL, L and \#L. During the study of these classes, problems related to propositional formulas were often identified to be complete problems for these classes. Hence some questions arise: Why is SAT so hard to solve? Are there modifications of SAT which are complete for other well-known complexity classes? In the context of these questions a result by E. Post is extremely useful. He identified and characterized all classes of Boolean functions being closed under superposition. It is possible to study problems which are connected to generalized propositional logic by using this result, which was done in this thesis. Hence, many different problems connected to propositional logic were studied and classified with respect to their computational complexity, clearing the borderline between easy and hard problems.}, subject = {Erf{\"u}llbarkeitsproblem}, language = {en} } @phdthesis{Kosub2001, author = {Kosub, Sven}, title = {Complexity and Partitions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2808}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Computational complexity theory usually investigates the complexity of sets, i.e., the complexity of partitions into two parts. But often it is more appropriate to represent natural problems by partitions into more than two parts. A particularly interesting class of such problems consists of classification problems for relations. For instance, a binary relation R typically defines a partitioning of the set of all pairs (x,y) into four parts, classifiable according to the cases where R(x,y) and R(y,x) hold, only R(x,y) or only R(y,x) holds or even neither R(x,y) nor R(y,x) is true. By means of concrete classification problems such as Graph Embedding or Entailment (for propositional logic), this thesis systematically develops tools, in shape of the boolean hierarchy of NP-partitions and its refinements, for the qualitative analysis of the complexity of partitions generated by NP-relations. The Boolean hierarchy of NP-partitions is introduced as a generalization of the well-known and well-studied Boolean hierarchy (of sets) over NP. Whereas the latter hierarchy has a very simple structure, the situation is much more complicated for the case of partitions into at least three parts. To get an idea of this hierarchy, alternative descriptions of the partition classes are given in terms of finite, labeled lattices. Based on these characterizations the Embedding Conjecture is established providing the complete information on the structure of the hierarchy. 
This conjecture is supported by several results. A natural extension of the Boolean hierarchy of NP-partitions emerges from the lattice-characterization of its classes by considering partition classes generated by finite, labeled posets. It turns out that all significant ideas translate from the case of lattices. The induced refined Boolean hierarchy of NP-partitions enables us to capture the complexity of certain relations (such as Graph Embedding) more accurately and to describe projectively closed partition classes.}, subject = {Partition }, language = {en} } @phdthesis{Wirth2001, author = {Wirth, Hans-Christoph}, title = {Multicriteria Approximation of Network Design and Network Upgrade Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2845}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Network planning has gained great importance during the past decades. Today's telecommunication, traffic systems, and logistics would not have evolved to their current state without careful analysis of the underlying network problems and precise implementation of the results obtained from those examinations. Graphs with node and arc attributes are a very useful tool to model realistic applications, while on the other hand they are well understood in theory. We investigate network design problems which are motivated particularly by applications in communication networks and logistics. Those problems include the search for homogeneous subgraphs in edge-labeled graphs where either the total number of labels or the reload cost is to be optimized. Further, we investigate some variants of the dial-a-ride problem. On the other hand, we use node and edge upgrade models to deal with the fact that in many cases one prefers to change existing networks rather than implementing a newly computed solution from scratch. We investigate the construction of bottleneck-constrained forests under a node upgrade model, as well as several flow cost problems under an edge-based upgrade model. All problems are examined within a framework of multi-criteria optimization. Many of the problems can be shown to be NP-hard, with the consequence that, under the widely accepted assumption that P is not equal to NP, there cannot exist efficient algorithms for solving the problems. This motivates the development of approximation algorithms which compute near-optimal solutions with a provable performance guarantee in polynomial time.}, subject = {Netzplantechnik}, language = {en} } @phdthesis{Glasser2001, author = {Glaßer, Christian}, title = {Forbidden-Patterns and Word Extensions for Concatenation Hierarchies}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-1179153}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Starfree regular languages can be built up from alphabet letters by using only Boolean operations and concatenation. The complexity of these languages can be measured with the so-called dot-depth. This measure leads to concatenation hierarchies like the dot-depth hierarchy (DDH) and the closely related Straubing-Th{\´e}rien hierarchy (STH). The question whether the single levels of these hierarchies are decidable is still open and is known as the dot-depth problem. In this thesis we prove/reprove the decidability of some lower levels of both hierarchies. More precisely, we characterize these levels in terms of patterns in finite automata (subgraphs in the transition graph) that are not allowed.
Therefore, such characterizations are called forbidden-pattern characterizations. The main results of the thesis are as follows: a forbidden-pattern characterization for level 3/2 of the DDH (this implies the decidability of this level); the decidability of the Boolean hierarchy over level 1/2 of the DDH; and the definition of decidable hierarchies having close relations to the DDH and STH. Moreover, we prove/reprove the decidability of the levels 1/2 and 3/2 of both hierarchies in terms of forbidden-pattern characterizations. We show the decidability of the Boolean hierarchies over level 1/2 of the DDH and over level 1/2 of the STH. A technique which uses word extensions plays the central role in the proofs of these results. With this technique it is possible to treat the levels 1/2 and 3/2 of both hierarchies in a uniform way. Furthermore, it can be used to prove the decidability of the mentioned Boolean hierarchies. Among other things we provide a combinatorial tool that allows us to partition words of arbitrary length into factors of bounded length such that every second factor u leads to a loop with label u in a given finite automaton.}, subject = {Automatentheorie}, language = {en} } @phdthesis{Karch2002, author = {Karch, Oliver}, title = {Where am I? - Indoor localization based on range measurements}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8442}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2002}, abstract = {Nowadays, robotics plays an important role in an increasing number of fields of application. There exist many environments or situations where mobile robots instead of human beings are used, since the tasks are too hazardous, uncomfortable, repetitive, or costly for humans to perform. The autonomy and the mobility of the robot are often essential for a good solution of these problems. Thus, such a robot should at least be able to answer the question "Where am I?". This thesis investigates the problem of self-localizing a robot in an indoor environment using range measurements. That is, a robot equipped with a range sensor wakes up inside a building and has to determine its position using only its sensor data and a map of its environment. We examine this problem from an idealizing point of view (reducing it to a purely geometric one) and further investigate a method of Guibas, Motwani, and Raghavan from the field of computational geometry for solving it. Here, so-called visibility skeletons, which can be seen as coarsened representations of visibility polygons, play a decisive role. In the major part of this thesis we analyze the structures and the occurring complexities in the framework of this scheme. It turns out that the main source of complication is so-called overlapping embeddings of skeletons into the map polygon, for which we derive some restrictive visibility constraints. Based on these results we are able to improve one of the occurring complexity bounds in the sense that we can formulate it with respect to the number of reflex vertices instead of the total number of map vertices. This also affects the worst-case bound on the preprocessing complexity of the method. The second part of this thesis compares the previous idealizing assumptions with the properties of real-world environments and discusses the occurring problems. In order to circumvent these problems, we use the concept of distance functions, which model the resemblance between the sensor data and the map, and appropriately adapt the above method to the needs of realistic scenarios.
In particular, we introduce a distance function, namely the polar coordinate metric, which seems to be well suited to the localization problem. Finally, we present the RoLoPro software where most of the discussed algorithms are implemented (including the polar coordinate metric).}, subject = {Autonomer Roboter}, language = {en} } @phdthesis{Hoehn2002, author = {H{\"o}hn, Holger}, title = {Multimediale, datenbankgest{\"u}tzte Lehr- und Lernplattformen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-4049}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2002}, abstract = {Die Dissertation befaßt sich mit der Entwicklung einer multimedialen, datenbankgest{\"u}tzten Lehr- und Lernplattform. Die entwickelten Module erm{\"o}glichen und erweitern nicht nur die M{\"o}glichkeit des Selbststudiums f{\"u}r den Studenten sondern erleichtern auch die Arbeit der Dozenten. Außerdem wird auch die Zusammenarbeit und der Austausch von Lernobjekten zwischen verschiedenen Institutionen erm{\"o}glicht. In der Lehr- und Lernplattform k{\"o}nnen verschiedene Lernobjekt-Typen verwaltet werden. Exemplarisch wurden die Typen Bilder, 3D-Animationen, Vorlesungen, Lerntexte, Fallbeispiele und Quizelemente integriert. Die Lehr- und Lernplattform besteht aus drei Bausteinen: 1. In der Lernobjekt-Datenbank werden alle Lernobjekt-Typen und Lernobjekte verwaltet. 2. Autorenwerkzeuge dienen zur Erstellung von Lernobjekten. 3. In der Lernplattform werden die Lernobjekte den Studenten zum (Selbst-)Lernen pr{\"a}sentiert. Neben den Vorteilen, die der Einsatz von E-Learning im allgemeinen bietet, wie die flexible Lernorganisation oder die Nutzung von Lerninhalten unabh{\"a}ngig von Ort und Zeit, zeichnet sich die entwickelte Lehr- und Lernplattform besonders durch folgende Punkte aus: Generierung von Lerninhalten h{\"o}herer Qualit{\"a}t durch multizentrische Expertenb{\"u}ndelung und Arbeitsteilung, Erweiterbarkeit auf andere, neue Lernobjekt-Typen, Verwaltbarkeit, Konsistenz, Flexibilit{\"a}t, geringer Verwaltungsaufwand, Navigationsm{\"o}glichkeiten f{\"u}r den Studenten, Personalisierbarkeit und Konformit{\"a}t zu internationalen Standards. Sowohl bei der Modellierung als auch bei der Umsetzung wurde darauf geachtet, m{\"o}glichst gut die Anforderungen der Dermatologie bei gleichzeitiger Erweiterbarkeit auf andere, {\"a}hnliche Szenarien zu erf{\"u}llen. Besonders einfach sollte die Anpassung der Plattform f{\"u}r andere bildorientierte Disziplinen sein.}, subject = {Multimedia}, language = {de} } @phdthesis{Schaefer2003, author = {Sch{\"a}fer, Dirk}, title = {Globale Selbstlokalisation autonomer mobiler Roboter - Ein Schl{\"u}sselproblem der Service-Robotik}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-7601}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {Die Dissertation behandelt die Problemstellung der globalen Selbstlokalisation autonomer mobiler Roboter, welche folgendermaßen beschrieben werden kann: Ein mobiler Roboter, eingesetzt in einem Geb{\"a}ude, kann unter Umst{\"a}nden das Wissen {\"u}ber seinen Standort verlieren. Man geht nun davon aus, dass dem Roboter eine Geb{\"a}udekarte als Modell zur Verf{\"u}gung steht. Mit Hilfe eines Laser-Entfernungsmessers kann das mobile Ger{\"a}t neue Informationen aufnehmen und damit bei korrekter Zuordnung zur Modellkarte geeignete hypothetische Standorte ermitteln. In der Regel werden diese Positionen aber mehrdeutig sein. 
Indem sich der Roboter intelligent in seiner Einsatzumgebung bewegt, kann er die urspr{\"u}nglichen Sensordaten verifizieren und ermittelt im besten Fall seine tats{\"a}chliche Position. F{\"u}r diese Problemstellung wird ein neuer L{\"o}sungsansatz in Theorie und Praxis pr{\"a}sentiert, welcher die jeweils aktuelle lokale Karte und damit alle Sensordaten mittels feature-basierter Matchingverfahren auf das Modell der Umgebung abbildet. Ein Explorationsalgorithmus bewegt den Roboter w{\"a}hrend der Bewegungsphase autonom zu Sensorpunkten, welche neue Informationen bereitstellen. W{\"a}hrend der Bewegungsphase werden dabei die bisherigen hypothetischen Positionen best{\"a}rkt oder geschw{\"a}cht, sodass nach kurzer Zeit eine dominante Position, die tats{\"a}chliche Roboterposition, {\"u}brigbleibt.}, subject = {Mobiler Roboter}, language = {de} } @phdthesis{Kaussner2003, author = {Kaußner, Armin}, title = {Dynamische Szenerien in der Fahrsimulation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8286}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {In der Arbeit wird ein neues Konzept f{\"u}r Fahrsimulator-Datenbasen vorgestellt. Der Anwender entwirft eine auf seine Fragestellung zugeschnittene Datenbasis mithilfe einer einfachen Skriptsprache. Das Straßennetzwerk wird auf einer topologischen Ebene repr{\"a}sentiert. In jedem Simulationsschritt wird hieraus im Sichtbarkeitsbereich des Fahrers die geometrische Repr{\"a}sentation berechnet. Die f{\"u}r den Fahrer unsichtbaren Teile des Straßennetzwerks k{\"o}nnen w{\"a}hrend der Simulation ver{\"a}ndert werden. Diese Ver{\"a}nderungen k{\"o}nnen von der Route des Fahrers oder von den in der Simulation erhobenen Messwerten abh{\"a}ngen. Zudem kann der Anwender das Straßennetzwerk interaktiv ver{\"a}ndern. Das vorgestellte Konzept bietet zahlreiche M{\"o}glichkeiten zur Erzeugung reproduzierbarer Szenarien f{\"u}r Experimente in Fahrsimulatoren.}, subject = {Straßenverkehr}, language = {de} } @phdthesis{Wolz2003, author = {Wolz, Frank}, title = {Ein generisches Konzept zur Modellierung und Bewertung feldprogrammierbarer Architekturen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-7944}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {Gegenstand der Arbeit stellt eine erstmalig unternommene, architektur{\"u}bergreifende Studie {\"u}ber feldprogrammierbare Logikbausteine zur Implementierung synchroner Schaltkreise dar. Zun{\"a}chst wird ein Modell f{\"u}r allgemeine feldprogrammierbare Architekturen basierend auf periodischen Graphen definiert. Schließlich werden Bewertungsmaße f{\"u}r Architekturen und Schaltkreislayouts zur Charakterisierung struktureller Eigenschaften hinsichtlich des Verhaltens in Chipfl{\"a}chenverbrauch und Signalverz{\"o}gerung angegeben. Ferner wird ein generisches Layout-Werkzeug entwickelt, das f{\"u}r beliebige Architekturen und Schaltkreise Implementierungen berechnen und bewerten kann.
Abschließend werden neun ressourcenminimalistische Architekturen mit Maschen- und mit Inselstruktur einander gegen{\"u}bergestellt.}, subject = {Gate-Array-Bauelement}, language = {de} } @phdthesis{Kluge2004, author = {Kluge, Boris}, title = {Motion coordination for a mobile robot in dynamic environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-15508}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {Generating coordinated motion for a mobile robot operating in natural, continuously changing environments among moving obstacles such as humans is a complex task which requires the solution of various subproblems. In this thesis, we will cover the topics of perception and navigation in dynamic environments, as well as reasoning about the motion of the obstacles and of the robot itself. Perception is mainly considered for a laser range finder, and a corresponding method for obstacle detection and tracking is proposed. Network optimization algorithms are used for data association in the tracking step, resulting in considerable robustness with respect to clutter by small objects. Navigation in general is accomplished using an adaptation of the velocity obstacle approach to the given vehicle kinematics, and cooperative motion coordination between the robot and a human guide is achieved using an appropriate selection rule for collision-free velocities. Next, the robot is enabled to compare its path to the path of a human guide using one of a collection of presented distance measures, which permits the detection of exceptional conditions. Furthermore, a taxonomy for the assessment of situations concerning the robot is presented, and following a summary of existing approaches to more intelligent and comprehensive perception, we propose a method for obstruction detection. Finally, a new approach to reflective navigation behaviors is described where the robot reasons about intelligent moving obstacles in its environment, which allows adjusting the character of the robot motion from regardful and defensive to more self-confident and aggressive behaviors.}, subject = {Bewegungsablauf}, language = {de} } @phdthesis{Menth2004, author = {Menth, Michael}, title = {Efficient admission control and routing for resilient communication networks}, doi = {10.25972/OPUS-846}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9949}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {This work is subdivided into two main areas: resilient admission control and resilient routing. The work gives an overview of the state of the art of quality of service mechanisms in communication networks and proposes a categorization of admission control (AC) methods. These approaches are investigated regarding performance, more precisely, regarding the potential resource utilization by dimensioning the capacity for a network with a given topology, traffic matrix, and a required flow blocking probability. In case of a failure, the affected traffic is rerouted over backup paths which increases the traffic rate on the respective links. To guarantee the effectiveness of admission control also in failure scenarios, the increased traffic rate must be taken into account for capacity dimensioning, which leads to resilient AC. Capacity dimensioning is not feasible for existing networks with already given link capacities.
For the application of resilient NAC in this case, the size of distributed AC budgets must be adapted according to the traffic matrix in such a way that the maximum blocking probability for all flows is minimized and that the capacity of all links is not exceeded by the admissible traffic rate in any failure scenario. Several algorithms for the solution of that problem are presented and compared regarding their efficiency and fairness. A prototype for resilient AC was implemented in the laboratories of Siemens AG in Munich within the scope of the project KING. Resilience requires additional capacity on the backup paths for failure scenarios. The amount of this backup capacity depends on the routing and can be minimized by routing optimization. New protection switching mechanisms are presented that divert the traffic quickly around outage locations. They are simple and can be implemented, e.g., by MPLS technology. The Self-Protecting Multi-Path (SPM) is a multi-path consisting of disjoint partial paths. The traffic is distributed over all faultless partial paths according to an optimized load balancing function both in the working case and in failure scenarios. Performance studies show that the network topology and the traffic matrix also influence the amount of required backup capacity significantly. The example of the COST-239 network illustrates that conventional shortest path routing may need 50\% more capacity than the optimized SPM if all single link and node failures are protected.}, subject = {Kommunikation}, language = {en} } @phdthesis{Baumeister2004, author = {Baumeister, Joachim}, title = {Agile development of diagnostic knowledge systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9698}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {The success of diagnostic knowledge systems has been proven over the last decades. Nowadays, intelligent systems are embedded in machines within various domains or are used in interaction with a user for solving problems. However, although such systems have been applied very successfully, the development of a knowledge system is still a critical issue. Similarly to projects dealing with customized software at a highly innovative level, a precise specification often cannot be given in advance. Moreover, necessary requirements of the knowledge system often cannot be defined until the project has been started, or they change during the development phase. Many success factors depend on the feedback given by users, which can be provided if preliminary demonstrations of the system can be delivered as soon as possible, e.g., for interactive systems by validating the duration of the system dialog. This thesis motivates that classical, document-centered approaches cannot be applied in such a setting. We cope with this problem by introducing an agile process model for developing diagnostic knowledge systems, mainly inspired by the ideas of the eXtreme Programming methodology known in software engineering. The main aim of the presented work is to simplify the engineering process for domain specialists formalizing the knowledge themselves. The engineering process is supported at a primary level by the introduction of knowledge containers that define an organized view of knowledge contained in the system. Consequently, we provide structured procedures as a recommendation for filling these containers.
The actual knowledge is acquired and formalized right from start, and the integration to runnable knowledge systems is done continuously in order to allow for an early and concrete feedback. In contrast to related prototyping approaches the validity and maintainability of the collected knowledge is ensured by appropriate test methods and restructuring techniques, respectively. Additionally, we propose learning methods to support the knowledge acquisition process sufficiently. The practical significance of the process model strongly depends on the available tools supporting the application of the process model. We present the system family d3web and especially the system d3web.KnowME as a highly integrated development environment for diagnostic knowledge systems. The process model and its activities, respectively, are evaluated in two real life applications: in a medical and in an environmental project the benefits of the agile development are clearly demonstrated.}, language = {en} } @phdthesis{Heck2005, author = {Heck, Klaus}, title = {Wireless LAN performance studies in the context of 4G networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-14896}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {Wireless communication is nothing new. The first data transmissions based on electromagnetic waves have been successfully performed at the end of the 19th century. However, it took almost another century until the technology was ripe for mass market. The first mobile communication systems based on the transmission of digital data were introduced in the late 1980s. Within just a couple of years they have caused a revolution in the way people communicate. The number of cellular phones started to outnumber the fixed telephone lines in many countries and is still rising. New technologies in 3G systems, such as UMTS, allow higher data rates and support various kinds of multimedia services. Nevertheless, the end of the road in wireless communication is far from being reached. In the near future, the Internet and cellular phone systems are expected to be integrated to a new form of wireless system. Bandwidth requirements for a rich set of wireless services, e.g.\ video telephony, video streaming, online gaming, will be easily met. The transmission of voice data will just be another IP based service. On the other hand, building such a system is by far not an easy task. The problems in the development of the UMTS system showed the high complexity of wireless systems with support for bandwidth-hungry, IP-based services. But the technological challenges are just one difficulty. Telecommunication systems are planned on a world-wide basis, such that standard bodies, governments, institutions, hardware vendors, and service providers have to find agreements and compromises on a number of different topics. In this work, we provide the reader with a discussion of many of the topics involved in the planning of a Wireless LAN system that is capable of being integrated into the 4th generation mobile networks (4G) that is being discussed nowadays. Therefore, it has to be able to cope with interactive voice and video traffic while still offering high data rates for best effort traffic. Let us assume a scenario where a huge office complex is completely covered with Wireless LAN access points. Different antenna systems are applied in order to reduce the number of access points that are needed on the one hand, while optimizing the coverage on the other. 
No additional infrastructure is implemented. Our goal is to evaluate whether the Wireless LAN technology is capable of dealing with the various demands of such a scenario. First, each single access point has to be capable of supporting best-effort and Quality of Service (QoS) demanding applications simultaneously. The IT infrastructure in our scenario consists solely of Wireless LAN, such that it has to allow users surfing the Web, while others are involved in voice calls or video conferences. Then, there is the problem of overlapping cells. Users attached to one access point produce interference for others. However, the QoS support has to be maintained, which is not an easy task. Finally, there are nomadic users, which roam from one Wireless LAN cell to another even during a voice call. There are mechanisms in the standard that allow for mobility, but their capabilities for QoS support are yet to be studied. This shows the large number of unresolved issues when it comes to Wireless LAN in the context of 4G networks. In this work we want to tackle some of the problems.}, subject = {Drahtloses lokales Netz}, language = {en} } @phdthesis{Esser2005, author = {Eßer, Wolfram}, title = {Fehlertolerante Volltextsuche in elektronischen Enzyklop{\"a}dien und Heuristiken zur Fehlerratenverbesserung}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-14760}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {In der vorliegenden Arbeit wird das Konzept und die praktische Umsetzung einer fehlertoleranten Volltextsuche vorgestellt, welche die unscharfe Recherche nach Suchmustern in umfangreichen, digitalen, enzyklop{\"a}dischen Werken erm{\"o}glichen. Das dabei zur Anwendung kommende neue Verfahren, welches durch Gewichte gesteuert das urspr{\"u}ngliche Benutzer-Suchmuster in seiner Gestalt ver{\"a}ndert (Weighted Pattern Morphing, WPM) und anschließend mit einer nachgeschalteten exakten Volltextsuche sucht, konnte in zahlreichen kommerziellen Anwendungsf{\"a}llen seine Praxistauglichkeit beweisen. Darunter ist die Anwendung zur unscharfen Suche in einer mittelalterlichen, handschriftlichen Chronik besonders interessant, da diese die fr{\"u}hneuhochdeutsche Sprache verwendet und es zur damaligen Zeit noch keine vereinheitlichte Rechtschreibung gab. Aber nicht nur bei der Endbenutzer-Suche kann WPM eingesetzt werden - auch im redaktionellen Umfeld konnten mit dem Verfahren noch mehrere hundert Tippfehler in einem bereits mehrfach lektorierten digitalen Lexikon gefunden werden. Dabei arbeitet das Verfahren deutlich sch{\"a}rfer, als die sonst zur unscharfen Suche (und damit zur Fehler-Suche) verwendete Edit-Distanz. Abschließend wird in der Arbeit noch ein Verfahren vorgestellt, mit dem aus einem 3D-Drahtgitter-Modell und den Faksimile-Scans einer mittelalterlichen Handschrift automatisch ein virtuelles Buch zum Durchbl{\"a}ttern am PC erstellt wurde.}, subject = {Volltextdatenbank}, language = {de} } @phdthesis{Eichelberger2005, author = {Eichelberger, Holger}, title = {Aesthetics and automatic layout of UML class diagrams}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-14831}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {In the last years, visual methods have been introduced in industrial software production and teaching of software engineering. In particular, the international standardization of a graphical software engineering language, the Unified Modeling Language (UML) was a reason for this tendency. 
Unfortunately, various problems exist in concrete realizations of tools, e.g., due to missing compliance with the standard. One problem is the automatic layout, which is required for a consistent automatic software design. The thesis derives reasons and criteria for an automatic layout method, which produces drawings of UML class diagrams according to the UML specification and issues of human-computer interaction, e.g., readability. A unique set of aesthetic criteria is combined from four different disciplines involved in this topic. Based on these aesthetic rules, a hierarchical layout algorithm is developed, analyzed, measured by specialized measuring techniques and compared to related work. Then, the realization of the algorithm as a Java framework is given as an architectural description. Finally, adaptations to anticipated future changes of the UML, improvements of the framework and example drawings of the implementation are given.}, subject = {UML}, language = {en} } @phdthesis{Boehler2005, author = {B{\"o}hler, Elmar}, title = {Algebraic closures in complexity theory}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-16106}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {We use algebraic closures and structures which are derived from these in complexity theory. We classify problems with Boolean circuits and Boolean constraints according to their complexity. We transfer algebraic structures to structural complexity. We use the generation problem to classify important complexity classes.}, subject = {Komplexit{\"a}tstheorie}, language = {en} } @phdthesis{Betz2005, author = {Betz, Christian}, title = {Scalable authoring of diagnostic case based training systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-17885}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {Diagnostic Case Based Training Systems (D-CBT) provide learners with a means to learn and exercise knowledge in a realistic context. In medical education, D-CBT systems present virtual patients to the learners who are asked to examine, diagnose and state therapies for these patients. Due to a number of conflicting and changing requirements, e.g., time for learning and authoring effort, several systems have been developed so far. These systems range from simple, easy-to-use presentation systems to highly complex knowledge-based systems supporting explorative learning. This thesis presents an approach and tools to create D-CBT systems from existing sources (documents, e.g., dismissal records) using existing tools (word processors): Authors annotate and extend the documents to model the knowledge. A scalable knowledge representation is able to capture the content on multiple levels, from simple to highly structured knowledge. Thus, authoring of D-CBT systems requires fewer prerequisites and less pre-knowledge and is faster than approaches using specialized authoring environments. Also, authors can iteratively add and structure more knowledge to adapt training cases to their learners' needs.
The thesis also discusses the application of the same approach to other domains, especially to knowledge acquisition for the Semantic Web.}, subject = {Computerunterst{\"u}tztes Lernen}, language = {en} } @phdthesis{Binder2006, author = {Binder, Andreas}, title = {Die stochastische Wissenschaft und zwei Teilsysteme eines Web-basierten Informations- und Anwendungssystems zu ihrer Etablierung}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26146}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Das stochastische Denken, die Bernoullische Stochastik und deren informationstechnologische Umsetzung namens Stochastikon stellen die Grundlage f{\"u}r das Verst{\"a}ndnis und die erfolgreiche Nutzung einer stochastischen Wissenschaft dar. Im Rahmen dieser Arbeit erfolgt eine Kl{\"a}rung des Begriffs des stochastischen Denkens, eine anschauliche Darstellung der von Elart von Collani entwickelten Bernoullischen Stochastik und eine Beschreibung von Stochastikon. Dabei werden sowohl das Gesamtkonzept von Stochastikon als auch die Ziele, Aufgaben und die Realisierung der beiden Teilsysteme namens Mentor und Encyclopedia vorgestellt. Das stochastische Denken erlaubt eine realit{\"a}tsnahe Sichtweise der Dinge, d.h. eine Sichtweise, die mit den menschlichen Beobachtungen und Erfahrungen im Einklang steht und somit die Unsicherheit {\"u}ber zuk{\"u}nftige Entwicklungen ber{\"u}cksichtigt. Der in diesem Kontext verwendete Begriff der Unsicherheit bezieht sich ausschließlich auf zuk{\"u}nftige Entwicklungen und {\"a}ußert sich in Variabilit{\"a}t. Quellen der Unsicherheit sind einerseits die menschliche Ignoranz und andererseits der Zufall. Unter Ignoranz wird hierbei die Unwissenheit des Menschen {\"u}ber die unbekannten, aber feststehenden Fakten verstanden, die die Anfangsbedingungen der zuk{\"u}nftigen Entwicklung repr{\"a}sentieren. Die Bernoullische Stochastik liefert ein Regelwerk und erm{\"o}glicht die Entwicklung eines quantitativen Modells zur Beschreibung der Unsicherheit unter expliziter Einbeziehung der beiden Quellen Ignoranz und Zufall. Das Modell tr{\"a}gt den Namen Bernoulli-Raum und bildet die Grundlage f{\"u}r die Herleitung quantitativer Verfahren, um zuverl{\"a}ssige und genaue Aussagen sowohl {\"u}ber die nicht-existente zuf{\"a}llige Zukunft (Vorhersageverfahren) als auch {\"u}ber die unbekannte feststehende Vergangenheit (Messverfahren) zu treffen. Das Softwaresystem Stochastikon implementiert die Bernoullische Stochastik in Form einer Reihe autarker, miteinander kommunizierender Teilsysteme. Ziel des Teilsystems Encyclopedia ist die Bereitstellung und Bewertung stochastischen Wissens. Das Teilsystem Mentor dient der Unterst{\"u}tzung des Anwenders bei der Probleml{\"o}sungsfindung durch Identifikation eines richtigen Modells bzw. eines korrekten Bernoulli-Raums. Der L{\"o}sungsfindungsprozess selbst enth{\"a}lt keinerlei Unsicherheit. Die ganze Unsicherheit steckt in der L{\"o}sung, d.h.
im Bernoulli-Raum, der explizit die vorhandene Unwissenheit (Ignoranz) und den vorliegenden Zufall abdeckend enth{\"a}lt.}, subject = {Stochastik}, language = {de} } @misc{Hoehn2006, type = {Master Thesis}, author = {H{\"o}hn, Winfried}, title = {Mustererkennung in Fr{\"u}hdrucken}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-30429}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {No abstract available}, subject = {Mustererkennung}, language = {de} } @phdthesis{Meister2006, author = {Meister, Daniel}, title = {The complexity of membership problems for finite recurrent systems and minimal triangulations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-18837}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {The dissertation thesis studies the complexity of membership problems. Generally, membership problems consider the question whether a given object belongs to a set. Object and set are part of the input. The thesis studies the complexity of membership problems for two special kinds of sets. The first problem class asks whether a given natural number belongs to a set of natural numbers. The set of natural numbers is defined via finite recurrent systems: sets are built by iterative application of operations, like union, intersection, complementation and arithmetical operations, to already defined sets. This general problem implies further problems by restricting the set of used operations. The thesis contains completeness results for well-known complexity classes as well as undecidability results for these problems. The second problem class asks whether a given graph is a minimal triangulation of another graph. A graph is a triangulation of another graph, if it is a chordal spanning supergraph of the second graph. If no proper supergraph of the first graph is a triangulation of the second graph, the first graph is a minimal triangulation of the second graph. The complexity of the membership problem for minimal triangulations of several graph classes is investigated. Restricted variants are solved by linear-time algorithms. These algorithms rely on appropriate characterisations of minimal triangulations.}, subject = {Komplexit{\"a}t}, language = {en} } @phdthesis{Atzmueller2006, author = {Atzm{\"u}ller, Martin}, title = {Knowledge-Intensive Subgroup Mining - Techniques for Automatic and Interactive Discovery}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-21004}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Data mining has proved its significance in various domains and applications. As an important subfield of the general data mining task, subgroup mining can be used, e.g., for marketing purposes in business domains, or for quality profiling and analysis in medical domains. The goal is to efficiently discover novel, potentially useful and ultimately interesting knowledge. However, in real-world situations these requirements often cannot be fulfilled, e.g., if the applied methods do not scale for large data sets, if too many results are presented to the user, or if many of the discovered patterns are already known to the user. This thesis proposes a combination of several techniques in order to cope with the sketched problems: We discuss automatic methods, including heuristic and exhaustive approaches, and especially present the novel SD-Map algorithm for exhaustive subgroup discovery that is fast and effective. 
For an interactive approach we describe techniques for subgroup introspection and analysis, and we present advanced visualization methods, e.g., the zoomtable that directly shows the most important parameters of a subgroup and that can be used for optimization and exploration. We also describe various visualizations for subgroup comparison and evaluation in order to support the user during these essential steps. Furthermore, we propose to include possibly available background knowledge that is easy to formalize into the mining process. We can utilize the knowledge in many ways: To focus the search process, to restrict the search space, and ultimately to increase the efficiency of the discovery method. We especially present background knowledge to be applied for filtering the elements of the problem domain, for constructing abstractions, for aggregating values of attributes, and for the post-processing of the discovered set of patterns. Finally, the techniques are combined into a knowledge-intensive process supporting both automatic and interactive methods for subgroup mining. The practical significance of the proposed approach strongly depends on the available tools. We introduce the VIKAMINE system as a highly-integrated environment for knowledge-intensive active subgroup mining. Also, we present an evaluation consisting of two parts: With respect to objective evaluation criteria, i.e., comparing the efficiency and the effectiveness of the subgroup discovery methods, we provide an experimental evaluation using generated data. For that task we present a novel data generator that allows a simple and intuitive specification of the data characteristics. The results of the experimental evaluation indicate that the novel SD-Map method outperforms the other described algorithms using data sets similar to the intended application concerning the efficiency, and also with respect to precision and recall for the heuristic methods. Subjective evaluation criteria include the user acceptance, the benefit of the approach, and the interestingness of the results. We present five case studies utilizing the presented techniques: The approach has been successfully implemented in medical and technical applications using real-world data sets. The method was very well accepted by the users that were able to discover novel, useful, and interesting knowledge.}, subject = {Data Mining}, language = {en} } @phdthesis{Binzenhoefer2007, author = {Binzenh{\"o}fer, Andreas}, title = {Performance Analysis of Structured Overlay Networks}, doi = {10.25972/OPUS-2250}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26291}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Overlay networks establish logical connections between users on top of the physical network. While randomly connected overlay networks provide only a best effort service, a new generation of structured overlay systems based on Distributed Hash Tables (DHTs) was proposed by the research community. However, there is still a lack of understanding the performance of such DHTs. Additionally, those architectures are highly distributed and therefore appear as a black box to the operator. Yet an operator does not want to lose control over his system and needs to be able to continuously observe and examine its current state at runtime. This work addresses both problems and shows how the solutions can be combined into a more self-organizing overlay concept. 
First, we evaluate the performance of structured overlay networks with respect to different aspects and thereby illuminate to what extent such architectures are able to support carrier-grade applications. Second, to enable operators to monitor and understand their deployed system in more detail, we introduce both active and passive methods to gather information about the current state of the overlay network.}, subject = {Overlay-Netz}, language = {en} } @phdthesis{Herrler2007, author = {Herrler, Rainer}, title = {Agentenbasierte Simulation zur Ablaufoptimierung in Krankenh{\"a}usern und anderen verteilten, dynamischen Umgebungen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-24483}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Verteilte dynamische Systeme unter lokalen und globalen Gesichtspunkten zu optimieren ist eine schwierige Aufgabe. Zwar sind grunds{\"a}tzliche Auswirkungen einzelner Maßnahmen h{\"a}ufig bekannt, doch durch widerstrebende Ziele, Wechselwirkungen zwischen Prozessen und Nebenwirkungen von Maßnahmen ist ein analytisches Vorgehen bei der Optimierung nicht m{\"o}glich. Besonders schwierig wird es, wenn lokale Einheiten einerseits ihre Ziele und Autonomie behalten sollen, aber durch zentrale Vorgaben bzw. Anreize so gesteuert werden sollen, dass ein {\"u}bergeordnetes Ziel erreicht wird. Ein praktisches Beispiel dieses allgemeinen Optimierungsproblems findet sich im Gesundheitswesen. Das Management von modernen Kliniken ist stets mit dem Problem konfrontiert, die Qualit{\"a}t der Pflege zu gew{\"a}hrleisten und gleichzeitig kosteneffizient zu arbeiten. Hier gilt es, unter gegebenen Rahmenbedingungen und bei Respektierung der Autonomie der Funktionseinheiten Optimierungsmaßnahmen zu finden und durchzuf{\"u}hren. Vorhandene Werkzeuge zur Simulation und Modellierung bieten f{\"u}r diese Aufgabe keine ausreichend guten Vorgehensmodelle und Modellierungsmechanismen. Die agentenbasierte Simulation erm{\"o}glicht die Abbildung solcher Systeme und die Durchf{\"u}hrung von Simulationsexperimenten zur Bewertung einzelner Maßnahmen. Es werden L{\"o}sungswege und Werkzeuge vorgestellt und evaluiert, die den Benutzer bei der Formalisierung des Wissens und der Modellierung solch komplexer Szenarien unterst{\"u}tzen und ein systematisches Vorgehen zur Optimierung erm{\"o}glichen.}, subject = {Simulation}, language = {de} } @phdthesis{Milbrandt2007, author = {Milbrandt, Jens}, title = {Performance Evaluation of Efficient Resource Management Concepts for Next Generation IP Networks}, doi = {10.25972/OPUS-1991}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-23332}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Next generation networks (NGNs) must integrate the services of current circuit-switched telephone networks and packet-switched data networks. This convergence towards a unified communication infrastructure is necessitated by the high capital expenditures (CAPEX) and operational expenditures (OPEX) caused by the coexistence of separate networks for voice and data. In the end, NGNs must offer the same services as these legacy networks and, therefore, they must provide a low-cost packet-switched solution with real-time transport capabilities for telephony and multimedia applications. In addition, NGNs must be fault-tolerant to guarantee user satisfaction and to support business-critical processes also in case of network failures.
A key technology for the operation of NGNs is the Internet Protocol (IP) which evolved to a common and well accepted standard for networking in the Internet during the last 25 years. There are two basically different approaches to achieve QoS in IP networks. With capacity overprovisioning (CO), an IP network is equipped with sufficient bandwidth such that network congestion becomes very unlikely and QoS is maintained most of the time. The second option to achieve QoS in IP networks is admission control (AC). AC represents a network-inherent intelligence that admits real-time traffic flows to a single link or an entire network only if enough resources are available such that the requirements on packet loss and delay can be met. Otherwise, the request of a new flow is blocked. This work focuses on resource management and control mechanisms for NGNs, in particular on AC and associated bandwidth allocation methods. The first contribution consists of a new link-oriented AC method called experience-based admission control (EBAC) which is a hybrid approach dealing with the problems inherent to conventional AC mechanisms like parameter-based or measurement-based AC (PBAC/MBAC). PBAC provides good QoS but suffers from poor resource utilization and, vice versa, MBAC uses resources efficiently but is susceptible to QoS violations. Hence, EBAC aims at increasing the resource efficiency while maintaining the QoS which increases the revenues of ISPs and postpones their CAPEX for infrastructure upgrades. To show the advantages of EBAC, we first review today's AC approaches and then develop the concept of EBAC. EBAC is a simple mechanism that safely overbooks the capacity of a single link to increase its resource utilization. We evaluate the performance of EBAC by its simulation under various traffic conditions. The second contribution concerns dynamic resource allocation in transport networks which implement a specific network admission control (NAC) architecture. In general, the performance of different NAC systems may be evaluated by conventional methods such as call blocking analysis which has often been applied in the context of multi-service asynchronous transfer mode (ATM) networks. However, to yield more practical results than abstract blocking probabilities, we propose a new method to compare different AC approaches by their respective bandwidth requirements. To present our new method for comparing different AC systems, we first give an overview of network resource management (NRM) in general. Then we present the concept of adaptive bandwidth allocation (ABA) in capacity tunnels and illustrate the analytical performance evaluation framework to compare different AC systems by their capacity requirements. Different network characteristics influence the performance of ABA. Therefore, the impact of various traffic demand models and tunnel implementations, and the influence of resilience requirements is investigated. In conclusion, the resources in NGNs must be exclusively dedicated to admitted traffic to guarantee QoS. For that purpose, robust and efficient concepts for NRM are required to control the requested bandwidth with regard to the available transmission capacity. 
Sophisticated AC will be a key function for NRM in NGNs and, therefore, efficient resource management concepts like experience-based admission control and adaptive bandwidth allocation for admission-controlled capacity tunnels, as presented in this work are appealing for NGN solutions.}, subject = {Ressourcenmanagement}, language = {en} } @phdthesis{Travers2007, author = {Travers, Stephen}, title = {Structural Properties of NP-Hard Sets and Uniform Characterisations of Complexity Classes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-27124}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {This thesis is devoted to the study of computational complexity theory, a branch of theoretical computer science. Computational complexity theory investigates the inherent difficulty in designing efficient algorithms for computational problems. By doing so, it analyses the scalability of computational problems and algorithms and places practical limits on what computers can actually accomplish. Computational problems are categorised into complexity classes. Among the most important complexity classes are the class NP and the subclass of NP-complete problems, which comprises many important optimisation problems in the field of operations research. Moreover, with the P-NP-problem, the class NP represents the most important unsolved question in computer science. The first part of this thesis is devoted to the study of NP-complete-, and more generally, NP-hard problems. It aims at improving our understanding of this important complexity class by systematically studying how altering NP-hard sets affects their NP-hardness. This research is related to longstanding open questions concerning the complexity of unions of disjoint NP-complete sets, and the existence of sparse NP-hard sets. The second part of the thesis is also dedicated to complexity classes but takes a different perspective: In a sense, after investigating the interior of complexity classes in the first part, the focus shifts to the description of complexity classes and thereby to the exterior in the second part. It deals with the description of complexity classes through leaf languages, a uniform framework which allows us to characterise a great variety of important complexity classes. The known concepts are complemented by a new leaf-language model. To a certain extent, this new approach combines the advantages of the known models. The presented results give evidence that the connection between the theory of formal languages and computational complexity theory might be closer than formerly known.}, subject = {Berechnungskomplexit{\"a}t}, language = {en} } @phdthesis{Hopfner2008, author = {Hopfner, Marbod}, title = {Source Code Analysis, Management, and Visualization for PROLOG}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-36300}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {This thesis deals with the management and analysis of source code, which is represented in XML. Using the elementary methods of the XML repository, the XML source code representation is accessed, changed, updated, and saved. We reason about the source code, refactor source code and we visualize dependency graphs for call analysis. The visualized dependencies between files, modules, or packages are used to structure the source code in order to get a system, which is easily to comprehend, to modify and to complete. 
Sophisticated methods have been developed to slice the source code in order to obtain a working package of a large system, containing only a specific functionality. The basic methods on which the visualizations and analyses are built can be exchanged like plug-ins. The visualization methods can be reused in order to handle arbitrary source code representations, e.g., JAML, PHPML, PROLOGML. Dependencies from other contexts can be visualized, too, e.g., ER diagrams or website references. The tool SCAV supports source code visualization and analysis methods.}, subject = {Refactoring}, language = {en} } @misc{Feineis2008, type = {Master Thesis}, author = {Feineis, Markus}, title = {Wortgenaue Annotation digitalisierter mittelalterlicher Handschriften}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-30448}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {No abstract available}, subject = {Annotation}, language = {de} } @phdthesis{Maeder2008, author = {M{\"a}der, Andreas}, title = {Performance Models for UMTS 3.5G Mobile Wireless Systems}, doi = {10.25972/OPUS-2766}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-32525}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Mobile telecommunication systems of the 3.5th generation (3.5G) constitute a first step towards the requirements of an all-IP world. As the denotation suggests, 3.5G systems are not completely new designs built from scratch. Instead, they are evolved from existing 3G systems like UMTS or cdma2000. 3.5G systems are primarily designed and optimized for packet-switched best-effort traffic, but they are also intended to increase system capacity by exploiting available radio resources more efficiently. Systems based on cdma2000 are enhanced with 1xEV-DO (EV-DO: evolution, data-optimized). In the UMTS domain, the 3G partnership project (3GPP) specified the High Speed Packet Access (HSPA) family, consisting of High Speed Downlink Packet Access (HSDPA) and its counterpart High Speed Uplink Packet Access (HSUPA) or Enhanced Uplink. The focus of this monograph is on HSPA systems, although the operation principles of other 3.5G systems are similar. One of the main contributions of our work is a set of performance models which allow a holistic view on the system. The models consider user traffic on flow level, such that a recalculation of parameters like bandwidth is necessary only on significant changes of the system state. The impact of lower layers is captured by stochastic models. This approach combines accurate modeling and the ability to cope with computational complexity. Applying this approach to HSDPA, we develop a new physical layer abstraction model that takes radio resources, scheduling discipline, radio propagation and mobile device capabilities into account. Together with models for the calculation of network-wide interference and transmit powers, a discrete-event simulation and an analytical model based on a queuing-theoretical approach are proposed. For the Enhanced Uplink, we develop analytical models considering independent and correlated other-cell interference.}, subject = {Mobilfunk}, language = {en} } @phdthesis{Tischler2008, author = {Tischler, German}, title = {Theory and Applications of Parametric Weighted Finite Automata}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-28145}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Parametric weighted finite automata (PWFA) are a multi-dimensional generalization of weighted finite automata.
The expressiveness of PWFA contains the expressiveness of weighted finite automata as well as the expressiveness of affine iterated function systems. The thesis discusses theory and applications of PWFA. The properties of PWFA definable sets are studied and it is shown that some fractal generator systems can be simulated using PWFA and that various real and complex functions can be represented by PWFA. Furthermore, the decoding of PWFA and the interpretation of PWFA definable sets are discussed.}, subject = {Automat }, language = {en} } @phdthesis{Martin2008, author = {Martin, R{\"u}diger}, title = {Resilience, Provisioning, and Control for the Network of the Future}, doi = {10.25972/OPUS-2504}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-28497}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {The Internet sees an ongoing transformation process from a single best-effort service network into a multi-service network. In addition to traditional applications like e-mail, WWW traffic, or file transfer, future generation networks (FGNs) will carry services with real-time constraints and stringent availability and reliability requirements like Voice over IP (VoIP), video conferencing, virtual private networks (VPNs) for finance, other real-time business applications, tele-medicine, or tele-robotics. Hence, quality of service (QoS) guarantees and resilience to failures are crucial characteristics of an FGN architecture. At the same time, network operations must be efficient. This necessitates sophisticated mechanisms for the provisioning and the control of future communication infrastructures. In this work we investigate such mechanisms for resilient FGNs. There are many aspects of the provisioning and control of resilient FGNs such as traffic matrix estimation, traffic characterization, traffic forecasting, mechanisms for QoS enforcement also during failure cases, resilient routing, or scalability concerns for future routing and addressing mechanisms. In this work we focus on three important aspects for which performance analysis can deliver substantial insights: load balancing for multipath Internet routing, fast resilience concepts, and advanced dimensioning techniques for resilient networks. Routing in modern communication networks is often based on multipath structures, e.g., equal-cost multipath routing (ECMP) in IP networks, to facilitate traffic engineering and resiliency. When multipath routing is applied, load balancing algorithms distribute the traffic over available paths towards the destination according to pre-configured distribution values. State-of-the-art load balancing algorithms operate either on the packet or the flow level. Packet level mechanisms achieve highly accurate traffic distributions, but are known to have negative effects on the performance of transport protocols and should not be applied. Flow level mechanisms avoid performance degradations, but at the expense of reduced accuracy. These inaccuracies may have unpredictable effects on link capacity requirements and complicate resource management. Thus, it is important to understand exactly the accuracy and dynamics of load balancing algorithms in order to be able to exercise better network control. Knowing about their weaknesses, it is also important to look for alternatives and to assess their applicability in different networking scenarios. This is the first aspect of this work.
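As a hedged illustration of the flow-level load balancing discussed above, the sketch below hashes a flow identifier onto pre-configured distribution values; the function name, the example weights, and the 5-tuple string are assumptions chosen for demonstration, not the specific algorithms analyzed in the thesis.

# Python sketch: flow-level traffic splitting over ECMP-style paths via hashing.
import hashlib
from bisect import bisect

def pick_path(flow_id, weights):
    # weights are the pre-configured distribution values per path, e.g. [0.5, 0.3, 0.2]
    cumulative, total = [], 0.0
    for w in weights:
        total += w
        cumulative.append(total)
    h = int(hashlib.sha1(flow_id.encode()).hexdigest(), 16)
    point = (h % 10_000) / 10_000 * total   # deterministic position in [0, total)
    return bisect(cumulative, point)        # index of the selected path

print(pick_path("10.0.0.1:443->10.0.0.2:51812/TCP", [0.5, 0.3, 0.2]))

Because every packet of a flow maps to the same path, transport protocols see no reordering, but the realized split only approximates the configured values; this inaccuracy is exactly the issue the abstract points to.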
Component failures are inevitable during the operation of communication networks and lead to routing disruptions if no special precautions are taken. In case of a failure, the robust shortest-path routing of the Internet reconverges after some time to a state where all nodes are again reachable - provided physical connectivity still exists. But stringent availability and reliability criteria of new services make a fast reaction to failures obligatory for resilient FGNs. This led to the development of fast reroute (FRR) concepts for MPLS and IP routing. The operations of MPLS-FRR have already been standardized. Still, the standards leave some degrees of freedom for the resilient path layout and it is important to understand the tradeoffs between different options for the path layout to efficiently provision resilient FGNs. In contrast, the standardization for IP-FRR is an ongoing process. The applicability and possible combinations of different concepts still are open issues. IP-FRR also facilitates a comprehensive resilience framework for IP routing covering all steps of the failure recovery cycle. These points constitute another aspect of this work. Finally, communication networks are usually over-provisioned, i.e., they have much more capacity installed than actually required during normal operation. This is a precaution for various challenges such as network element failures. An alternative to this capacity overprovisioning (CO) approach is admission control (AC). AC blocks new flows in case of imminent overload due to unanticipated events to protect the QoS for already admitted flows. On the one hand, CO is generally viewed as a simple mechanism, AC as a more complex mechanism that complicates the network control plane and raises interoperability issues. On the other hand, AC appears more cost-efficient than CO. To obtain advanced provisioning methods for resilient FGNs, it is important to find suitable models for irregular events, such as failures and different sources of overload, and to incorporate them into capacity dimensioning methods. This allows for a fair comparison between CO and AC in various situations and yields a better understanding of the strengths and weaknesses of both concepts. Such an advanced capacity dimensioning method for resilient FGNs represents the third aspect of this work.}, subject = {Backbone-Netz}, language = {en} } @phdthesis{Driewer2008, author = {Driewer, Frauke}, title = {Teleoperation Interfaces in Human-Robot Teams}, isbn = {978-3-923959-57-0}, doi = {10.25972/OPUS-2955}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-36351}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Diese Arbeit besch{\"a}ftigt sich mit der Verbesserung von Mensch-Roboter Interaktion in Mensch-Roboter Teams f{\"u}r Teleoperation Szenarien, wie z.B. robotergest{\"u}tzte Feuerwehreins{\"a}tze. Hierbei wird ein Konzept und eine Architektur f{\"u}r ein System zur Unterst{\"u}tzung von Teleoperation von Mensch-Roboter Teams vorgestellt. Die Anforderungen an Informationsaustausch und -verarbeitung, insbesondere f{\"u}r die Anwendung Rettungseinsatz, werden ausgearbeitet. Weiterhin wird das Design der Benutzerschnittstellen f{\"u}r Mensch-Roboter Teams dargestellt und Prinzipien f{\"u}r Teleoperation-Systeme und Benutzerschnittstellen erarbeitet. Alle Studien und Ans{\"a}tze werden in einem Prototypen-System implementiert und in verschiedenen Benutzertests abgesichert. 
Erweiterungsm{\"o}glichkeiten zum Einbinden von 3D Sensordaten und die Darstellung auf Stereovisualisierungssystemen werden gezeigt.}, subject = {Robotik}, language = {en} } @phdthesis{Spoerhase2009, author = {Spoerhase, Joachim}, title = {Competitive and Voting Location}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-52978}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {We consider competitive location problems where two competing providers place their facilities sequentially and users can decide between the competitors. We assume that both competitors act non-cooperatively and aim at maximizing their own benefits. We investigate the complexity and approximability of such problems on graphs, in particular on simple graph classes such as trees and paths. We also develop fast algorithms for single competitive location problems where each provider places a single facility. Voting location, in contrast, aims at identifying locations that meet social criteria. The provider wants to satisfy the users (customers) of the facility to be opened. In general, there is no location that is favored by all users. Therefore, a satisfactory compromise has to be found. To this end, criteria arising from voting theory are considered. The solution of the location problem is understood as the winner of a virtual election among the users of the facilities, in which the potential locations play the role of the candidates and the users represent the voters. Competitive and voting location problems turn out to be closely related.}, subject = {Standortproblem}, language = {en} } @phdthesis{Saska2009, author = {Saska, Martin}, title = {Trajectory planning and optimal control for formations of autonomous robots}, isbn = {978-3-923959-56-3}, doi = {10.25972/OPUS-4622}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-53175}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {In this thesis, we present novel approaches for formation driving of nonholonomic robots and optimal trajectory planning to reach a target region. The methods consider a static known map of the environment as well as unknown and dynamic obstacles detected by sensors of the formation. The algorithms are based on leader following techniques, where the formation of car-like robots is maintained in a shape determined by curvilinear coordinates. Beyond this, the general methods of formation driving are specialized and extended for an application of airport snow shoveling. Detailed descriptions of the algorithms complemented by relevant stability and convergence studies will be provided in the following chapters. Furthermore, the applicability of the methods is verified by various simulations in existing robotic environments and also by a hardware experiment.}, subject = {Autonomer Roboter}, language = {en} } @phdthesis{KuhnAndriotti2009, author = {Kuhn Andriotti, Gustavo}, title = {Prospect Theory Multi-Agent Based Simulations for Non-Rational Route Choice Decision Making Modelling}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-40483}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {This work deals with Multi-Agent Based Simulations (MASim) and non-rational behaviour. This non-rational behaviour is here based on the Prospect Theory [KT79] (PT), which is compared to the rational behaviour in the Expected Utility Theory [vNM07] (EUT). This model was used to design a modified Q-Learning [Wat89, WD92] algorithm. The PT based Q-Learning was then integrated into a proposed agent architecture.
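A hedged sketch of how a Prospect Theory value function can be folded into a Q-Learning update is given below; the parameter values are the canonical Kahneman/Tversky estimates and the update rule is a generic illustration, not necessarily the exact formulation proposed in the thesis.

# Python sketch: PT-shaped rewards inside a temporal-difference update for route choice.
def pt_value(x, alpha=0.88, beta=0.88, lam=2.25):
    # Concave for gains, convex and steeper (loss aversion) for losses.
    return x ** alpha if x >= 0 else -lam * ((-x) ** beta)

def q_update(q, state, action, reward, next_state,
             actions=("route_A", "route_B"), lr=0.1, gamma=0.95):
    best_next = max(q.get((next_state, a), 0.0) for a in actions)
    target = pt_value(reward) + gamma * best_next      # reward filtered through PT
    q[(state, action)] = q.get((state, action), 0.0) + lr * (target - q.get((state, action), 0.0))

q = {}
q_update(q, "home", "route_A", reward=-4.0, next_state="work")  # 4 minutes slower than the reference trip
print(q)

Passing travel-time deviations through pt_value makes losses loom larger than equal gains, which is the behavioural asymmetry that the EUT-based model lacks.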
Because much attention is given to a limited interpretation of Simon's definition of bounded-rationality, this interpretation is broadened here. Both theories, rationality and non-rationality, are compared and the discordance in their results is discussed. The main contribution of this work is to show that an alternative to the EUT is available that is more suitable for modelling human decision-makers. The evidence shows that rationality is not appropriate for modelling persons. Therefore, instead of fine-tuning the existing model, the use of another one is proposed and evaluated. To tackle this, the route choice problem was adopted to perform the experiments. To evaluate the proposed model three traffic scenarios are simulated and their results analysed.}, subject = {Mehragentensystem}, language = {en} } @phdthesis{Hossfeld2009, author = {Hoßfeld, Tobias}, title = {Performance Evaluation of Future Internet Applications and Emerging User Behavior}, doi = {10.25972/OPUS-3067}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-37570}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {In future telecommunication systems, we observe an increasing diversity of access networks. The separation of transport services and applications or services leads to multi-network services, i.e., a future service has to work transparently to the underlying network infrastructure. Multi-network services with edge-based intelligence, like P2P file sharing or the Skype VoIP service, impose new traffic control paradigms on the future Internet. Such services adapt the amount of consumed bandwidth to reach different goals. A selfish behavior tries to keep the QoE of a single user above a certain level. Skype, for instance, repeats voice samples depending on the perceived end-to-end loss. From the viewpoint of a single user, the replication of voice data overcomes the degradation caused by packet loss and makes it possible to maintain a certain QoE. The cost for this achievement is a higher amount of consumed bandwidth. However, if the packet loss is caused by congestion in the network, this additionally required bandwidth even worsens the network situation. Altruistic behavior, on the other hand, would reduce the bandwidth consumption in such a way that the pressure on the network is released and thus the overall network performance is improved. In this monograph, we analyzed the impact of the overlay, P2P, and QoE paradigms in future Internet applications and the interactions with the observed user behavior. The shift of intelligence toward the edge is accompanied by a change in the emerging user behavior and traffic profile, as well as a change from multi-service networks to multi-network services. In addition, edge-based intelligence may lead to higher dynamics in the network topology, since the applications are often controlled by an overlay network, which can rapidly change in size and structure as new nodes can leave or join the overlay network in an entirely distributed manner. As a result, we found that the performance evaluation of such services provides new challenges, since novel key performance factors have to be first identified, like pollution of P2P systems, and appropriate models of the emerging user behavior are required, e.g. taking into account user impatience. As a common denominator of the presented studies in this work, we focus on a user-centric view when evaluating the performance of future Internet applications.
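The IQX hypothesis formulated later in this abstract relates QoE to a QoS impairment through an exponential law; as a hedged reminder of its generic form (the parameters \alpha, \beta, \gamma are fitted per application and per QoS metric and are not reproduced here):

QoE = \alpha \cdot e^{-\beta \cdot QoS} + \gamma, \qquad \frac{\partial\, QoE}{\partial\, QoS} = -\beta \, (QoE - \gamma)

The second expression shows the defining property: the sensitivity of QoE to an additional QoS degradation depends on the currently perceived quality level.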
For a subscriber of a certain application or service, the perceived quality expressed as QoE will be the major criterion of the user's satisfaction with the network and service providers. We selected three different case studies and characterized the application's performance from the end user's point of view. Those are (1) cooperation in mobile P2P file sharing networks, (2) modeling of online TV recording services, and (3) QoE of edge-based VoIP applications. The user-centric approach facilitates the development of new mechanisms to overcome problems arising from the changing user behavior. An example is the proposed CycPriM cooperation strategy, which copes with selfish user behavior in mobile P2P file sharing system. An adequate mechanism has also been shown to be efficient in a heterogeneous B3G network with mobile users conducting vertical handovers between different wireless access technologies. The consideration of the user behavior and the user perceived quality guides to an appropriate modeling of future Internet applications. In the case of the online TV recording service, this enables the comparison between different technical realizations of the system, e.g. using server clusters or P2P technology, to properly dimension the installed network elements and to assess the costs for service providers. Technologies like P2P help to overcome phenomena like flash crowds and improve scalability compared to server clusters, which may get overloaded in such situations. Nevertheless, P2P technology invokes additional challenges and different user behavior to that seen in traditional client/server systems. Beside the willingness to share files and the churn of users, peers may be malicious and offer fake contents to disturb the data dissemination. Finally, the understanding and the quantification of QoE with respect to QoS degradations permits designing sophisticated edge-based applications. To this end, we identified and formulated the IQX hypothesis as an exponential interdependency between QoE and QoS parameters, which we validated for different examples. The appropriate modeling of the emerging user behavior taking into account the user's perceived quality and its interactions with the overlay and P2P paradigm will finally help to design future Internet applications.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Hess2009, author = {Hess, Martin}, title = {Motion coordination and control in systems of nonholonomic autonomous vehicles}, isbn = {978-3-923959-55-6}, doi = {10.25972/OPUS-3794}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46442}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {This work focuses on coordination methods and the control of motion in groups of nonholonomic wheeled mobile robots, in particular of the car-like type. These kind of vehicles are particularly restricted in their mobility. In the main part of this work the two problems of formation motion coordination and of rendezvous in distributed multi-vehicle systems are considered. We introduce several enhancements to an existing motion planning approach for formations of nonholonomic mobile robots. Compared to the original method, the extended approach is able to handle time-varying reference speeds as well as adjustments of the formation's shape during reference trajectory segments with continuously differentiable curvature. Additionally, undesired discontinuities in the speed and steering profiles of the vehicles are avoided. 
Further, the scenario of snow shoveling on an airfield by utilizing multiple formations of autonomous snowplows is discussed. We propose solutions to the subproblems of motion planning for the formations and tracking control for the individual vehicles. While all situations that might occur have been tested in a simulation environment, we also verified the developed tracking controller in real robot hardware experiments. The task of the rendezvous problem in groups of car-like robots is to drive all vehicles to a common position by means of decentralized control laws. Typically there exists no direct interaction link between all of the vehicles. In this work we present decentralized rendezvous control laws for vehicles with free and with bounded steering. The convergence properties of the approaches are analyzed by utilizing Lyapunov based techniques. Furthermore, they are evaluated within various simulation experiments, while the bounded steering case is also verified within laboratory hardware experiments. Finally we introduce a modification to the bounded steering system that increases the convergence speed at the expense of a higher traveled distance of the vehicles.}, subject = {Robotik}, language = {en} } @misc{Kaempgen2009, type = {Master Thesis}, author = {Kaempgen, Benedikt}, title = {Deskriptives Data-Mining f{\"u}r Entscheidungstr{\"a}ger: Eine Mehrfachfallstudie}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46343}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {Das Potenzial der Wissensentdeckung in Daten wird h{\"a}ufig nicht ausgenutzt, was haupts{\"a}chlich auf Barrieren zwischen dem Entwicklerteam und dem Endnutzer des Data-Mining zur{\"u}ckzuf{\"u}hren ist. In dieser Arbeit wird ein transparenter Ansatz zum Beschreiben und Erkl{\"a}ren von Daten f{\"u}r Entscheidungstr{\"a}ger vorgestellt. In Entscheidungstr{\"a}ger-zentrierten Aufgaben werden die Projektanforderungen definiert und die Ergebnisse zu einer Geschichte zusammengestellt. Eine Anforderung besteht dabei aus einem tabellarischen Bericht und ggf. Mustern in seinem Inhalt, jeweils verst{\"a}ndlich f{\"u}r einen Entscheidungstr{\"a}ger. Die technischen Aufgaben bestehen aus einer Datenpr{\"u}fung, der Integration der Daten in einem Data-Warehouse sowie dem Generieren von Berichten und dem Entdecken von Mustern wie in den Anforderungen beschrieben. Mehrere Data-Mining-Projekte k{\"o}nnen durch Wissensmanagement sowie eine geeignete Infrastruktur voneinander profitieren. Der Ansatz wurde in zwei Projekten unter Verwendung von ausschließlich Open-Source-Software angewendet.}, subject = {Data Mining}, language = {de} } @phdthesis{Oechsner2010, author = {Oechsner, Simon}, title = {Performance Challenges and Optimization Potential of Peer-to-Peer Overlay Technologies}, doi = {10.25972/OPUS-4159}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50015}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {In today's Internet, building overlay structures to provide a service is becoming more and more common. This approach allows for the utilization of client resources, thus being more scalable than a client-server model in this respect. However, in these architectures the quality of the provided service depends on the clients and is therefore more complex to manage. Resource utilization, both at the clients themselves and in the underlying network, determine the efficiency of the overlay application. 
Here, a trade-off exists between the resource providers and the end users that can be tuned via overlay mechanisms. Thus, resource management and traffic management is always quality-of-service management as well. In this monograph, the three currently significant and most widely used overlay types in the Internet are considered. These overlays are implemented in popular applications which only recently have gained importance. Thus, these overlay networks still face real-world technical challenges which are of high practical relevance. We identify the specific issues for each of the considered overlays, and show how their optimization affects the trade-offs between resource efficiency and service quality. Thus, we supply new insights and system knowledge that is not provided by previous work.}, subject = {Overlay-Netz}, language = {en} } @phdthesis{Henjes2010, author = {Henjes, Robert}, title = {Performance Evaluation of Publish/Subscribe Middleware Architectures}, doi = {10.25972/OPUS-4536}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-53388}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {While developing modern applications, it is necessary to ensure an efficient and performant communication between different applications. In current environments, a middleware software is used, which supports the publish/subscribe communication pattern. Using this communication pattern, a publisher sends information encapsulated in messages to the middleware. A subscriber registers its interests at the middleware. The monograph describes three different steps to determine the performance of such a system. In a first step, the message throughput performance of a publish/subscribe in different scenarios is measured using a Java Message Service (JMS) based implementation. In the second step the maximum achievable message throughput is described by adapted models depending on the filter complexity and the replication grade. Using the model, the performance characteristics of a specific system in a given scenario can be determined. These numbers are used for the queuing model described in the third part of the thesis, which supports the dimensioning of a system in realistic scenarios. Additionally, we introduce a method to approximate an M/G/1 system numerically in an efficient way, which can be used for real time analysis to predict the expected performance in a certain scenario. Finally, the analytical model is used to investigate different possibilities to ensure the scalability of the maximum achievable message throughput of the overall system.}, subject = {Middleware}, language = {en} } @inproceedings{OPUS4-4233, title = {9. Fachgespr{\"a}ch Sensornetze der GI/ITG Fachgruppe Kommunikation und Verteilte Systeme}, editor = {Kolla, Reiner}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51106}, year = {2010}, abstract = {J{\"a}hrliches Fachgespr{\"a}ch zu Sensornetzen der GI/ITG Fachgruppe Kommunikation und Verteilte Systeme, 16. - 17. 
September 2010, Universit{\"a}t W{\"u}rzburg}, subject = {Drahtloses Sensorsystem}, language = {mul} } @phdthesis{Zeiger2010, author = {Zeiger, Florian}, title = {Internet Protocol based networking of mobile robots}, isbn = {978-3-923959-59-4}, doi = {10.25972/OPUS-4661}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-54776}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This work is composed of three main parts: remote control of mobile systems via Internet, ad-hoc networks of mobile robots, and remote control of mobile robots via 3G telecommunication technologies. The first part gives a detailed state of the art and a discussion of the problems to be solved in order to teleoperate mobile robots via the Internet. The focus of the application to be realized is set on a distributed tele-laboratory with remote experiments on mobile robots which can be accessed world-wide via the Internet. Therefore, analyses of the communication link are used in order to realize a robust system. The developed and implemented architecture of this distributed tele-laboratory allows for a smooth access also with a variable or low link quality. The second part covers the application of ad-hoc networks for mobile robots. The networking of mobile robots via mobile ad-hoc networks is a very promising approach to realize integrated telematic systems without relying on preexisting communication infrastructure. Relevant civilian application scenarios are for example in the area of search and rescue operations where first responders are supported by multi-robot systems. Here, mobile robots, humans, and also existing stationary sensors can be connected very fast and efficiently. Therefore, this work investigates and analyses the performance of different ad-hoc routing protocols for IEEE 802.11 based wireless networks in relevant scenarios. The analysis of the different protocols allows for an optimization of the parameter settings in order to use these ad-hoc routing protocols for mobile robot teleoperation. Guidelines for the realization of such telematics systems are given. In addition, traffic shaping mechanisms on the application layer are presented which allow for a more efficient use of the communication link. An additional application scenario, the integration of a small size helicopter into an IP based ad-hoc network, is presented. The teleoperation of mobile robots via 3G telecommunication technologies is addressed in the third part of this work. The high availability, high mobility, and the high bandwidth provide a very interesting opportunity to realize scenarios for the teleoperation of mobile robots or industrial remote maintenance. This work analyses important parameters of the UMTS communication link and also investigates the characteristics of different data streams. These analyses are used to give guidelines which are necessary for the realization of industrial remote maintenance or mobile robot teleoperation scenarios.
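The application-layer traffic shaping mentioned above can be pictured with a minimal token-bucket sketch; the class name and the rate and burst figures are assumed illustration values, not the concrete mechanism implemented in the thesis.

# Python sketch: a sender emits a frame only if enough tokens have accumulated,
# which smooths bursts over a constrained wireless link.
import time

class TokenBucket:
    def __init__(self, rate_bytes_per_s, burst_bytes):
        self.rate, self.capacity = rate_bytes_per_s, burst_bytes
        self.tokens, self.last = float(burst_bytes), time.monotonic()

    def allow(self, packet_bytes):
        now = time.monotonic()
        self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if self.tokens >= packet_bytes:
            self.tokens -= packet_bytes
            return True
        return False              # the caller defers or drops the frame

shaper = TokenBucket(rate_bytes_per_s=48_000, burst_bytes=16_000)
print(shaper.allow(12_000), shaper.allow(12_000))  # the second frame has to wait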
All the results and guidelines for the design of telematic systems in this work were derived from analyses and experiments with real hardware.}, subject = {Robotik}, language = {en} } @phdthesis{Sauer2010, author = {Sauer, Markus}, title = {Mixed-Reality for Enhanced Robot Teleoperation}, isbn = {978-3-923959-67-9}, doi = {10.25972/OPUS-4666}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55083}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {In den letzten Jahren ist die Forschung in der Robotik soweit fortgeschritten, dass die Mensch-Maschine Schnittstelle zunehmend die kritischste Komponente f{\"u}r eine hohe Gesamtperformanz von Systemen zur Navigation und Koordination von Robotern wird. In dieser Dissertation wird untersucht wie Mixed-Reality Technologien f{\"u}r Nutzerschnittstellen genutzt werden k{\"o}nnen, um diese Gesamtperformanz zu erh{\"o}hen. Hierzu werden Konzepte und Technologien entwickelt, die durch Evaluierung mit Nutzertest ein optimiertes und anwenderbezogenes Design von Mixed-Reality Nutzerschnittstellen erm{\"o}glichen. Er werden somit sowohl die technische Anforderungen als auch die menschlichen Faktoren f{\"u}r ein konsistentes Systemdesign ber{\"u}cksichtigt. Nach einer detaillierten Problemanalyse und der Erstellung eines Systemmodels, das den Menschen als Schl{\"u}sselkomponente mit einbezieht, wird zun{\"a}chst die Anwendung der neuartigen 3D-Time-of-Flight Kamera zur Navigation von Robotern, aber auch f{\"u}r den Einsatz in Mixed-Reality Schnittstellen analysiert und optimiert. Weiterhin wird gezeigt, wie sich der Netzwerkverkehr des Videostroms als wichtigstes Informationselement der meisten Nutzerschnittstellen f{\"u}r die Navigationsaufgabe auf der Netzwerk Applikationsebene in typischen Multi-Roboter Netzwerken mit dynamischen Topologien und Lastsituation optimieren l{\"a}sst. Hierdurch ist es m{\"o}glich in sonst in sonst typischen Ausfallszenarien den Videostrom zu erhalten und die Bildrate zu stabilisieren. Diese fortgeschrittenen Technologien werden dann auch dem entwickelten Konzept der generischen 3D Mixed Reality Schnittselle eingesetzt. Dieses Konzept erm{\"o}glicht eine integrierte 3D Darstellung der verf{\"u}gbaren Information, so dass r{\"a}umliche Beziehungen von Informationen aufrechterhalten werden und somit die Anzahl der mentalen Transformationen beim menschlichen Bediener reduziert wird. Gleichzeitig werden durch diesen Ansatz auch immersive Stereo Anzeigetechnologien unterst{\"u}tzt, welche zus{\"a}tzlich das r{\"a}umliche Verst{\"a}ndnis der entfernten Situation f{\"o}rdern. Die in der Dissertation vorgestellten und evaluierten Ans{\"a}tze nutzen auch die Tatsache, dass sich eine lokale Autonomie von Robotern heute sehr robust realisieren l{\"a}sst. Dies wird zum Beispiel zur Realisierung eines Assistenzsystems mit variabler Autonomie eingesetzt. Hierbei erh{\"a}lt der Fernbediener {\"u}ber eine Kraftr{\"u}ckkopplung kombiniert mit einer integrierten Augmented Reality Schnittstelle, einen Eindruck {\"u}ber die Situation am entfernten Arbeitsbereich, aber auch {\"u}ber die aktuelle Navigationsintention des Roboters. Die durchgef{\"u}hrten Nutzertests belegen die signifikante Steigerung der Navigationsperformanz durch den entwickelten Ansatz. Die robuste lokale Autonomie erm{\"o}glicht auch den in der Dissertation eingef{\"u}hrten Ansatz der pr{\"a}diktiven Mixed-Reality Schnittstelle. 
Die durch diesen Ansatz entkoppelte Regelschleife {\"u}ber den Menschen erm{\"o}glicht es die Sichtbarkeit von unvermeidbaren Systemverz{\"o}gerungen signifikant zu reduzieren. Zus{\"a}tzlich k{\"o}nnen durch diesen Ansatz beide f{\"u}r die Navigation hilfreichen Blickwinkel in einer 3D-Nutzerschnittstelle kombiniert werden - der exozentrische Blickwinkel und der egozentrische Blickwinkel als Augmented Reality Sicht.}, subject = {Mobiler Roboter}, language = {en} } @phdthesis{Klein2010, author = {Klein, Alexander}, title = {Performance Issues of MAC and Routing Protocols in Wireless Sensor Networks}, doi = {10.25972/OPUS-4465}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-52870}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {The focus of this work lies on the communication issues of Medium Access Control (MAC) and routing protocols in the context of WSNs. The communication challenges in these networks mainly result from high node density, low bandwidth, low energy constraints and the hardware limitations in terms of memory, computational power and sensing capabilities of low-power transceivers. For this reason, the structure of WSNs is always kept as simple as possible to minimize the impact of communication issues. Thus, the majority of WSNs apply a simple one hop star topology since multi-hop communication has high demands on the routing protocol since it increases the bandwidth requirements of the network. Moreover, medium access becomes a challenging problem due to the fact that low-power transceivers are very limited in their sensing capabilities. The first contribution is represented by the Backoff Preamble-based MAC Protocol with Sequential Contention Resolution (BPS-MAC) which is designed to overcome the limitations of low-power transceivers. Two communication issues, namely the Clear Channel Assessment (CCA) delay and the turnaround time, are directly addressed by the protocol. The CCA delay represents the period of time which is required by the transceiver to detect a busy radio channel while the turnaround time specifies the period of time which is required to switch between receive and transmit mode. Standard Carrier Sense Multiple Access (CSMA) protocols do not achieve high performance in terms of packet loss if the traffic is highly correlated due to the fact that the transceiver is not able to sense the medium during the switching phase. Therefore, a node may start to transmit data while another node is already transmitting since it has sensed an idle medium right before it started to switch its transceiver from receive to transmit mode. The BPS-MAC protocol uses a new sequential preamble-based medium access strategy which can be adapted to the hardware capabilities of the transceivers. The protocol achieves a very low packet loss rate even in wireless networks with high node density and event-driven traffic without the need of synchronization. This makes the protocol attractive to applications such as structural health monitoring, where event suppression is not an option. Moreover, acknowledgments or complex retransmission strategies become almost unnecessary since the sequential preamble-based contention resolution mechanism minimizes the collision probability. However, packets can still be lost as a consequence of interference or other issues which affect signal propagation. The second contribution consists of a new routing protocol which is able to quickly detect topology changes without generating a large amount of overhead. 
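The effect of the CCA delay and turnaround time described above can be quantified with a hedged back-of-the-envelope estimate: during this hardware-induced blind window a node cannot detect a competing transmission, so with Poisson packet generation the collision risk grows with node density and load. The formula and the numbers below are a textbook-style illustration with assumed transceiver values, not an analysis taken from the thesis.

# Python sketch: collision risk inside the blind window w = CCA delay + turnaround time.
from math import exp

def blind_window_collision(n_nodes, lam_per_s, cca_delay_s, turnaround_s):
    # Probability that at least one of the other n-1 nodes also starts to send
    # within the window, assuming independent Poisson traffic of rate lam per node.
    w = cca_delay_s + turnaround_s
    return 1.0 - exp(-(n_nodes - 1) * lam_per_s * w)

# 50 sensor nodes reacting to the same event with about 20 packets/s each,
# 128 us CCA delay and 192 us turnaround time (assumed, transceiver-dependent values).
print(round(blind_window_collision(50, 20.0, 128e-6, 192e-6), 3))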
The key characteristics of the Statistic-Based Routing (SBR) protocol are high end-to-end reliability (in fixed and mobile networks), load balancing capabilities, a smooth continuous routing metric, quick adaptation to changing network conditions, low processing and memory requirements, low overhead, support of unidirectional links and simplicity. The protocol can establish routes in a hybrid or a proactive mode and uses an adaptive continuous routing metric which makes it very flexible in terms of scalability while maintaining stable routes. The hybrid mode is optimized for low-power WSNs since routes are only established on demand. The difference of the hybrid mode to reactive routing strategies is that routing messages are periodically transmitted to maintain already established routes. However, the protocol stops the transmission of routing messages if no data packets are transmitted for a certain time period in order to minimize the routing overhead and the energy consumption. The proactive mode is designed for high data rate networks which have less energy constraints. In this mode, the protocol periodically transmits routing messages to establish routes in a proactive way even in the absence of data traffic. Thus, nodes in the network can immediately transmit data since the route to the destination is already established in advance. In addition, a new delay-based routing message forwarding strategy is introduced. The forwarding strategy is part of SBR but can also be applied to many routing protocols in order to modify the established topology. The strategy can be used, e.g. in mobile networks, to decrease the packet loss by deferring routing messages with respect to the neighbor change rate. Thus, nodes with a stable neighborhood forward messages faster than nodes within a fast changing neighborhood. As a result, routes are established through nodes with correlated movement which results in fewer topology changes due to higher link durations.}, subject = {Routing}, language = {en} } @phdthesis{Fehler2010, author = {Fehler, Manuel}, title = {Kalibrierung Agenten-basierter Simulationen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64762}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {In der vorliegenden Arbeit wird das Problem der Kalibrierung Agenten-basierter Simulationen (ABS) behandelt, also das Problem, die Parameterwerte eines Agenten-basierten Simulationsmodells so einzustellen, dass valides Simulationsverhalten erreicht wird. Das Kalibrierungsproblem f{\"u}r Simulationen an sich ist nicht neu und ist im Rahmen klassischer Simulationsparadigmen, wie z.B. der Makro-Simulation, fester Bestandteil der Forschung. Im Vergleich zu den dort betrachteten Kalibrierungsproblemen zeichnet sich das Kalibrierungsproblem f{\"u}r ABS jedoch durch eine Reihe zus{\"a}tzlicher Herausforderungen aus, welche die direkte Anwendung existierender Kalibrierungsverfahren in begrenzter Zeit erschweren, bzw. nicht mehr sinnvoll zulassen. Die L{\"o}sung dieser Probleme steht im Zentrum dieser Dissertation: Das Ziel besteht darin, den Nutzer bei der Kalibrierung von ABS auf der Basis von unzureichenden, potentiell fehlerhaften Daten und Wissen zu unterst{\"u}tzen. Dabei sollen drei Hauptprobleme gel{\"o}st werden: 1)Vereinfachung der Kalibrierung großer Agenten-Parametermengen auf der Mikro- Ebene in Agenten-basierten Simulationen durch Ausnutzung der spezifischen Struktur von ABS (n{\"a}mlich dem Aufbau aus einer Menge von Agentenmodellen). 
2)Kalibrierung Agenten-basierter Simulationen, so dass auf allen relevanten Beobachtungsebenen valides Simulationsverhalten erzeugt wird (mindestens Mikro und Makro-Ebene). Als erschwerende Randbedingung muss die Kalibrierung unter der Voraussetzung einer Makro-Mikro-Wissensl{\"u}cke durchgef{\"u}hrt werden. 3)Kalibrierung Agenten-basierter Simulationen auf der Mikro-Ebene unter der Voraussetzung, dass zur Kalibrierung einzelner Agentenmodelle nicht ausreichend und potentiell verf{\"a}lschte Daten zur Verhaltensvalidierung zur Verf{\"u}gung stehen. Hierzu wird in dieser Arbeit das sogenannte Makro-Mikro-Verfahren zur Kalibrierung von Agenten-basierten Simulationen entwickelt. Das Verfahren besteht aus einem Basisverfahren, das im Verlauf der Arbeit um verschiedene Zusatzverfahren erweitert wird. Das Makro-Mikro-Verfahren und seine Erweiterungen sollen dazu dienen, die Modellkalibrierung trotz stark verrauschter Daten und eingeschr{\"a}nktem Wissen {\"u}ber die Wirkungszusammenh{\"a}nge im Originalsystem geeignet zu erm{\"o}glichen und dabei den Kalibrierungsprozess zu beschleunigen: 1) Makro-Mikro-Kalibrierungsverfahren: Das in dieser Arbeit entwickelte Makro- Mikro-Verfahren unterst{\"u}tzt den Nutzer durch eine kombinierte Kalibrierung auf der Mikro- und der Makro-Beobachtungsebene, die gegebenenfalls durch Zwischenebenen erweitert werden kann. Der Grundgedanke des Verfahrens besteht darin, das Kalibrierungsproblem in eines auf aggregierter Verhaltensebene und eines auf der Ebene des Mikro-Agentenverhaltens aufzuteilen. Auf der Makro-Ebene wird nach validen idealen aggregierten Verhaltensmodellen (IVM) der Agenten gesucht. Auf der Mikro-Ebene wird versucht die individuellen Modelle der Agenten auf Basis des erw{\"u}nschten Gesamtverhaltens und der ermittelten IVM so zu kalibrieren, das insgesamt Simulationsverhalten entsteht, das sowohl auf Mikro- als auch auf Makro-Ebene valide ist. 2) Erweiterung 1: Robuste Kalibrierung: Um den Umgang mit potentiell verrauschten Validierungskriterien (d.h. mit verrauschten Daten {\"u}ber ein Originalsystem, auf denen die Validierungskriterien der Simulation beruhen) und Modellteilen w{\"a}hrend der Kalibrierung von ABS zu erm{\"o}glichen, wird eine robuste Kalibrierungstechnik zur Anwendung im Makro-Mikro-Verfahren entwickelt. 3) Erweiterung 2: Kalibrierung mit Heterogenit{\"a}tssuche: Als zweite Erweiterung des Makro-Mikro-Verfahrens wird ein Verfahren entwickelt, das das Problem des unklaren Detaillierungsgrades von ABS auf der Ebene der Parameterwerte adressiert. Prinzipiell kann zwar jeder Agent unterschiedliche Parameterwerte verwenden, obwohl eine geringere Heterogenit{\"a}t zur Erzeugung validen Verhaltens ausreichend w{\"a}re. Die entwickelte Erweiterung versucht, w{\"a}hrend der Kalibrierung, eine geeignete Heterogenit{\"a}tsauspr{\"a}gung f{\"u}r die Parameterwerte der Agenten zu ermitteln. Unter einer Heterogenit{\"a}tsauspr{\"a}gung wird dabei eine Einteilung der simulierten Agenten in Gruppen mit jeweils gleichen Parameterwerten verstanden. 
Die Heterogenit{\"a}tssuche dient dazu, einen Kompromiss zu finden zwischen der Notwendigkeit, sehr große Parametersuchr{\"a}ume durchsuchen zu m{\"u}ssen und gleichzeitig den Suchraum so klein wie m{\"o}glich halten zu wollen.}, subject = {Computersimulation}, language = {de} } @phdthesis{Pries2010, author = {Pries, Jan Rastin}, title = {Performance Optimization of Wireless Infrastructure and Mesh Networks}, doi = {10.25972/OPUS-3723}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46097}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {Future broadband wireless networks should be able to support not only best effort traffic but also real-time traffic with strict Quality of Service (QoS) constraints. In addition, their available resources are scarce and limit the number of users. To facilitate QoS guarantees and increase the maximum number of concurrent users, wireless networks require careful planning and optimization. In this monograph, we studied three aspects of performance optimization in wireless networks: resource optimization in WLAN infrastructure networks, quality of experience control in wireless mesh networks, and planning and optimization of wireless mesh networks. An adaptive resource management system is required to effectively utilize the limited resources on the air interface and to guarantee QoS for real-time applications. Thereby, both WLAN infrastructure and WLAN mesh networks have to be considered. An a-priori setting of the access parameters is not meaningful due to the contention-based medium access and the high dynamics of the system. Thus, a management system is required which dynamically adjusts the channel access parameters based on the network load. While this is sufficient for wireless infrastructure networks, interferences on neighboring paths and self-interferences have to be considered for wireless mesh networks. In addition, a careful channel allocation and route assignment is needed. Due to the large parameter space, standard optimization techniques fail for optimizing large wireless mesh networks. In this monograph, we reveal that biology-inspired optimization techniques, namely genetic algorithms, are well suited for the planning and optimization of wireless mesh networks. Although genetic algorithms do not always find the optimal solution, we show that with a good parameter set for the genetic algorithm, the overall throughput of the wireless mesh network can be significantly improved while still sharing the resources fairly among the users.}, subject = {IEEE 802.11}, language = {en} } @phdthesis{Zhai2010, author = {Zhai, Xiaomin}, title = {Design, Development and Evaluation of a Virtual Classroom and Teaching Contents for Bernoulli Stochastics}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56106}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This thesis is devoted to Bernoulli Stochastics, which was initiated by Jakob Bernoulli more than 300 years ago with his masterpiece 'Ars conjectandi', which can be translated as 'Science of Prediction'. Thus, Jakob Bernoulli's Stochastics focus on prediction in contrast to the later emerging disciplines probability theory, statistics and mathematical statistics. Only recently Jakob Bernoulli's focus was taken up by von Collani, who developed a unified theory of uncertainty aiming at making reliable and accurate predictions.
In this thesis, teaching material as well as a virtual classroom are developed for fostering ideas and techniques initiated by Jakob Bernoulli and elaborated by Elart von Collani. The thesis is part of an extensively construed project called 'Stochastikon' aiming at introducing Bernoulli Stochastics as a unified science of prediction and measurement under uncertainty. This ambitious aim shall be reached by the development of an internet-based comprehensive system offering the science of Bernoulli Stochastics on any level of application. So far it is planned that the 'Stochastikon' system (http://www.stochastikon.com/) will consist of five subsystems. Two of them are developed and introduced in this thesis. The first one is the e-learning programme 'Stochastikon Magister' and the second one 'Stochastikon Graphics' that provides the entire Stochastikon system with graphical illustrations. E-learning is the outcome of merging education and internet techniques. E-learning is characterized by the fact that teaching and learning are independent of place and time and of the availability of specially trained teachers. Knowledge offering as well as knowledge transferring are realized by using modern information technologies. Nowadays more and more e-learning environments are based on the internet as the primary tool for communication and presentation. E-learning presentation tools are for instance text files, pictures, graphics, audio and videos, which can be networked with each other. There could be no limit as to the access to teaching contents. Moreover, the students can adapt the speed of learning to their individual abilities. E-learning is particularly appropriate for newly arising scientific and technical disciplines, which generally cannot be presented by traditional learning methods sufficiently well, because neither trained teachers nor textbooks are available. The first part of this dissertation introduces the state of the art of e-learning in statistics, since statistics and Bernoulli Stochastics are both based on probability theory and exhibit many similar features. Since Stochastikon Magister is the first e-learning programme for Bernoulli Stochastics, educational statistics systems are selected for the purpose of comparison and evaluation. This makes sense as both disciplines are an attempt to handle uncertainty and use methods that often can be directly compared. The second part of this dissertation is devoted to Bernoulli Stochastics. This part aims at outlining the content of two courses, which have been developed for the anticipated e-learning programme Stochastikon Magister in order to show the difficulties in teaching, understanding and applying Bernoulli Stochastics. The third part discusses the realization of the e-learning programme Stochastikon Magister, its design and implementation, which aims at offering a systematic learning of principles and techniques developed in Bernoulli Stochastics. The resulting e-learning programme differs from the commonly developed e-learning programmes as it is an attempt to provide a virtual classroom that simulates all the functions of real classroom teaching. This is in general not necessary, since most of the e-learning programmes aim at supporting existing classroom teaching. The fourth part presents two empirical evaluations of Stochastikon Magister. The evaluations are performed by means of comparisons between traditional classroom learning in statistics and e-learning of Bernoulli Stochastics.
The aim is to assess the usability and learnability of Stochastikon Magister. Finally, the fifth part of this dissertation is added as an appendix. It refers to Stochastikon Graphics, the fifth component of the entire Stochastikon system. Stochastikon Graphics provides the other components with graphical representations of concepts, procedures and results obtained or used in the framework of Bernoulli Stochastics. The primary aim of this thesis is the development of appropriate software for the anticipated e-learning environment meant for Bernoulli Stochastics, while the preparation of the necessary teaching material constitutes only a secondary aim used for demonstrating the functionality of the e-learning platform and the scientific novelty of Bernoulli Stochastics. To this end, a first version of two teaching courses is developed, implemented and offered on-line in order to collect practical experiences. The two courses, which were developed as part of this project, are submitted as a supplement to this dissertation. By now, first experiences with the e-learning programme Stochastikon Magister have been gathered. Students of different faculties of the University of W{\"u}rzburg, as well as researchers and engineers involved in the Stochastikon project, have obtained access to Stochastikon Magister via the internet. They have registered for Stochastikon Magister and participated in the course programme. This thesis reports on two assessments of these first experiences and the results will lead to further improvements with respect to content and organization of Stochastikon Magister.}, subject = {Moment }, language = {en} } @inproceedings{SchlosserJarschelDuellietal.2010, author = {Schlosser, Daniel and Jarschel, Michael and Duelli, Michael and Hoßfeld, Tobias and Hoffmann, Klaus and Hoffmann, Marco and Morper, Hans Jochen and Jurca, Dan and Khan, Ashiq}, title = {A Use Case Driven Approach to Network Virtualization}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55611}, year = {2010}, abstract = {In today's Internet, services are very different in their requirements on the underlying transport network. In the future, this diversity will increase and it will be more difficult to accommodate all services in a single network. A possible approach to cope with this diversity within future networks is the introduction of support for running isolated networks for different services on top of a single shared physical substrate. This would also enable easy network management and ensure an economically sound operation. End-customers will readily adopt this approach as it enables new and innovative services without being expensive. In order to arrive at a concept that enables this kind of network, it needs to be designed around and constantly checked against realistic use cases. In this contribution, we present three use cases for future networks. We describe functional blocks of a virtual network architecture, which are necessary to support these use cases within the network.
Furthermore, we discuss the interfaces needed between the functional blocks and consider standardization issues that arise in order to achieve a globally consistent control and management structure of virtual networks.}, subject = {Virtualisierung}, language = {en} } @article{MontenegroDannemann2011, author = {Montenegro, Sergio and Dannemann, Frank}, title = {Experiences and Best Practice Requirements Engineering for Small Satellites}, series = {Computing Science and Technology International Journal}, volume = {1}, journal = {Computing Science and Technology International Journal}, number = {2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153307}, year = {2011}, abstract = {The design and implementation of a satellite mission is divided into several different phases. Parallel to these phases, an evolution of requirements takes place. Because so many people in different locations and from different backgrounds have to work on different subsystems concurrently, the ideas and concepts of the different subsystems and locations will diverge. We have to bring them together again. To do this we introduce synchronization points. We bring representatives from all subsystems and all locations together in a Concurrent Engineering Facility (CEF) room. Between CEF sessions the different subsystems will diverge again, but each time the divergence will be smaller. Our subjective experience from test projects suggests that these CEF sessions are most effective in the first phases of the development, from requirements engineering until the first coarse design. After the design and the concepts are fixed, the developers move on to implementation and the concept divergences will be much smaller; therefore, the CEF sessions are no longer of much help.}, language = {en} } @article{ArhondakisFrousiosIliopoulosetal.2011, author = {Arhondakis, Stilianos and Frousios, Kimon and Iliopoulos, Costas S. and Pissis, Solon P. and Tischler, German and Kossida, Sophia}, title = {Transcriptome map of mouse isochores}, series = {BMC Genomics}, volume = {12}, journal = {BMC Genomics}, number = {511}, doi = {10.1186/1471-2164-12-511}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-142773}, pages = {1-9}, year = {2011}, abstract = {Background: The availability of fully sequenced genomes and the implementation of transcriptome technologies have increased the studies investigating the expression profiles for a variety of tissues, conditions, and species. In this study, using RNA-seq data for three distinct tissues (brain, liver, and muscle), we investigate how base composition affects mammalian gene expression, an issue of prime practical and evolutionary interest. Results: We present the transcriptome map of the mouse isochores (DNA segments with a fairly homogeneous base composition) for the three different tissues and the effects of isochores' base composition on their expression activity. Our analyses also cover the relations between the genes' expression activity and their localization in the isochore families. Conclusions: This study is the first where next-generation sequencing data are used to associate the effects of both genomic and genic compositional properties to their corresponding expression activity. Our findings confirm previous results, and further support the existence of a relationship between isochores and gene expression. 
This relationship corroborates that isochores are primarily a product of evolutionary adaptation rather than a simple by-product of neutral evolutionary processes.}, language = {en} } @phdthesis{Reitwiessner2011, author = {Reitwießner, Christian}, title = {Multiobjective Optimization and Language Equations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70146}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Praktische Optimierungsprobleme beinhalten oft mehrere gleichberechtigte, sich jedoch widersprechende Kriterien. Beispielsweise will man bei einer Reise zugleich m{\"o}glichst schnell ankommen, sie soll aber auch nicht zu teuer sein. Im ersten Teil dieser Arbeit wird die algorithmische Beherrschbarkeit solcher mehrkriterieller Optimierungsprobleme behandelt. Es werden zun{\"a}chst verschiedene L{\"o}sungsbegriffe diskutiert und auf ihre Schwierigkeit hin verglichen. Interessanterweise stellt sich heraus, dass diese Begriffe f{\"u}r ein einkriterielles Problem stets gleich schwer sind, sie sich ab zwei Kriterien allerdings stark unterscheiden k{\"o}nen (außer es gilt P = NP). In diesem Zusammenhang wird auch die Beziehung zwischen Such- und Entscheidungsproblemen im Allgemeinen untersucht. Schließlich werden neue und verbesserte Approximationsalgorithmen f{\"u}r verschieden Varianten des Problems des Handlungsreisenden gefunden. Dabei wird mit Mitteln der Diskrepanztheorie eine Technik entwickelt, die ein grundlegendes Hindernis der Mehrkriteriellen Optimierung aus dem Weg schafft: Gegebene L{\"o}sungen so zu kombinieren, dass die neue L{\"o}sung in allen Kriterien m{\"o}glichst ausgewogen ist und gleichzeitig die Struktur der L{\"o}sungen nicht zu stark zerst{\"o}rt wird. Der zweite Teil der Arbeit widmet sich verschiedenen Aspekten von Gleichungssystemen f{\"u}r (formale) Sprachen. Einerseits werden konjunktive und Boolesche Grammatiken untersucht. Diese sind Erweiterungen der kontextfreien Grammatiken um explizite Durchschnitts- und Komplementoperationen. Es wird unter anderem gezeigt, dass man bei konjunktiven Grammatiken die Vereinigungsoperation stark einschr{\"a}nken kann, ohne dabei die erzeugte Sprache zu {\"a}ndern. Außerdem werden bestimmte Schaltkreise untersucht, deren Gatter keine Wahrheitswerte sondern Mengen von Zahlen berechnen. F{\"u}r diese Schaltkreise wird das {\"A}quivalenzproblem betrachtet, also die Frage ob zwei gegebene Schaltkreise die gleiche Menge berechnen oder nicht. Es stellt sich heraus, dass, abh{\"a}ngig von den erlaubten Gattertypen, die Komplexit{\"a}t des {\"A}quivalenzproblems stark variiert und f{\"u}r verschiedene Komplexit{\"a}tsklassen vollst{\"a}ndig ist, also als (parametrisierter) Vertreter f{\"u}r diese Klassen stehen kann.}, subject = {Mehrkriterielle Optimierung}, language = {en} } @phdthesis{Staehle2011, author = {Staehle, Barbara}, title = {Modeling and Optimization Methods for Wireless Sensor and Mesh Networks}, doi = {10.25972/OPUS-4967}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64884}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Im Internet der Zukunft werden Menschen nicht nur mit Menschen, sondern auch mit „Dingen", und sogar „Dinge" mit „Dingen" kommunizieren. Zus{\"a}tzlich wird das Bed{\"u}rfnis steigen, immer und {\"u}berall Zugang zum Internet zu haben. 
Folglich gewinnen drahtlose Sensornetze (WSNs) und drahtlose Mesh-Netze (WMNs) an Bedeutung, da sie Daten {\"u}ber die Umwelt ins Internet liefern, beziehungsweise einfache Internet-Zugangsm{\"o}glichkeiten schaffen. In den vier Teilen dieser Arbeit werden unterschiedliche Modellierungs- und Optimierungsmethoden f{\"u}r WSNs und WMNs vorgestellt. Der Energieverbrauch ist die wichtigste Metrik, wenn es darum geht die Kommunikation in einem WSN zu optimieren. Da sich in der Literatur sehr viele unterschiedliche Energiemodelle finden, untersucht der erste Teil der Arbeit welchen Einfluss unterschiedliche Energiemodelle auf die Optimierung von WSNs haben. Aufbauend auf diesen {\"U}berlegungen besch{\"a}ftigt sich der zweite Teil der Arbeit mit drei Problemen, die {\"u}berwunden werden m{\"u}ssen um eine standardisierte energieeffiziente Kommunikations-L{\"o}sung f{\"u}r WSNs basierend auf IEEE 802.15.4 und ZigBee zu realisieren. F{\"u}r WMNs sind beide Probleme von geringem Interesse, die Leistung des Netzes jedoch umso mehr. Der dritte Teil der Arbeit f{\"u}hrt daher Algorithmen f{\"u}r die Berechnung des Max-Min fairen (MMF) Netzwerk-Durchsatzes in WMNs mit mehreren Linkraten und Internet-Gateways ein. Der letzte Teil der Arbeit untersucht die Auswirkungen des LRA-Konzeptes. Dessen grundlegende Idee ist die folgende. Falls f{\"u}r einen Link eine niedrigere Datenrate als theoretisch m{\"o}glich verwendet wird, sinkt zwar der Link-Durchsatz, jedoch ist unter Umst{\"a}nden eine gr{\"o}ßere Anzahl von gleichzeitigen {\"U}bertragungen m{\"o}glich und der Gesamt-Durchsatz des Netzes kann sich erh{\"o}hen. Mithilfe einer analytischen LRA Formulierung und einer systematischen Studie kann gezeigt werden, dass eine netzwerkweite Zuordnung robusterer Datenraten als n{\"o}tig zu einer Erh{\"o}hung des MMF Netzwerk-Durchsatzes f{\"u}hrt. Desweitern kann gezeigt werden, dass sich LRA positiv auf die Leistungsf{\"a}higkeit eines IEEE 802.11 WMNs auswirkt und f{\"u}r die Optimierung des Netzes genutzt werden kann.}, subject = {Drahtloses Sensorsystem}, language = {en} } @phdthesis{Schmidt2011, author = {Schmidt, Marco}, title = {Ground Station Networks for Efficient Operation of Distributed Small Satellite Systems}, isbn = {978-3-923959-77-8}, doi = {10.25972/OPUS-4984}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64999}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The field of small satellite formations and constellations attracted growing attention, based on recent advances in small satellite engineering. The utilization of distributed space systems allows the realization of innovative applications and will enable improved temporal and spatial resolution in observation scenarios. On the other side, this new paradigm imposes a variety of research challenges. In this monograph new networking concepts for space missions are presented, using networks of ground stations. The developed approaches combine ground station resources in a coordinated way to achieve more robust and efficient communication links. Within this thesis, the following topics were elaborated to improve the performance in distributed space missions: Appropriate scheduling of contact windows in a distributed ground system is a necessary process to avoid low utilization of ground stations. The theoretical basis for the novel concept of redundant scheduling was elaborated in detail. 
In addition to the presented algorithm, a scheduling system was implemented; its performance was tested extensively with real-world scheduling problems. In the scope of data management, a system was developed which autonomously synchronizes data frames in ground station networks and uses this information to detect and correct transmission errors. The system was validated with hardware-in-the-loop experiments, demonstrating the benefits of the developed approach.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Schlosser2011, author = {Schlosser, Daniel}, title = {Quality of Experience Management in Virtual Future Networks}, doi = {10.25972/OPUS-5719}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69986}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Aktuell beobachten wir eine drastische Vervielf{\"a}ltigung der Dienste und Anwendungen, die das Internet f{\"u}r den Datentransport nutzen. Dabei unterscheiden sich die Anforderungen dieser Dienste an das Netzwerk deutlich. Das Netzwerkmanagement wird durch diese Diversit{\"a}t der nutzenden Dienste aber deutlich erschwert, da es einem Datentransportdienstleister kaum m{\"o}glich ist, die unterschiedlichen Verbindungen zu unterscheiden, ohne den Inhalt der transportierten Daten zu analysieren. Netzwerkvirtualisierung ist eine vielversprechende L{\"o}sung f{\"u}r dieses Problem, da sie es erm{\"o}glicht, f{\"u}r verschiedene Dienste unterschiedliche virtuelle Netze auf dem gleichen physikalischen Substrat zu betreiben. Diese Diensttrennung erm{\"o}glicht es, jedes einzelne Netz anwendungsspezifisch zu steuern. Ziel einer solchen Netzsteuerung ist es, sowohl die vom Nutzer erfahrene Dienstg{\"u}te als auch die Kosteneffizienz des Datentransports zu optimieren. Dar{\"u}ber hinaus wird es mit Netzwerkvirtualisierung m{\"o}glich, das physikalische Netz so weit zu abstrahieren, dass die aktuell fest verzahnten Rollen von Netzwerkbesitzer und Netzwerkbetreiber entkoppelt werden k{\"o}nnen. Dar{\"u}ber hinaus stellt Netzwerkvirtualisierung sicher, dass unterschiedliche Datennetze, die gleichzeitig auf dem gleichen physikalischen Netz betrieben werden, sich gegenseitig weder beeinflussen noch st{\"o}ren k{\"o}nnen. Diese Arbeit besch{\"a}ftigt sich mit ausgew{\"a}hlten Aspekten dieses Themenkomplexes und fokussiert sich darauf, ein virtuelles Netzwerk mit bestm{\"o}glicher Dienstqualit{\"a}t f{\"u}r den Nutzer zu betreiben und zu steuern. Daf{\"u}r wird ein Top-down-Ansatz gew{\"a}hlt, der von den Anwendungsf{\"a}llen, einer m{\"o}glichen Netzwerkvirtualisierungs-Architektur und aktuellen M{\"o}glichkeiten der Hardwarevirtualisierung ausgeht. Im Weiteren fokussiert sich die Arbeit dann in Richtung Bestimmung und Optimierung der vom Nutzer erfahrenen Dienstqualit{\"a}t (QoE) auf Applikationsschicht und diskutiert M{\"o}glichkeiten zur Messung und {\"U}berwachung von wesentlichen Netzparametern in virtualisierten Netzen.}, subject = {Netzwerkmanagement}, language = {de} } @phdthesis{Selbach2011, author = {Selbach, Stefan}, title = {Hybride bitparallele Volltextsuche}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66476}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Der große Vorteil eines q-Gramm Indexes liegt darin, dass es m{\"o}glich ist, beliebige Zeichenketten in einer Dokumentensammlung zu suchen. Ein Nachteil jedoch liegt darin, dass bei gr{\"o}ßer werdenden Datenmengen dieser Index dazu neigt, sehr groß zu werden, was mit einem deutlichen Leistungsabfall verbunden ist. 
In dieser Arbeit wird eine neuartige Technik vorgestellt, die die Leistung eines q-Gramm Indexes mithilfe zus{\"a}tzlicher M-Matrizen f{\"u}r jedes q-Gramm und durch die Kombination mit einem invertierten Index erh{\"o}ht. Eine M-Matrix ist eine Bit-Matrix, die Informationen {\"u}ber die Positionen eines q-Gramms enth{\"a}lt. Auch bei der Kombination von zwei oder mehreren Q-Grammen bieten diese M-Matrizen Informationen {\"u}ber die Positionen der Kombination. Dies kann verwendet werden, um die Komplexit{\"a}t der Zusammenf{\"u}hrung der q-Gramm Trefferlisten f{\"u}r eine gegebene Suchanfrage zu reduzieren und verbessert die Leistung des n-Gramm-invertierten Index. Die Kombination mit einem termbasierten invertierten Index beschleunigt die durchschnittliche Suchzeit zus{\"a}tzlich und vereint die Vorteile beider Index-Formate. Redundante Informationen werden in dem q-Gramm Index reduziert und weitere Funktionalit{\"a}t hinzugef{\"u}gt, wie z.B. die Bewertung von Treffern nach Relevanz, die M{\"o}glichkeit, nach Konzepten zu suchen oder Indexpartitionierungen nach Wichtigkeit der enthaltenen Terme zu erstellen.}, subject = {Information Retrieval}, language = {de} } @article{HoernleinMandelIflandetal.2011, author = {H{\"o}rnlein, Alexander and Mandel, Alexander and Ifland, Marianus and L{\"u}neberg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Akzeptanz medizinischer Trainingsf{\"a}lle als Erg{\"a}nzung zu Vorlesungen}, series = {GMS Zeitschrift f{\"u}r Medizinische Ausbildung}, volume = {28}, journal = {GMS Zeitschrift f{\"u}r Medizinische Ausbildung}, number = {3}, doi = {10.3205/zma000754}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-133569}, pages = {Doc42}, year = {2011}, abstract = {Introduction: Medical training cases (virtual patients) are in widespread use for student education. Most publications report about development and experiences in one course with training cases. In this paper we compare the acceptance of different training case courses with different usages deployed as supplement to lectures of the medical faculty of Wuerzburg university during a period of three semesters. Methods: The training cases were developed with the authoring tool CaseTrain and are available for students via the Moodle-based eLearning platform WueCampus at Wuerzburg university. Various data about usage and acceptance is automatically collected. Results: From WS (winter semester) 08/09 till WS 09/10 19 courses with about 200 cases were available. In each semester, about 550 different medical students from W{\"u}rzburg and 50 students from other universities processed about 12000 training cases and filled in about 2000 evaluation forms. In different courses, the usage varied between less than 50 and more than 5000 processed cases. Discussion: Although students demand training cases as supplement to all lectures, the data show that the usage does not primarily depend on the quality of the available training cases. Instead, the training cases of nearly all case collections were processed extremely often shortly before the examination. 
This shows that the degree of usage depends primarily on the perceived relevance of the training cases for the examination.}, language = {de} } @article{MandelHoernleinIflandetal.2011, author = {Mandel, Alexander and H{\"o}rnlein, Alexander and Ifland, Marianus and L{\"u}neburg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Aufwandsanalyse f{\"u}r computerunterst{\"u}tzte Multiple-Choice Papierklausuren}, series = {GMS Journal for Medical Education}, volume = {28}, journal = {GMS Journal for Medical Education}, number = {4}, doi = {10.3205/zma000767}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134386}, pages = {1-15, Doc55}, year = {2011}, abstract = {Introduction: Multiple-choice examinations are still fundamental for assessment in medical degree programs. In addition to content-related research, the optimization of the technical procedure is an important question. Medical examiners face three options: paper-based examinations with or without computer support or completely electronic examinations. Critical aspects are the effort for formatting, the logistic effort during the actual examination, quality, promptness and effort of the correction, the time for making the documents available for inspection by the students, and the statistical analysis of the examination results. Methods: For three semesters, a computer program for input and formatting of MC-questions in medical and other paper-based examinations has been used and continuously improved at Wuerzburg University. In the winter semester (WS) 2009/10 eleven, in the summer semester (SS) 2010 twelve and in WS 2010/11 thirteen medical examinations were conducted with the program and automatically evaluated. For the last two semesters the remaining manual workload was recorded. Results: The effort for formatting and the subsequent analysis, including adjustments of the analysis, of an average examination with about 140 participants and about 35 questions was 5-7 hours for exams without complications in the winter semester 2009/2010, about 2 hours in SS 2010 and about 1.5 hours in the winter semester 2010/11. Including exams with complications, the average time was about 3 hours per exam in SS 2010 and 2.67 hours for the WS 10/11. Discussion: For conventional multiple-choice exams, the computer-based formatting and evaluation of paper-based exams offers a significant time reduction for lecturers in comparison with the manual correction of paper-based exams; compared to purely electronically conducted exams, it needs a much simpler technological infrastructure and fewer staff during the exam.}, language = {de} } @phdthesis{Baunach2012, author = {Baunach, Marcel}, title = {Advances in Distributed Real-Time Sensor/Actuator Systems Operation - Operating Systems, Communication, and Application Design Concepts -}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76489}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {This work takes a close look at several quite different research areas related to the design of networked embedded sensor/actuator systems. The variety of the topics illustrates the potential complexity of current sensor network applications, especially when enriched with actuators for proactivity and environmental interaction. 
Besides their conception, development, installation and long-term operation, we'll mainly focus on more "low-level" aspects: Compositional hardware and software design, task cooperation and collaboration, memory management, and real-time operation will be addressed from a local node perspective. In contrast, inter-node synchronization, communication, as well as sensor data acquisition, aggregation, and fusion will be discussed from a rather global network view. The diversity in the concepts was intentionally accepted to finally facilitate the reliable implementation of truly complex systems. In particular, these should go beyond the usual "sense and transmit of sensor data", but show how powerful today's networked sensor/actuator systems can be despite of their low computational performance and constrained hardware: If their resources are only coordinated efficiently!}, subject = {Eingebettetes System}, language = {en} } @phdthesis{Dang2012, author = {Dang, Nghia Duc}, title = {Konzeption und Evaluation eines hybriden, skalierbaren Werkzeugs zur mechatronischen Systemdiagnose am Beispiel eines Diagnosesystems f{\"u}r freie Kfz-Werkst{\"a}tten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70774}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Die Entwicklung eines wissensbasierten Systems, speziell eines Diagnosesystems, ist eine Teildisziplin der k{\"u}nstlichen Intelligenz und angewandten Informatik. Im Laufe der Forschung auf diesem Gebiet wurden verschiedene L{\"o}sungsans{\"a}tze mit unterschiedlichem Erfolg bei der Anwendung in der Kraftfahrzeugdiagnose entwickelt. Diagnosesysteme in Vertragswerkst{\"a}tten, das heißt in Fahrzeughersteller gebundenen Werkst{\"a}tten, wenden haupts{\"a}chlich die fallbasierte Diagnostik an. Zum einen h{\"a}lt sich hier die Fahrzeugvielfalt in Grenzen und zum anderen besteht eine Meldepflicht bei neuen, nicht im System vorhandenen F{\"a}llen. Die freien Werkst{\"a}tten verf{\"u}gen nicht {\"u}ber eine solche Datenbank. Somit ist der fallbasierte Ansatz schwer umsetzbar. In freien Werkst{\"a}tten - Fahrzeughersteller unabh{\"a}ngigen Werkst{\"a}tten - basiert die Fehlersuche haupts{\"a}chlich auf Fehlerb{\"a}umen. Wegen der wachsenden Fahrzeugkomplexit{\"a}t, welche wesentlich durch die stark zunehmende Anzahl der durch mechatronische Systeme realisierten Funktionen bedingt ist, und der steigenden Typenvielfalt ist die gef{\"u}hrte Fehlersuche in freien Werkst{\"a}tten nicht immer zielf{\"u}hrend. Um die Unterst{\"u}tzung des Personals von freien Werkst{\"a}tten bei der zuk{\"u}nftigen Fehlersuche zu gew{\"a}hrleisten, werden neue Generationen von herstellerunabh{\"a}ngigen Diagnosetools ben{\"o}tigt, die die Probleme der Variantenvielfalt und Komplexit{\"a}t l{\"o}sen. In der vorliegenden Arbeit wird ein L{\"o}sungsansatz vorgestellt, der einen qualitativen, modellbasierten Diagnoseansatz mit einem auf heuristischem Diagnosewissen basierenden Ansatz vereint. Neben der Grundlage zur Wissenserhebung werden in dieser Arbeit die theoretische Grundlage zur Beherrschung der Variantenvielfalt sowie die Tests f{\"u}r die erstellten Diagnosemodelle behandelt. Die Diagnose ist symptombasiert und die Inferenzmechanismen zur Verarbeitung des Diagnosewissens sind eine Kombination aus Propagierung der abweichenden physikalischen Gr{\"o}ßen im Modell und der Auswertung des heuristischen Wissens. 
Des Weiteren werden in dieser Arbeit verschiedene Aspekte der Realisierung der entwickelten theoretischen Grundlagen dargestellt, zum Beispiel: Systemarchitektur, Wissenserhebungsprozess, Ablauf des Diagnosevorgangs in den Werkst{\"a}tten. Die Evaluierung der entwickelten L{\"o}sung bei der Wissenserhebung in Form von Modellerstellungen und Modellierungsworkshops sowie Feldtests dient nicht nur zur Best{\"a}tigung des entwickelten Ansatzes, sondern auch zur Ideenfindung f{\"u}r die Integration der entwickelten Tools in die existierende IT-Infrastruktur.}, subject = {Diagnosesystem}, language = {de} } @phdthesis{Zinner2012, author = {Zinner, Thomas}, title = {Performance Modeling of QoE-Aware Multipath Video Transmission in the Future Internet}, doi = {10.25972/OPUS-6106}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72324}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Internet applications are becoming more and more flexible to support diverse user demands and network conditions. This is reflected by technical concepts, which provide new adaptation mechanisms to allow fine-grained adjustment of the application quality and the corresponding bandwidth requirements. For the case of video streaming, the scalable video codec H.264/SVC allows the flexible adaptation of frame rate, video resolution and image quality with respect to the available network resources. In order to guarantee a good user-perceived quality (Quality of Experience, QoE) it is necessary to adjust and optimize the video quality accurately. But not only have the applications of the current Internet changed. Within network and transport, new technologies have evolved during the last years, providing a more flexible and efficient usage of data transport and network resources. One of the most promising technologies is Network Virtualization (NV), which is seen as an enabler to overcome the ossification of the Internet stack. It provides means to simultaneously operate multiple logical networks which allow for example application-specific addressing, naming and routing, or their individual resource management. New transport mechanisms like multipath transmission on the network and transport layer aim at an efficient usage of available transport resources. However, the simultaneous transmission of data via heterogeneous transport paths and communication technologies inevitably introduces packet reordering. Additional mechanisms and buffers are required to restore the correct packet order and thus to prevent a disturbance of the data transport. A proper buffer dimensioning as well as the classification of the impact of varying path characteristics like bandwidth and delay require appropriate evaluation methods. Additionally, real-time evaluation mechanisms are needed for path selection. A better application-network interaction and the corresponding exchange of information enable an efficient adaptation of the application to the network conditions and vice versa. 
This PhD thesis analyzes a video streaming architecture utilizing multipath transmission and scalable video coding and develops the following optimization possibilities and results: Analysis and dimensioning methods for multipath transmission, quantification of the adaptation possibilities to the current network conditions with respect to the QoE for H.264/SVC, and evaluation and optimization of a future video streaming architecture, which allows a better interaction of application and network.}, subject = {Video{\"u}bertragung}, language = {en} } @phdthesis{Duelli2012, author = {Duelli, Michael}, title = {Heuristic Design and Provisioning of Resilient Multi-Layer Networks}, doi = {10.25972/OPUS-5600}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69433}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {To jointly provide different services/technologies, like IP and Ethernet or IP and SDH/SONET, in a single network, equipment of multiple technologies needs to be deployed to the sites/Points of Presence (PoP) and interconnected with each other. Therein, a technology may provide transport functionality to other technologies and increase the number of available resources by using multiplexing techniques. By providing its own switching functionality, each technology creates connections in a logical layer which leads to the notion of multi-layer networks. The design of such networks comprises the deployment and interconnection of components to suit to given traffic demands. To prevent traffic loss due to failures of networking equipment, protection mechanisms need to be established. In multi-layer networks, protection usually can be applied in any of the considered layers. In turn, the hierarchical structure of multi-layer networks also bears shared risk groups (SRG). To achieve a cost-optimal resilient network, an appropriate combination of multiplexing techniques, technologies, and their interconnections needs to be found. Thus, network design is a combinatorial problem with a large parameter and solution space. After the design stage, the resources of a multi-layer network can be provided to traffic demands. Especially, dynamic capacity provisioning requires interaction of sites and layers, as well as accurate retrieval of constraint information. In recent years, generalized multiprotocol label switching (GMPLS) and path computation elements (PCE) have emerged as possible approaches for these challenges. Like the design, the provisioning of multi-layer networks comprises a variety of optimization parameters, like blocking probability, resilience, and energy efficiency. In this work, we introduce several efficient heuristics to approach the considered optimization problems. We perform capital expenditure (CAPEX)-aware design of multi-layer networks from scratch, based on IST NOBEL phase 2 project's cost and equipment data. We comprise traffic and resilience requirements in different and multiple layers as well as different network architectures. On top of the designed networks, we consider the dynamic provisioning of multi-layer traffic based on the GMPLS and PCE architecture. We evaluate different PCE deployments, information retrieval strategies, and re-optimization. Finally, we show how information about provisioning utilization can be used to provide a feedback for network design.}, subject = {Mehrschichtsystem}, language = {en} } @article{MuysomsCampanelliChampaultetal.2012, author = {Muysoms, F. and Campanelli, G. and Champault, G. and DeBeaux, A. C. 
and Dietz, U. A. and Jeekel, J. and Klinge, U. and K{\"o}ckerling, F. and Mandala, V. and Montgomery, A. and Morales Conde, S. and Puppe, F. and Simmermacher, R. K. J. and Śmietański, M. and Miserez, M.}, title = {EuraHS: the development of an international online platform for registration and outcome measurement of ventral abdominal wall hernia repair}, series = {Hernia}, volume = {16}, journal = {Hernia}, number = {3}, doi = {10.1007/s10029-012-0912-7}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-126691}, pages = {239-250}, year = {2012}, abstract = {BACKGROUND: Although the repair of ventral abdominal wall hernias is one of the most commonly performed operations, many aspects of their treatment are still under debate or poorly studied. In addition, there is a lack of good definitions and classifications that make the evaluation of studies and meta-analyses in this field of surgery difficult. MATERIALS AND METHODS: Under the auspices of the board of the European Hernia Society and following the previously published classifications on inguinal and on ventral hernias, a working group was formed to create an online platform for registration and outcome measurement of operations for ventral abdominal wall hernias. Development of such a registry involved reaching agreement about clear definitions and classifications on patient variables, surgical procedures and mesh materials used, as well as outcome parameters. The EuraHS working group (European registry for abdominal wall hernias) comprised a multinational European expert panel with specific interest in abdominal wall hernias. Over five working group meetings, consensus was reached on definitions for the data to be recorded in the registry. RESULTS: A set of well-described definitions was made. The previously reported EHS classifications of hernias will be used. Risk factors for recurrences and co-morbidities of patients were listed. A new severity of comorbidity score was defined. Post-operative complications were classified according to existing classifications as described for other fields of surgery. A new 3-dimensional numerical quality-of-life score, EuraHS-QoL score, was defined. An online platform is created based on the definitions and classifications, which can be used by individual surgeons, surgical teams or for multicentre studies. A EuraHS website is constructed with easy access to all the definitions, classifications and results from the database. CONCLUSION: An online platform for registration and outcome measurement of abdominal wall hernia repairs with clear definitions and classifications is offered to the surgical community. It is hoped that this registry could lead to better evidence-based guidelines for treatment of abdominal wall hernias based on hernia variables, patient variables, available hernia repair materials and techniques.}, language = {en} } @article{BuchinBuchinByrkaetal.2012, author = {Buchin, Kevin and Buchin, Maike and Byrka, Jaroslaw and N{\"o}llenburg, Martin and Okamoto, Yoshio and Silveira, Rodrigo I. and Wolff, Alexander}, title = {Drawing (Complete) Binary Tanglegrams}, series = {Algorithmica}, volume = {62}, journal = {Algorithmica}, doi = {10.1007/s00453-010-9456-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-124622}, pages = {309-332}, year = {2012}, abstract = {A binary tanglegram is a drawing of a pair of rooted binary trees whose leaf sets are in one-to-one correspondence; matching leaves are connected by inter-tree edges. 
For applications, for example, in phylogenetics, it is essential that both trees are drawn without edge crossings and that the inter-tree edges have as few crossings as possible. It is known that finding a tanglegram with the minimum number of crossings is NP-hard and that the problem is fixed-parameter tractable with respect to that number. We prove that under the Unique Games Conjecture there is no constant-factor approximation for binary trees. We show that the problem is NP-hard even if both trees are complete binary trees. For this case we give an O(n^3)-time 2-approximation and a new, simple fixed-parameter algorithm. We show that the maximization version of the dual problem for binary trees can be reduced to a version of MaxCut for which the algorithm of Goemans and Williamson yields a 0.878-approximation.}, language = {en} } @article{MuysomsCampanelliChampaultetal.2012, author = {Muysoms, F. and Campanelli, G. and Champault, G. G. and DeBeaux, A. C. and Dietz, U. A. and Jeekel, J. and Klinge, U. and K{\"o}ckerling, F. and Mandala, V. and Montgomery, A. and Morales Conde, S. and Puppe, F. and Simmermacher, R. K. J. and Śmietański, M. and Miserez, M.}, title = {EuraHS: the development of an international online platform for registration and outcome measurement of ventral abdominal wall hernia repair}, series = {Hernia}, volume = {16}, journal = {Hernia}, number = {3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-124728}, pages = {239-250}, year = {2012}, abstract = {Background Although the repair of ventral abdominal wall hernias is one of the most commonly performed operations, many aspects of their treatment are still under debate or poorly studied. In addition, there is a lack of good definitions and classifications that make the evaluation of studies and meta-analyses in this field of surgery difficult. Materials and methods Under the auspices of the board of the European Hernia Society and following the previously published classifications on inguinal and on ventral hernias, a working group was formed to create an online platform for registration and outcome measurement of operations for ventral abdominal wall hernias. Development of such a registry involved reaching agreement about clear definitions and classifications on patient variables, surgical procedures and mesh materials used, as well as outcome parameters. The EuraHS working group (European registry for abdominal wall hernias) comprised a multinational European expert panel with specific interest in abdominal wall hernias. Over five working group meetings, consensus was reached on definitions for the data to be recorded in the registry. Results A set of well-described definitions was made. The previously reported EHS classifications of hernias will be used. Risk factors for recurrences and co-morbidities of patients were listed. A new severity of comorbidity score was defined. Post-operative complications were classified according to existing classifications as described for other fields of surgery. A new 3-dimensional numerical quality-of-life score, EuraHS-QoL score, was defined. An online platform is created based on the definitions and classifications, which can be used by individual surgeons, surgical teams or for multicentre studies. A EuraHS website is constructed with easy access to all the definitions, classifications and results from the database. 
Conclusion An online platform for registration and outcome measurement of abdominal wall hernia repairs with clear definitions and classifications is offered to the surgical community. It is hoped that this registry could lead to better evidence-based guidelines for treatment of abdominal wall hernias based on hernia variables, patient variables, available hernia repair materials and techniques.}, language = {en} } @article{BoehlerCreignouGalotaetal.2012, author = {B{\"o}hler, Elmar and Creignou, Nadia and Galota, Matthias and Reith, Steffen and Schnoor, Henning and Vollmer, Heribert}, title = {Complexity Classifications for Different Equivalence and Audit Problems for Boolean Circuits}, series = {Logical Methods in Computer Science}, volume = {8}, journal = {Logical Methods in Computer Science}, number = {3:27}, doi = {10.2168/LMCS-8(3:27)2012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-131121}, pages = {1 -- 25}, year = {2012}, abstract = {We study Boolean circuits as a representation of Boolean functions and consider different equivalence, audit, and enumeration problems. For a number of restricted sets of gate types (bases) we obtain efficient algorithms, while for all other gate types we show these problems are at least NP-hard.}, language = {en} } @article{AtienzadeCastroCortesetal.2012, author = {Atienza, Nieves and de Castro, Natalia and Cort{\´e}s, Carmen and Garrido, M. {\´A}ngeles and Grima, Clara I. and Hern{\´a}ndez, Gregorio and M{\´a}rquez, Alberto and Moreno-Gonz{\´a}lez, Auxiliadora and N{\"o}llenburg, Martin and Portillo, Jos{\´e} Ram{\´o}n and Reyes, Pedro and Valenzuela, Jes{\´u}s and Trinidad Villar, Maria and Wolff, Alexander}, title = {Cover contact graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78845}, year = {2012}, abstract = {We study problems that arise in the context of covering certain geometric objects called seeds (e.g., points or disks) by a set of other geometric objects called cover (e.g., a set of disks or homothetic triangles). We insist that the interiors of the seeds and the cover elements are pairwise disjoint, respectively, but they can touch. We call the contact graph of a cover a cover contact graph (CCG). We are interested in three types of tasks, both in the general case and in the special case of seeds on a line: (a) deciding whether a given seed set has a connected CCG, (b) deciding whether a given graph has a realization as a CCG on a given seed set, and (c) bounding the sizes of certain classes of CCG's. Concerning (a) we give efficient algorithms for the case that seeds are points and show that the problem becomes hard if seeds and covers are disks. Concerning (b) we show that this problem is hard even for point seeds and disk covers (given a fixed correspondence between graph vertices and seeds). Concerning (c) we obtain upper and lower bounds on the number of CCG's for point seeds.}, subject = {Informatik}, language = {de} } @article{WolffRutter2012, author = {Wolff, Alexander and Rutter, Ignaz}, title = {Augmenting the Connectivity of Planar and Geometric Graphs}, series = {Journal of Graph Algorithms and Applications}, journal = {Journal of Graph Algorithms and Applications}, doi = {10.7155/jgaa.00275}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97587}, year = {2012}, abstract = {In this paper we study connectivity augmentation problems. 
Given a connected graph G with some desirable property, we want to make G 2-vertex connected (or 2-edge connected) by adding edges such that the resulting graph keeps the property. The aim is to add as few edges as possible. The property that we consider is planarity, both in an abstract graph-theoretic and in a geometric setting, where vertices correspond to points in the plane and edges to straight-line segments. We show that it is NP-hard to find a minimum-cardinality augmentation that makes a planar graph 2-edge connected. For making a planar graph 2-vertex connected this was known. We further show that both problems are hard in the geometric setting, even when restricted to trees. The problems remain hard for higher degrees of connectivity. On the other hand, we give polynomial-time algorithms for the special case of convex geometric graphs. We also study the following related problem. Given a planar (plane geometric) graph G, two vertices s and t of G, and an integer c, how many edges have to be added to G such that G is still planar (plane geometric) and contains c edge- (or vertex-) disjoint s-t paths? For the planar case we give a linear-time algorithm for c = 2. For the plane geometric case we give optimal worst-case bounds for c = 2; for c = 3 we characterize the cases that have a solution.}, language = {en} } @phdthesis{Lehrieder2013, author = {Lehrieder, Frank}, title = {Performance Evaluation and Optimization of Content Distribution using Overlay Networks}, doi = {10.25972/OPUS-6420}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76018}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {The work presents a performance evaluation and optimization of so-called overlay networks for content distribution in the Internet. Chapter 1 describes the importance that such networks have in today's Internet, for example, for the transmission of video content. The focus of this work is on overlay networks based on the peer-to-peer principle. These are characterized by the fact that users who download content also contribute to the distribution process by sharing parts of the data with other users. This enables efficient content distribution because each user not only consumes resources in the system, but also provides its own resources. Chapter 2 of the monograph contains a detailed description of the functionality of today's most popular overlay network BitTorrent. It explains the various components and their interaction. This is followed by an illustration of why such overlay networks are problematic for Internet service providers (ISPs). The reason lies in the large amount of inter-ISP traffic that is produced by these overlay networks. Since this inter-ISP traffic leads to high costs for ISPs, they try to reduce it by improved mechanisms for overlay networks. One optimization approach is the use of topology awareness within the overlay networks. It provides users of the overlay networks with information about the underlying physical network topology. This allows them to avoid inter-ISP traffic by exchanging data preferentially with other users that are connected to the same ISP. Another approach to save inter-ISP traffic is caching. In this case the ISP provides additional computers in its network, called caches, which store copies of popular content. The users of this ISP can then obtain such content from the cache. This prevents the content from having to be retrieved from locations outside of the ISP's network, and saves costly inter-ISP traffic in this way. 
In the third chapter of the thesis, the results of a comprehensive measurement study of overlay networks, which can be found in today's Internet, are presented. After a short description of the measurement methodology, the results of the measurements are described. These results contain data on a variety of characteristics of current P2P overlay networks in the Internet. These include the popularity of content, i.e., how many users are interested in specific content, the evolution of the popularity and the size of the files. The distribution of users within the Internet is investigated in detail. Special attention is given to the number of users that exchange a particular file within the same ISP. On the basis of these measurement results, an estimation of the traffic savings that can be achieved by topology awareness is derived. This new estimation is of scientific and practical importance, since it is not limited to individual ISPs and files, but considers the whole Internet and the total amount of data exchanged in overlay networks. Finally, the characteristics of regional content are considered, for which the popularity is limited to certain parts of the Internet. This is, for example, the case for videos in German, Italian or French. Chapter 4 of the thesis is devoted to the optimization of overlay networks for content distribution through caching. It presents a deterministic flow model that describes the influence of caches. On the basis of this model, it derives an estimate of the inter-ISP traffic that is generated by an overlay network, and which part can be saved by caches. The results show that the influence of the cache depends on the structure of the overlay networks, and that caches can also lead to an increase in inter-ISP traffic under certain circumstances. The described model is thus an important tool for ISPs to decide for which overlay networks caches are useful and to dimension them. Chapter 5 summarizes the content of the work and emphasizes the importance of the findings. In addition, it explains how the findings can be applied to the optimization of future overlay networks. Special attention is given to the growing importance of video-on-demand and real-time video transmissions.}, subject = {Leistungsbewertung}, language = {en} } @article{BeckerCaminitiFiorellaetal.2013, author = {Becker, Martin and Caminiti, Saverio and Fiorella, Donato and Francis, Louise and Gravino, Pietro and Haklay, Mordechai (Muki) and Hotho, Andreas and Loreto, Vittorio and Mueller, Juergen and Ricchiuti, Ferdinando and Servedio, Vito D. P. and Sirbu, Alina and Tria, Francesca}, title = {Awareness and Learning in Participatory Noise Sensing}, series = {PLOS ONE}, volume = {8}, journal = {PLOS ONE}, number = {12}, issn = {1932-6203}, doi = {10.1371/journal.pone.0081638}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-127675}, pages = {e81638}, year = {2013}, abstract = {The development of ICT infrastructures has facilitated the emergence of new paradigms for looking at society and the environment over the last few years. Participatory environmental sensing, i.e. directly involving citizens in environmental monitoring, is one example, which is hoped to encourage learning and enhance awareness of environmental issues. In this paper, an analysis of the behaviour of individuals involved in noise sensing is presented. Citizens have been involved in noise measuring activities through the WideNoise smartphone application. 
This application has been designed to record both objective (noise samples) and subjective (opinions, feelings) data. The application has been open to be used freely by anyone and has been widely employed worldwide. In addition, several test cases have been organised in European countries. Based on the information submitted by users, an analysis of emerging awareness and learning is performed. The data show that changes in the way the environment is perceived after repeated usage of the application do appear. Specifically, users learn how to recognise different noise levels they are exposed to. Additionally, the subjective data collected indicate an increased user involvement in time and a categorisation effect between pleasant and less pleasant environments.}, language = {en} } @article{ElsebergBorrmannNuechter2013, author = {Elseberg, Jan and Borrmann, Dorit and N{\"u}chter, Andreas}, title = {Algorithmic Solutions for Computing Precise Maximum Likelihood 3D Point Clouds from Mobile Laser Scanning Platforms}, series = {Remote Sensing}, volume = {5}, journal = {Remote Sensing}, number = {11}, doi = {10.3390/rs5115871}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-130478}, pages = {5871-5906}, year = {2013}, abstract = {Mobile laser scanning puts high requirements on the accuracy of the positioning systems and the calibration of the measurement system. We present a novel algorithmic approach for calibration with the goal of improving the measurement accuracy of mobile laser scanners. We describe a general framework for calibrating mobile sensor platforms that estimates all configuration parameters for any arrangement of positioning sensors, including odometry. In addition, we present a novel semi-rigid Simultaneous Localization and Mapping (SLAM) algorithm that corrects the vehicle position at every point in time along its trajectory, while simultaneously improving the quality and precision of the entire acquired point cloud. Using this algorithm, the temporary failure of accurate external positioning systems or the lack thereof can be compensated for. We demonstrate the capabilities of the two newly proposed algorithms on a wide variety of datasets.}, language = {en} } @article{GageikStrohmeierMontenegro2013, author = {Gageik, Nils and Strohmeier, Michael and Montenegro, Sergio}, title = {Waypoint flight parameter comparison of an autonomous UAV}, series = {International Journal of Artificial Intelligence \& Applications (IJAIA)}, journal = {International Journal of Artificial Intelligence \& Applications (IJAIA)}, doi = {10.5121/ijaia.2013.4304}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96833}, year = {2013}, abstract = {The present paper compares the effect of different waypoint parameters on the flight performance of a special autonomous indoor UAV (unmanned aerial vehicle) fusing ultrasonic, inertial, pressure and optical sensors for 3D positioning and controlling. The investigated parameters are the acceptance threshold for reaching a waypoint as well as the maximal waypoint step size or block size. The effect of these parameters on the flight time and accuracy of the flight path is investigated. Therefore the paper addresses how the acceptance threshold and step size influence the speed and accuracy of the autonomous flight and thus influence the performance of the presented autonomous quadrocopter under real indoor navigation circumstances. 
Furthermore the paper demonstrates a drawback of the standard potential field method for navigation of such autonomous quadrocopters and points to an improvement.}, language = {en} } @article{GageikStrohmeierMontenegro2013, author = {Gageik, Nils and Strohmeier, Michael and Montenegro, Sergio}, title = {An Autonomous UAV with an Optical Flow Sensor for Positioning and Navigation}, series = {International Journal of Advanced Robotic Systems}, journal = {International Journal of Advanced Robotic Systems}, doi = {10.5772/56813}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96368}, year = {2013}, abstract = {A procedure to control all six DOF (degrees of freedom) of a UAV (unmanned aerial vehicle) without an external reference system and to enable fully autonomous flight is presented here. For 2D positioning the principle of optical flow is used. Together with the output of height estimation, fusing ultrasonic, infrared and inertial and pressure sensor data, the 3D position of the UAV can be computed, controlled and steered. All data processing is done on the UAV. An external computer with a pathway planning interface is for commanding purposes only. The presented system is part of the AQopterI8 project, which aims to develop an autonomous flying quadrocopter for indoor application. The focus of this paper is 2D positioning using an optical flow sensor. As a result of the performed evaluation, it can be concluded that for position hold, the standard deviation of the position error is 10cm and after landing the position error is about 30cm.}, language = {en} } @phdthesis{Herrmann2013, author = {Herrmann, Christian}, title = {Robotic Motion Compensation for Applications in Radiation Oncology}, isbn = {978-3-923959-88-4}, doi = {10.25972/OPUS-6727}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-79045}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {Aufgrund vieler Verbesserungen der Behandlungsmethoden im Laufe der letzten 60 Jahre, erlaubt die Strahlentherapie heutzutage pr{\"a}zise Behandlungen von statischen Tumoren. Jedoch birgt die Bestrahlung von sich bewegenden Tumoren noch große Herausforderungen in sich, da bewegliche Tumore oft den Behandlungsstrahl verlassen. Dabei reduziert sich die Strahlendosis im Tumor w{\"a}hrend sich diese im umliegenden gesunden Gewebe erh{\"o}ht. Diese Forschungsarbeit zielt darauf ab, die Grenzen der Strahlentherapie zu erweitern, um pr{\"a}zise Behandlungen von beweglichen Tumoren zu erm{\"o}glichen. Der Fokus der Arbeit liegt auf der Erstellung eines Echtzeitsystems zur aktiven Kompensation von Tumorbewegungen durch robotergest{\"u}tzte Methoden. W{\"a}hrend Behandlungen befinden sich Patienten auf einer Patientenliege, mit der statische Lagerungsfehler vor Beginn einer Behandlung korrigiert werden. Die in dieser Arbeit verwendete Patientenliege "HexaPOD" ist ein paralleler Manipulator mit sechs Freiheitsgraden, der große Lasten innerhalb eines eingeschr{\"a}nkten Arbeitsbereichs pr{\"a}zise positionieren kann. Obwohl der HexaPOD urspr{\"u}nglich nicht f{\"u}r dynamische Anwendungen konzipiert wurde, wird dieser f{\"u}r eine dauerhafte Bewegungskompensation eingesetzt, in dem Patienten so bewegt werden, dass Tumore pr{\"a}zise im Zentralstrahl w{\"a}hrend der Dauer einer gesamten Behandlung verbleiben. Um ein echtzeitf{\"a}higes Kompensationssystem auf Basis des HexaPODs zu realisieren, muss eine Reihe an Herausforderungen bew{\"a}ltigt werden. 
Echtzeitaspekte werden einerseits durch die Verwendung eines harten Echtzeitbetriebssystems abgedeckt, andererseits durch die Messung und Sch{\"a}tzung von Latenzzeiten aller physikalischen Gr{\"o}ßen im System, z.B. Messungen der Tumor- und Atemposition. Neben der konsistenten und durchg{\"a}ngigen Ber{\"u}cksichtigung von akkuraten Zeitinformation, werden alle software-induzierten Latenzen adaptiv ausgeglichen. Dies erfordert Vorhersagen der Tumorposition in die nahe Zukunft. Zahlreiche Pr{\"a}diktoren zur Atem- und Tumorpositionsvorhersage werden vorgeschlagen und anhand verschiedenster Metriken evaluiert. Erweiterungen der Pr{\"a}diktionsalgorithmen werden eingef{\"u}hrt, die sowohl Atem- als auch Tumorpositionsinformationen fusionieren, um Vorhersagen ohne explizites Korrelationsmodell zu erm{\"o}glichen. Die Vorhersagen bestimmen den zuk{\"u}nftigen Bewegungspfad des HexaPODs, um Tumorbewegungen zu kompensieren. Dazu werden verschiedene Regler entwickelt, die eine Trajektorienverfolgung mit dem HexaPOD erm{\"o}glichen. Auf der Basis von linearer und nicht-linearer dynamischer Modellierung des HexaPODs mit Methoden der Systemidentifikation, wird zun{\"a}chst ein modellpr{\"a}diktiver Regler entwickelt. Ein zweiter Regler wird auf Basis einer Annahme {\"u}ber das Arbeitsprinzip des internen Reglers im HexaPOD entworfen. Schließlich wird ein dritter Regler vorgeschlagen, der beide vorhergehenden Regler miteinander kombiniert. F{\"u}r jeden dieser Regler werden vergleichende Ergebnisse aus Experimenten mit realer Hardware und menschlichen Versuchspersonen pr{\"a}sentiert und diskutiert. Dar{\"u}ber hinaus wird die geeignete Wahl von freien Parametern in den Reglern vorgestellt. Neben einer pr{\"a}zisen Verfolgung der Referenztrajektorie spielt der Patientenkomfort eine entscheidende Rolle f{\"u}r die Akzeptanz des Systems. Es wird gezeigt, dass die Regler glatte Trajektorien realisieren k{\"o}nnen, um zu garantieren, dass sich Patienten wohl f{\"u}hlen w{\"a}hrend ihre Tumorbewegung mit Genauigkeiten im Submillimeterbereich ausgeglichen wird. Gesamtfehler werden im Kompensationssystem analysiert, in dem diese zu Trajektorienverfolgungsfehlern und Pr{\"a}diktionsfehlern in Beziehung gesetzt werden. Durch Ausnutzung von Eigenschaften verschiedener Pr{\"a}diktoren wird gezeigt, dass die Startzeit des Systems bis die Verfolgung der Referenztrajektorie erreicht ist, wenige Sekunden betr{\"a}gt. Dies gilt insbesondere f{\"u}r den Fall eines initial ruhenden HexaPODs und ohne Vorwissen {\"u}ber Tumorbewegungen. Dies zeigt die Eignung des Systems f{\"u}r die sehr kurz fraktionierten Behandlungen von Lungentumoren. Das Tumorkompensationssystem wurde ausschließlich auf Basis von klinischer Standard-Hardware entwickelt, die in vielen Behandlungsr{\"a}umen zu finden ist. Durch ein einfaches und flexibles Design k{\"o}nnen Behandlungsr{\"a}ume in kosteneffizienter Weise um M{\"o}glichkeiten der Bewegungskompensation erg{\"a}nzt werden. Dar{\"u}ber hinaus werden aktuelle Behandlungsmethoden wie intensit{\"a}tsmodulierte Strahlentherapie oder Volumetric Modulated Arc Therapy in keiner Weise eingeschr{\"a}nkt. Aufgrund der Unterst{\"u}tzung verschiedener Kompensationsmodi kann das System auf alle beweglichen Tumore angewendet werden, unabh{\"a}ngig davon ob die Bewegungen vorhersagbar (Lungentumore) oder nicht vorhersagbar (Prostatatumore) sind. 
Durch Integration von geeigneten Methoden zur Tumorpositionsbestimmung kann das System auf einfache Weise zur Kompensation von anderen Tumoren erweitert werden.}, subject = {Robotik}, language = {en} } @misc{Sieber2013, type = {Master Thesis}, author = {Sieber, Christian}, title = {Holistic Evaluation of Novel Adaptation Logics for DASH and SVC}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-92362}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {Streaming of videos has become the major traffic generator in today's Internet and the video traffic share is still increasing. According to Cisco's annual Visual Networking Index report, in 2012, 60\% of the global Internet IP traffic was generated by video streaming services. Furthermore, the study predicts a further increase to 73\% by 2017. At the same time, advances in the fields of mobile communications and embedded devices lead to a widespread adoption of Internet-video-enabled mobile and wireless devices (e.g. smartphones). The report predicts that by 2017, the traffic originating from mobile and wireless devices will exceed the traffic from wired devices and states that mobile video traffic was the source of roughly half of the mobile IP traffic at the end of 2012. With the increasing importance of Internet video streaming in today's world, video content providers find themselves in a highly competitive market where user expectations are high and customer loyalty depends strongly on the user's satisfaction with the provided service. In particular, paying customers expect their viewing experience to be the same across all their viewing devices and independently of their currently utilized Internet access technology. However, providing video streaming services is costly in terms of storage space, required bandwidth and generated traffic. Therefore, content providers face a trade-off between the user-perceived Quality of Experience (QoE) and the costs for providing the service. Today, a variety of transport and application protocols exist for providing video streaming services, but the one utilized depends on the scenario in mind. Video streaming services can be divided into three categories: video conferencing, IPTV and Video-on-Demand services. IPTV and video conferencing have severe real-time constraints and thus mostly utilize datagram-based protocols like the RTP/UDP protocol for the video transmission. Video-on-Demand services, in contrast, can profit from pre-encoded content and buffers at the end user's device, and mostly utilize TCP-based protocols in combination with progressive streaming for the media delivery. In recent years, the HTTP protocol on top of the TCP protocol gained widespread popularity as a cost-efficient way to distribute pre-encoded video content to customers via progressive streaming. This is due to the fact that HTTP-based video streaming profits from a well-established infrastructure which was originally implemented to efficiently satisfy the increasing demand for web browsing and file downloads. Large Content Delivery Networks (CDNs) are the key components of that distribution infrastructure. CDNs prevent expensive long-haul data traffic and delays by distributing HTTP content to world-wide locations close to the customers. As of 2012, already 53\% of the global video traffic in the Internet originates from Content Delivery Networks, and that percentage is expected to increase to 65\% by the year 2017.
Furthermore, HTTP media streaming profits from the existing HTTP caching infrastructure, ease of NAT and proxy traversal, and firewall friendliness. Video delivery through heterogeneous wired and wireless communication networks is prone to distortions due to insufficient network resources. This is especially true in wireless scenarios, where user mobility and insufficient signal strength can result in a very poor transport service performance (e.g. high packet loss, delays and low and varying bandwidth). Poor transport performance in turn may degrade the Quality of Experience as perceived by the user, either due to buffer underruns (i.e. playback interruptions) for TCP-based delivery or image distortions for datagram-based real-time video delivery. In order to overcome QoE degradations due to insufficient network resources, content providers have to consider adaptive video streaming. One possibility to implement this for HTTP/TCP streaming is to partition the content into small segments, encode the segments into different quality levels, and provide access to the segments and the quality level details (e.g. resolution, average bitrate). During the streaming session, a client-centric adaptation algorithm can use the supplied details to adapt the playback to the current environment. However, the lack of a common HTTP adaptive streaming standard led to multiple proprietary solutions developed by major Internet companies like Microsoft (Smooth Streaming), Apple (HTTP Live Streaming) and Adobe (HTTP Dynamic Streaming), loosely based on the aforementioned principle. In 2012, the ISO/IEC published the Dynamic Adaptive Streaming over HTTP (MPEG-DASH) standard. As of today, DASH is becoming widely accepted, with major companies announcing their support or having already implemented the standard in their products. MPEG-DASH is typically used with single-layer codecs like H.264/AVC, but recent publications show that scalable video coding can use the existing HTTP infrastructure more efficiently. Furthermore, the layered approach of scalable video coding extends the adaptation options for the client, since already downloaded segments can be enhanced at a later time. The influence of distortions on the perceived QoE for non-adaptive video streaming is well reviewed and published. For HTTP streaming, the QoE of the user is influenced by the initial delay (i.e. the time the client pre-buffers video data) and the length and frequency of playback interruptions due to a depleted video playback buffer. Studies highlight that even low stalling times and frequencies have a negative impact on the QoE of the user and should therefore be avoided. The first contribution of this thesis is the identification of QoE influence factors of adaptive video streaming by means of crowd-sourcing and a laboratory study. MPEG-DASH does not specify how to adapt the playback to the available bandwidth, and therefore the design of a download/adaptation algorithm is left to the developer of the client logic. The second contribution of this thesis is the design of a novel user-centric adaptation logic for DASH with SVC. Other download algorithms for segmented HTTP streaming with single-layer and scalable video coding have been published lately. However, there is little information about the behavior of these algorithms regarding the identified QoE influence factors. The third contribution is a user-centric performance evaluation of three existing adaptation algorithms and a comparison to the proposed algorithm.
In the performance evaluation, we also assess the fairness of the algorithms. In one fairness scenario, two clients deploy the same adaptation algorithm and share one Internet connection. For a fair adaptation algorithm, we expect the behavior of the two clients to be identical. In a second fairness scenario, one client shares the Internet connection with a large HTTP file download, and we expect an even bandwidth distribution between the video streaming and the file download. The fourth contribution of this thesis is an evaluation of the behavior of the algorithms in a two-client and HTTP cross-traffic scenario. The remainder of this thesis is structured as follows. Chapter II gives a brief introduction to video coding with H.264, the HTTP adaptive streaming standard MPEG-DASH, the investigated adaptation algorithms and metrics of Quality of Experience (QoE) for video streaming. Chapter III presents the methodology and results of the subjective studies conducted in the course of this thesis to identify the QoE influence factors of adaptive video streaming. In Chapter IV, we introduce the proposed adaptation algorithm and the methodology of the performance evaluation. Chapter V highlights the results of the performance evaluation and compares the investigated adaptation algorithms. Chapter VI summarizes the main findings and gives an outlook towards QoE-centric management of DASH with SVC.}, subject = {DASH}, language = {en} } @phdthesis{Sun2014, author = {Sun, Kaipeng}, title = {Six Degrees of Freedom Object Pose Estimation with Fusion Data from a Time-of-flight Camera and a Color Camera}, isbn = {978-3-923959-97-6}, doi = {10.25972/OPUS-10508}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-105089}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Object six Degrees of Freedom (6DOF) pose estimation is a fundamental problem in many practical robotic applications, where the target or an obstacle with a simple or complex shape can move fast in cluttered environments. In this thesis, a 6DOF pose estimation algorithm is developed based on the fused data from a time-of-flight camera and a color camera. The algorithm is divided into two stages, an annealed particle filter based coarse pose estimation stage and a gradient descent based accurate pose optimization stage. In the first stage, each particle is evaluated with a sparse representation. In this stage, the large inter-frame motion of the target can be handled well. In the second stage, the conventional range-data-based Iterative Closest Point algorithm is extended by incorporating the target appearance information and used for calculating the accurate pose by refining the coarse estimate from the first stage. To deal with significant illumination variations during tracking, spherical harmonic illumination modeling is investigated and integrated into both stages. The robustness and accuracy of the proposed algorithm are demonstrated through experiments on various objects in both indoor and outdoor environments.
Moreover, real-time performance can be achieved with graphics processing unit acceleration.}, subject = {Mustererkennung}, language = {en} } @phdthesis{Xu2014, author = {Xu, Zhihao}, title = {Cooperative Formation Controller Design for Time-Delay and Optimality Problems}, isbn = {978-3-923959-96-9}, doi = {10.25972/OPUS-10555}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-105555}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {This dissertation presents controller design methodologies for a formation of cooperative mobile robots to perform trajectory tracking and convoy protection tasks. Two major problems related to multi-agent formation control are addressed, namely the time-delay and optimality problems. For the task of trajectory tracking, a leader-follower based system structure is adopted for the controller design, where the selection criteria for the controller parameters are derived through analyses of characteristic polynomials. The resulting parameters ensure the stability of the system and overcome the steady-state error as well as the oscillation behavior under the time-delay effect. In the convoy protection scenario, a decentralized coordination strategy for the balanced deployment of mobile robots is first proposed. Based on this coordination scheme, optimal controller parameters are generated in both centralized and decentralized fashion to achieve dynamic convoy protection in a unified framework, where a distributed optimization technique is applied in the decentralized strategy. This unified framework takes into account the motion of the target to be protected and the desired system performance, for instance, minimal energy consumption and equal inter-vehicle distances. Both trajectory tracking and convoy protection tasks are demonstrated through simulations and real-world hardware experiments based on the robotic equipment at the Department of Computer Science VII, University of W{\"u}rzburg.}, subject = {Optimalwertregelung}, language = {en} } @phdthesis{Tzschichholz2014, author = {Tzschichholz, Tristan}, title = {Relative pose estimation of known rigid objects using a novel approach to high-level PMD-/CCD- sensor data fusion with regard to applications in space}, isbn = {978-3-923959-95-2}, issn = {1868-7474}, doi = {10.25972/OPUS-10391}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-103918}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {In this work, a novel method for estimating the relative pose of a known object is presented, which relies on an application-specific data fusion process. A PMD sensor in conjunction with a CCD sensor is used to perform the pose estimation. Furthermore, the work provides a method for extending the measurement range of the PMD sensor, along with the necessary calibration methodology. Finally, extensive measurements on a very accurate Rendezvous and Docking testbed are made to evaluate the performance, which includes a detailed discussion of lighting conditions.}, subject = {Bildverarbeitung}, language = {en} } @phdthesis{Witek2014, author = {Witek, Maximilian}, title = {Multiobjective Traveling Salesman Problems and Redundancy of Complete Sets}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-110740}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {The first part of this thesis deals with the approximability of the traveling salesman problem.
This problem is defined on a complete graph with edge weights, and the task is to find a Hamiltonian cycle of minimum weight that visits each vertex exactly once. We study the most important multiobjective variants of this problem. In the multiobjective case, the edge weights are vectors of natural numbers with one component for each objective, and since weight vectors are typically incomparable, the optimal Hamiltonian cycle does not exist. Instead we consider the Pareto set, which consists of those Hamiltonian cycles that are not dominated by some other, strictly better Hamiltonian cycles. The central goal in multiobjective optimization and in the first part of this thesis in particular is the approximation of such Pareto sets. We first develop improved approximation algorithms for the two-objective metric traveling salesman problem on multigraphs and for related Hamiltonian path problems that are inspired by the single-objective Christofides' heuristic. We further show arguments indicating that our algorithms are difficult to improve. Furthermore we consider multiobjective maximization versions of the traveling salesman problem, where the task is to find Hamiltonian cycles with high weight in each objective. We generalize single-objective techniques to the multiobjective case, where we first compute a cycle cover with high weight and then remove an edge with low weight in each cycle. Since weight vectors are often incomparable, the choice of the edges of low weight is non-trivial. We develop a general lemma that solves this problem and enables us to generalize the single-objective maximization algorithms to the multiobjective case. We obtain improved, randomized approximation algorithms for the multiobjective maximization variants of the traveling salesman problem. We conclude the first part by developing deterministic algorithms for these problems. The second part of this thesis deals with redundancy properties of complete sets. We call a set autoreducible if for every input instance x we can efficiently compute some y that is different from x but that has the same membership to the set. If the set can be split into two equivalent parts, then it is called weakly mitotic, and if the splitting is obtained by an efficiently decidable separator set, then it is called mitotic. For different reducibility notions and complexity classes, we analyze how redundant its complete sets are. Previous research in this field concentrates on polynomial-time computable reducibility notions. The main contribution of this part of the thesis is a systematic study of the redundancy properties of complete sets for typical complexity classes and reducibility notions that are computable in logarithmic space. We use different techniques to show autoreducibility and mitoticity that depend on the size of the complexity class and the strength of the reducibility notion considered. For small complexity classes such as NL and P we use self-reducible, complete sets to show that all complete sets are autoreducible. For large complexity classes such as PSPACE and EXP we apply diagonalization methods to show that all complete sets are even mitotic. For intermediate complexity classes such as NP and the remaining levels of the polynomial-time hierarchy we establish autoreducibility of complete sets by locally checking computational transcripts. In many cases we can show autoreducibility of complete sets, while mitoticity is not known to hold. 
We conclude the second part by showing that in some cases, autoreducibility of complete sets at least implies weak mitoticity.}, subject = {Mehrkriterielle Optimierung}, language = {en} } @phdthesis{Schoeneberg2014, author = {Sch{\"o}neberg, Hendrik}, title = {Semiautomatische Metadaten-Extraktion und Qualit{\"a}tsmanagement in Workflow-Systemen zur Digitalisierung historischer Dokumente}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-104878}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Performing Named Entity Recognition on ancient documents is a time-consuming, complex and error-prone manual task. It is, however, a prerequisite for identifying related documents and correlating named entities across distinct sources, which helps to precisely recreate historic events. In order to reduce the manual effort, automated classification approaches could be leveraged. Classifying terms in ancient documents in an automated manner poses a difficult task due to the sources' challenging syntax and poor conservation states. This thesis introduces and evaluates approaches that can cope with complex syntactical environments by using statistical information derived from a term's context and combining it with domain-specific heuristic knowledge to perform a classification. Furthermore, this thesis demonstrates how metadata generated by these approaches can be used as error heuristics to greatly improve the performance of workflow systems for the digitization of early documents.}, subject = {Klassifikation}, language = {de} } @techreport{KounevBrosigHuber2014, author = {Kounev, Samuel and Brosig, Fabian and Huber, Nikolaus}, title = {The Descartes Modeling Language}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-104887}, pages = {91}, year = {2014}, abstract = {This technical report introduces the Descartes Modeling Language (DML), a new architecture-level modeling language for modeling Quality-of-Service (QoS) and resource management related aspects of modern dynamic IT systems, infrastructures and services. DML is designed to serve as a basis for self-aware resource management during operation, ensuring that system QoS requirements are continuously satisfied while infrastructure resources are utilized as efficiently as possible.}, subject = {Ressourcenmanagement}, language = {en} } @article{GageikReinthalBenzetal.2014, author = {Gageik, Nils and Reinthal, Eric and Benz, Paul and Montenegro, Sergio}, title = {Complementary Vision based Data Fusion for Robust Positioning and Directed Flight of an Autonomous Quadrocopter}, doi = {10.5121/ijaia.2014.5501}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-113621}, year = {2014}, abstract = {The present paper describes an improved 4 DOF (x/y/z/yaw) vision based positioning solution for fully 6 DOF autonomous UAVs, optimised in terms of computation and development costs as well as robustness and performance. The positioning system combines Fourier transform-based image registration (Fourier Tracking) and differential optical flow computation to overcome the drawbacks of a single approach. The first method is capable of recognizing movement in four degrees of freedom under variable lighting conditions, but suffers from a low sample rate and high computational costs. Differential optical flow computation, on the other hand, enables a very high sample rate to gain control robustness. This method, however, is limited to translational movement only and performs poorly in bad lighting conditions.
A reliable positioning system for autonomous flights with free heading is obtained by fusing both techniques. Although the vision system can measure the variable altitude during flight, infrared and ultrasonic sensors are used for robustness. This work is part of the AQopterI8 project, which aims to develop an autonomous flying quadrocopter for indoor applications, and makes autonomous directed flight possible.}, language = {en} } @article{AliMontenegro2014, author = {Ali, Quasim and Montenegro, Sergio}, title = {A Matlab Implementation of Differential GPS for Low-cost GPS Receivers}, doi = {10.12716/1001.08.03.03}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-113618}, year = {2014}, abstract = {A number of public codes exist for GPS positioning and baseline determination in off-line mode. However, no software code exists for DGPS exploiting correction factors at base stations, without relying on double difference information. In order to accomplish this, a methodology is introduced in the MATLAB environment for DGPS using C/A pseudoranges on the single frequency L1 only, to make it feasible for low-cost GPS receivers. Our base station is located at an accurately surveyed reference point. Pseudoranges and geometric ranges are compared at the base station to compute the correction factors. These correction factors are then handed over to the rover for all valid satellites observed during an epoch. The rover takes them into account for its own true position determination for the corresponding epoch. In order to validate the proposed algorithm, our rover is also placed at a pre-determined location. The proposed code is an appropriate and simple-to-use tool for post-processing of GPS raw data for accurate position determination of a rover, e.g. an Unmanned Aerial Vehicle, during post-mission analysis.}, language = {en} } @article{MontenegroAliGageik2014, author = {Montenegro, Sergio and Ali, Qasim and Gageik, Nils}, title = {A review on Distributed Control of Cooperating MINI UAVs}, doi = {10.5121/ijaia.2014.5401}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-113009}, year = {2014}, abstract = {Mini Unmanned Aerial Vehicles (MUAVs) are becoming a popular research platform and drawing considerable attention, particularly during the last decade, due to their multi-dimensional applications in almost every walk of life. MUAVs range from simple toys found in electronics supermarkets for entertainment purposes to highly sophisticated commercial platforms performing novel assignments like offshore wind power station inspection and 3D modelling of buildings. This paper presents an overview of the main aspects in the domain of distributed control of cooperating MUAVs to facilitate potential users in this fascinating field. Furthermore, it gives an overview of the state of the art in MUAV technologies, e.g. Photonic Mixer Device (PMD) cameras and distributed control methods, and of on-going work and challenges, which motivate many researchers all over the world to work in this field.}, language = {en} } @phdthesis{Reutelshoefer2014, author = {Reutelsh{\"o}fer, Jochen}, title = {A Meta-Engineering Approach for Document-Centered Knowledge Acquisition}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-107523}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Today, knowledge base authoring for the engineering of intelligent systems is performed mainly by using tools with graphical user interfaces.
An alternative human-computer interaction paradigm is the maintenance and manipulation of electronic documents, which provides several advantages with respect to the social aspects of knowledge acquisition. Until today, it has hardly found any attention as a method for knowledge engineering. This thesis provides a comprehensive discussion of document-centered knowledge acquisition with knowledge markup languages. There, electronic documents are edited by the knowledge authors, and the executable knowledge base entities are captured by markup language expressions within the documents. The analysis of this approach reveals significant advantages as well as new challenges when compared to the use of traditional GUI-based tools. Some advantages of the approach are the low barriers for domain expert participation, the simple integration of informal descriptions, and the possibility of incremental knowledge formalization. It therefore provides good conditions for building up a knowledge acquisition process based on the mixed-initiative strategy, being a flexible combination of direct and indirect knowledge acquisition. Further, it turns out that document-centered knowledge acquisition with knowledge markup languages provides high potential for creating customized knowledge authoring environments, tailored to the needs of the current knowledge engineering project and its participants. The thesis derives a process model to optimally exploit this customization potential, evolving a project-specific authoring environment by an agile process on the meta level. This meta-engineering process continuously refines the three aspects of the document space: the employed markup languages, the scope of the informal knowledge, and the structuring and organization of the documents. The evolution of the first aspect, the markup languages, plays a key role, implying the design of project-specific markup languages that are easily understood by the knowledge authors and that are suitable to capture the required formal knowledge precisely. The goal of the meta-engineering process is to create a knowledge authoring environment where structure and presentation of the domain knowledge comply well with the users' mental model of the domain. In that way, the approach can help to ease major issues of knowledge-based system development, such as high initial development costs and long-term maintenance problems. In practice, the application of the meta-engineering approach for document-centered knowledge acquisition poses several technical challenges that need to be coped with by appropriate tool support. In this thesis, KnowWE, an extensible document-centered knowledge acquisition environment, is presented. The system is designed to support the technical tasks implied by the meta-engineering approach, such as the design and implementation of new markup languages, content refactoring, and authoring support. It is used to evaluate the approach in several real-world case studies from different domains, such as medicine or engineering.
We conclude the thesis with a summary and point out further interesting research questions concerning the document-centered knowledge acquisition approach.}, subject = {Wissenstechnik}, language = {en} } @phdthesis{Ifland2014, author = {Ifland, Marianus}, title = {Feedback-Generierung f{\"u}r offene, strukturierte Aufgaben in E-Learning-Systemen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-106348}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Bei Lernprozessen spielt das Anwenden der zu erlernenden T{\"a}tigkeit eine wichtige Rolle. Im Kontext der Ausbildung an Schulen und Hochschulen bedeutet dies, dass es wichtig ist, Sch{\"u}lern und Studierenden ausreichend viele {\"U}bungsm{\"o}glichkeiten anzubieten. Die von Lehrpersonal bei einer "Korrektur" erstellte R{\"u}ckmeldung, auch Feedback genannt, ist jedoch teuer, da der zeitliche Aufwand je nach Art der Aufgabe betr{\"a}chtlich ist. Eine L{\"o}sung dieser Problematik stellen E-Learning-Systeme dar. Geeignete Systeme k{\"o}nnen nicht nur Lernstoff pr{\"a}sentieren, sondern auch {\"U}bungsaufgaben anbieten und nach deren Bearbeitung quasi unmittelbar entsprechendes Feedback generieren. Es ist jedoch im Allgemeinen nicht einfach, maschinelle Verfahren zu implementieren, die Bearbeitungen von {\"U}bungsaufgaben korrigieren und entsprechendes Feedback erstellen. F{\"u}r einige Aufgabentypen, wie beispielsweise Multiple-Choice-Aufgaben, ist dies zwar trivial, doch sind diese vor allem dazu gut geeignet, sogenanntes Faktenwissen abzupr{\"u}fen. Das Ein{\"u}ben von Lernzielen im Bereich der Anwendung ist damit kaum m{\"o}glich. Die Behandlung dieser nach g{\"a}ngigen Taxonomien h{\"o}heren kognitiven Lernziele erlauben sogenannte offene Aufgabentypen, deren Bearbeitung meist durch die Erstellung eines Freitexts in nat{\"u}rlicher Sprache erfolgt. Die Information bzw. das Wissen, das Lernende eingeben, liegt hier also in sogenannter „unstrukturierter" Form vor. Dieses unstrukturierte Wissen ist maschinell nur schwer verwertbar, sodass sich Trainingssysteme, die Aufgaben dieser Art stellen und entsprechende R{\"u}ckmeldung geben, bisher nicht durchgesetzt haben. Es existieren jedoch auch offene Aufgabentypen, bei denen Lernende das Wissen in strukturierter Form eingeben, so dass es maschinell leichter zu verwerten ist. F{\"u}r Aufgaben dieser Art lassen sich somit Trainingssysteme erstellen, die eine gute M{\"o}glichkeit darstellen, Sch{\"u}lern und Studierenden auch f{\"u}r praxisnahe Anwendungen viele {\"U}bungsm{\"o}glichkeiten zur Verf{\"u}gung zu stellen, ohne das Lehrpersonal zus{\"a}tzlich zu belasten. In dieser Arbeit wird beschrieben, wie bestimmte Eigenschaften von Aufgaben ausgenutzt werden, um entsprechende Trainingssysteme konzipieren und implementieren zu k{\"o}nnen. Es handelt sich dabei um Aufgaben, deren L{\"o}sungen strukturiert und maschinell interpretierbar sind. Im Hauptteil der Arbeit werden vier Trainingssysteme bzw. deren Komponenten beschrieben und es wird von den Erfahrungen mit deren Einsatz in der Praxis berichtet: Eine Komponente des Trainingssystems „CaseTrain" kann Feedback zu UML Klassendiagrammen erzeugen. Das neuartige Trainingssystem „WARP" generiert zu UML Aktivit{\"a}tsdiagrammen Feedback in mehreren Ebenen, u.a. indem es das durch Aktivit{\"a}tsdiagramme definierte Verhalten von Robotern in virtuellen Umgebungen visualisiert. Mit „{\"U}PS" steht ein Trainingssystem zur Verf{\"u}gung, mit welchem die Eingabe von SQL-Anfragen einge{\"u}bt werden kann.
Eine weitere in „CaseTrain" implementierte Komponente f{\"u}r Bildmarkierungsaufgaben erm{\"o}glicht eine unmittelbare, automatische Bewertung entsprechender Aufgaben. Die Systeme wurden im Zeitraum zwischen 2011 und 2014 an der Universit{\"a}t W{\"u}rzburg in Vorlesungen mit bis zu 300 Studierenden eingesetzt und evaluiert. Die Evaluierung ergab eine hohe Nutzung und eine gute Bewertung der Studierenden der eingesetzten Konzepte, womit belegt wurde, dass elektronische Trainingssysteme f{\"u}r offene Aufgaben in der Praxis eingesetzt werden k{\"o}nnen.}, subject = {E-Learning}, language = {de} } @misc{Vorbach2014, type = {Master Thesis}, author = {Vorbach, Paul}, title = {Analysen und Heuristiken zur Verbesserung von OCR-Ergebnissen bei Frakturtexten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-106527}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Zahlreiche Digitalisierungsprojekte machen das Wissen vergangener Jahrhunderte jederzeit verf{\"u}gbar. Das volle Potenzial der Digitalisierung von Dokumenten entfaltet sich jedoch erst, wenn diese als durchsuchbare Volltexte verf{\"u}gbar gemacht werden. Mithilfe von OCR-Software kann die Erfassung weitestgehend automatisiert werden. Fraktur war ab dem 16. Jahrhundert bis zur Mitte des 20. Jahrhunderts die verbreitete Schrift des deutschen Sprachraums. Durch einige Besonderheiten von Fraktur bleiben die Erkennungsraten bei Frakturtexten aber meist deutlich hinter den Erkennungsergebnissen bei Antiquatexten zur{\"u}ck. Diese Arbeit konzentriert sich auf die Verbesserung der Erkennungsergebnisse der OCR-Software Tesseract bei Frakturtexten. Dazu wurden die Software und bestehende Sprachpakete gesondert auf die Eigenschaften von Fraktur hin analysiert. Durch spezielles Training und Anpassungen an der Software wurde anschließend versucht, die Ergebnisse zu verbessern und Erkenntnisse {\"u}ber die Effektivit{\"a}t verschiedener Ans{\"a}tze zu gewinnen. Die Zeichenfehlerraten konnten durch verschiedene Experimente von zuvor 2,5 Prozent auf 1,85 Prozent gesenkt werden. Außerdem werden Werkzeuge vorgestellt, die das Training neuer Schriftarten f{\"u}r Tesseract erleichtern und eine Evaluation der erzielten Verbesserungen erm{\"o}glichen.}, subject = {Optische Zeichenerkennung}, language = {de} } @phdthesis{Klein2014, author = {Klein, Dominik Werner}, title = {Design and Evaluation of Components for Future Internet Architectures}, issn = {1432-8801}, doi = {10.25972/OPUS-9313}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-93134}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Die derzeitige Internetarchitektur wurde nicht in einem geplanten Prozess konzipiert und entwickelt, sondern hat vielmehr eine evolutionsartige Entwicklung hinter sich. Ausl{\"o}ser f{\"u}r die jeweiligen Evolutionsschritte waren dabei meist aufstrebende Anwendungen, welche neue Anforderungen an die zugrundeliegende Netzarchitektur gestellt haben. Um diese Anforderungen zu erf{\"u}llen, wurden h{\"a}ufig neuartige Dienste oder Protokolle spezifiziert und in die bestehende Architektur integriert. Dieser Prozess ist jedoch meist mit hohem Aufwand verbunden und daher sehr tr{\"a}ge, was die Entwicklung und Verbreitung innovativer Dienste beeintr{\"a}chtigt. Derzeitig diskutierte Konzepte wie Software-Defined Networking (SDN) oder Netzvirtualisierung (NV) werden als eine M{\"o}glichkeit angesehen, die Altlasten der bestehenden Internetarchitektur zu l{\"o}sen. 
Beiden Konzepten gemein ist die Idee, logische Netze {\"u}ber dem physikalischen Substrat zu betreiben. Diese logischen Netze sind hochdynamisch und k{\"o}nnen so flexibel an die Anforderungen der jeweiligen Anwendungen angepasst werden. Insbesondere erlaubt das Konzept der Virtualisierung intelligentere Netzknoten, was innovative neue Anwendungsf{\"a}lle erm{\"o}glicht. Ein h{\"a}ufig in diesem Zusammenhang diskutierter Anwendungsfall ist die Mobilit{\"a}t sowohl von Endger{\"a}ten als auch von Diensten an sich. Die Mobilit{\"a}t der Dienste wird hierbei ausgenutzt, um die Zugriffsverz{\"o}gerung oder die belegten Ressourcen im Netz zu reduzieren, indem die Dienste zum Beispiel in f{\"u}r den Nutzer geographisch nahe Datenzentren migriert werden. Neben den reinen Mechanismen bez{\"u}glich Dienst- und Endger{\"a}temobilit{\"a}t sind in diesem Zusammenhang auch geeignete {\"U}berwachungsl{\"o}sungen relevant, welche die vom Nutzer wahrgenommene Dienstg{\"u}te bewerten k{\"o}nnen. Diese L{\"o}sungen liefern wichtige Entscheidungshilfen f{\"u}r die Migration oder {\"u}berwachen m{\"o}gliche Effekte der Migration auf die erfahrene Dienstg{\"u}te beim Nutzer. Im Falle von Video Streaming erm{\"o}glicht ein solcher Anwendungsfall die flexible Anpassung der Streaming Topologie f{\"u}r mobile Nutzer, um so die Videoqualit{\"a}t unabh{\"a}ngig vom Zugangsnetz aufrechterhalten zu k{\"o}nnen. Im Rahmen dieser Doktorarbeit wird der beschriebene Anwendungsfall am Beispiel einer Video Streaming Anwendung n{\"a}her analysiert und auftretende Herausforderungen werden diskutiert. Des Weiteren werden L{\"o}sungsans{\"a}tze vorgestellt und bez{\"u}glich ihrer Effizienz ausgewertet. Im Detail besch{\"a}ftigt sich die Arbeit mit der Leistungsanalyse von Mechanismen f{\"u}r die Dienstmobilit{\"a}t und entwickelt eine Architektur zur Optimierung der Dienstmobilit{\"a}t. Im Bereich Endger{\"a}temobilit{\"a}t werden Verbesserungen entwickelt, welche die Latenz zwischen Endger{\"a}t und Dienst reduzieren oder die Konnektivit{\"a}t unabh{\"a}ngig vom Zugangsnetz gew{\"a}hrleisten. Im letzten Teilbereich wird eine L{\"o}sung zur {\"U}berwachung der Videoqualit{\"a}t im Netz entwickelt und bez{\"u}glich ihrer Genauigkeit analysiert.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Fink2014, author = {Fink, Martin}, title = {Crossings, Curves, and Constraints in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, isbn = {978-3-95826-002-3 (print)}, doi = {10.25972/WUP-978-3-95826-003-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-98235}, school = {W{\"u}rzburg University Press}, pages = {222}, year = {2014}, abstract = {In many cases, problems, data, or information can be modeled as graphs. Graphs can be used as a tool for modeling in any case where connections between distinguishable objects occur. Any graph consists of a set of objects, called vertices, and a set of connections, called edges, such that any edge connects a pair of vertices. For example, a social network can be modeled by a graph by transforming the users of the network into vertices and friendship relations between users into edges. Also physical networks like computer networks or transportation networks, for example, the metro network of a city, can be seen as graphs. For making graphs and, thereby, the data that is modeled, well-understandable for users, we need a visualization. Graph drawing deals with algorithms for visualizing graphs. 
In this thesis, especially the use of crossings and curves is investigated for graph drawing problems under additional constraints. The constraints that occur in the problems investigated in this thesis especially restrict the positions of (a part of) the vertices; this is done either as a hard constraint or as an optimization criterion.}, subject = {Graphenzeichnen}, language = {en} } @phdthesis{Jarschel2014, author = {Jarschel, Michael}, title = {An Assessment of Applications and Performance Analysis of Software Defined Networking}, issn = {1432-8801}, doi = {10.25972/OPUS-10079}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-100795}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {With the introduction of OpenFlow by the Stanford University in 2008, a process began in the area of network research, which questions the predominant approach of fully distributed network control. OpenFlow is a communication protocol that allows the externalization of the network control plane from the network devices, such as a router, and to realize it as a logically-centralized entity in software. For this concept, the term "Software Defined Networking" (SDN) was coined during scientific discourse. For the network operators, this concept has several advantages. The two most important can be summarized under the points cost savings and flexibility. Firstly, it is possible through the uniform interface for network hardware ("Southbound API"), as implemented by OpenFlow, to combine devices and software from different manufacturers, which increases the innovation and price pressure on them. Secondly, the realization of the network control plane as a freely programmable software with open interfaces ("Northbound API") provides the opportunity to adapt it to the individual circumstances of the operator's network and to exchange information with the applications it serves. This allows the network to be more flexible and to react more quickly to changing circumstances as well as transport the traffic more effectively and tailored to the user's "Quality of Experience" (QoE). The approach of a separate network control layer for packet-based networks is not new and has already been proposed several times in the past. Therefore, the SDN approach has raised many questions about its feasibility in terms of efficiency and applicability. These questions are caused to some extent by the fact that there is no generally accepted definition of the SDN concept to date. It is therefore a part of this thesis to derive such a definition. In addition, several of the open issues are investigated. This Investigations follow the three aspects: Performance Evaluation of Software Defined Networking, applications on the SDN control layer, and the usability of SDN Northbound-API for creation application-awareness in network operation. Performance Evaluation of Software Defined Networking: The question of the efficiency of an SDN-based system was from the beginning one of the most important. In this thesis, experimental measurements of the performance of OpenFlow-enabled switch hardware and control software were conducted for the purpose of answering this question. The results of these measurements were used as input parameters for establishing an analytical model of the reactive SDN approach. Through the model it could be determined that the performance of the software control layer, often called "Controller", is crucial for the overall performance of the system, but that the approach is generally viable. 
Based on this finding a software for analyzing the performance of SDN controllers was developed. This software allows the emulation of the forwarding layer of an SDN network towards the control software and can thus determine its performance in different situations and configurations. The measurements with this software showed that there are quite significant differences in the behavior of different control software implementations. Among other things it has been shown that some show different characteristics for various switches, in particular in terms of message processing speed. Under certain circumstances this can lead to network failures. Applications on the SDN control layer: The core piece of software defined networking are the intelligent network applications that operate on the control layer. However, their development is still in its infancy and little is known about the technical possibilities and their limitations. Therefore, the relationship between an SDN-based and classical implementation of a network function is investigated in this thesis. This function is the monitoring of network links and the traffic they carry. A typical approach for this task has been built based on Wiretapping and specialized measurement hardware and compared with an implementation based on OpenFlow switches and a special SDN control application. The results of the comparison show that the SDN version can compete in terms of measurement accuracy for bandwidth and delay estimation with the traditional measurement set-up. However, a compromise has to be found for measurements below the millisecond range. Another question regarding the SDN control applications is whether and how well they can solve existing problems in networks. Two programs have been developed based on SDN in this thesis to solve two typical network issues. Firstly, the tool "IPOM", which enables considerably more flexibility in the study of effects of network structures for a researcher, who is confined to a fixed physical test network topology. The second software provides an interface between the Cloud Orchestration Software "OpenNebula" and an OpenFlow controller. The purpose of this software was to investigate experimentally whether a pre-notification of the network of an impending relocation of a virtual service in a data center is sufficient to ensure the continuous operation of that service. This was demonstrated on the example of a video service. Usability of the SDN Northbound API for creating application-awareness in network operation: Currently, the fact that the network and the applications that run on it are developed and operated separately leads to problems in network operation. SDN offers with the Northbound-API an open interface that enables the exchange between information of both worlds during operation. One aim of this thesis was to investigate whether this interface can be exploited so that the QoE experienced by the user can be maintained on high level. For this purpose, the QoE influence factors were determined on a challenging application by means of a subjective survey study. The application is cloud gaming, in which the calculation of video game environments takes place in the cloud and is transported via video over the network to the user. It was shown that apart from the most important factor influencing QoS, i.e., packet loss on the downlink, also the type of game type and its speed play a role. 
This demonstrates that in addition to QoS the application state is important and should be communicated to the network. Since an implementation of such a state conscious SDN for the example of Cloud Gaming was not possible due to its proprietary implementation, in this thesis the application "YouTube video streaming" was chosen as an alternative. For this application, status information is retrievable via the "Yomo" tool and can be used for network control. It was shown that an SDN-based implementation of an application-aware network has distinct advantages over traditional network management methods and the user quality can be obtained in spite of disturbances.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Hock2014, author = {Hock, David Rog{\´e}r}, title = {Analysis and Optimization of Resilient Routing in Core Communication Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-10168}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-101681}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {175}, year = {2014}, abstract = {Routing is one of the most important issues in any communication network. It defines on which path packets are transmitted from the source of a connection to the destination. It allows to control the distribution of flows between different locations in the network and thereby is a means to influence the load distribution or to reach certain constraints imposed by particular applications. As failures in communication networks appear regularly and cannot be completely avoided, routing is required to be resilient against such outages, i.e., routing still has to be able to forward packets on backup paths even if primary paths are not working any more. Throughout the years, various routing technologies have been introduced that are very different in their control structure, in their way of working, and in their ability to handle certain failure cases. Each of the different routing approaches opens up their own specific questions regarding configuration, optimization, and inclusion of resilience issues. This monograph investigates, with the example of three particular routing technologies, some concrete issues regarding the analysis and optimization of resilience. It thereby contributes to a better general, technology-independent understanding of these approaches and of their diverse potential for the use in future network architectures. The first considered routing type, is decentralized intra-domain routing based on administrative IP link costs and the shortest path principle. Typical examples are common today's intra-domain routing protocols OSPF and IS-IS. This type of routing includes automatic restoration abilities in case of failures what makes it in general very robust even in the case of severe network outages including several failed components. Furthermore, special IP-Fast Reroute mechanisms allow for a faster reaction on outages. For routing based on link costs, traffic engineering, e.g. the optimization of the maximum relative link load in the network, can be done indirectly by changing the administrative link costs to adequate values. The second considered routing type, MPLS-based routing, is based on the a priori configuration of primary and backup paths, so-called Label Switched Paths. The routing layout of MPLS paths offers more freedom compared to IP-based routing as it is not restricted by any shortest path constraints but any paths can be setup. However, this in general involves a higher configuration effort. 
Finally, in the third considered routing type, typically centralized routing using a Software Defined Networking (SDN) architecture, simple switches only forward packets according to routing decisions made by centralized controller units. SDN-based routing layouts offer the same freedom as for explicit paths configured using MPLS. In case of a failure, new rules can be setup by the controllers to continue the routing in the reduced topology. However, new resilience issues arise caused by the centralized architecture. If controllers are not reachable anymore, the forwarding rules in the single nodes cannot be adapted anymore. This might render a rerouting in case of connection problems in severe failure scenarios infeasible.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Lemmerich2014, author = {Lemmerich, Florian}, title = {Novel Techniques for Efficient and Effective Subgroup Discovery}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97812}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Large volumes of data are collected today in many domains. Often, there is so much data available, that it is difficult to identify the relevant pieces of information. Knowledge discovery seeks to obtain novel, interesting and useful information from large datasets. One key technique for that purpose is subgroup discovery. It aims at identifying descriptions for subsets of the data, which have an interesting distribution with respect to a predefined target concept. This work improves the efficiency and effectiveness of subgroup discovery in different directions. For efficient exhaustive subgroup discovery, algorithmic improvements are proposed for three important variations of the standard setting: First, novel optimistic estimate bounds are derived for subgroup discovery with numeric target concepts. These allow for skipping the evaluation of large parts of the search space without influencing the results. Additionally, necessary adaptations to data structures for this setting are discussed. Second, for exceptional model mining, that is, subgroup discovery with a model over multiple attributes as target concept, a generic extension of the well-known FP-tree data structure is introduced. The modified data structure stores intermediate condensed data representations, which depend on the chosen model class, in the nodes of the trees. This allows the application for many popular model classes. Third, subgroup discovery with generalization-aware measures is investigated. These interestingness measures compare the target share or mean value in the subgroup with the respective maximum value in all its generalizations. For this setting, a novel method for deriving optimistic estimates is proposed. In contrast to previous approaches, the novel measures are not exclusively based on the anti-monotonicity of instance coverage, but also takes the difference of coverage between the subgroup and its generalizations into account. In all three areas, the advances lead to runtime improvements of more than an order of magnitude. The second part of the contributions focuses on the \emph{effectiveness} of subgroup discovery. These improvements aim to identify more interesting subgroups in practical applications. For that purpose, the concept of expectation-driven subgroup discovery is introduced as a new family of interestingness measures. 
It computes the score of a subgroup based on the difference between the actual target share and the target share that could be expected given the statistics for the separate influence factors that are combined to describe the subgroup. In doing so, previously undetected interesting subgroups are discovered, while other, partially redundant findings are suppressed. Furthermore, this work also approaches practical issues of subgroup discovery: In that direction, the VIKAMINE II tool is presented, which extends its predecessor with a rebuild user interface, novel algorithms for automatic discovery, new interactive mining techniques, as well novel options for result presentation and introspection. Finally, some real-world applications are described that utilized the presented techniques. These include the identification of influence factors on the success and satisfaction of university students and the description of locations using tagging data of geo-referenced images.}, subject = {Data Mining}, language = {en} } @inproceedings{JannidisRegerWeimeretal.2015, author = {Jannidis, Fotis and Reger, Isabella and Weimer, Lukas and Krug, Markus and Puppe, Frank}, title = {Automatische Erkennung von Figuren in deutschsprachigen Romanen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-143332}, pages = {7}, year = {2015}, abstract = {Eine wichtige Grundlage f{\"u}r die quantitative Analyse von Erz{\"a}hltexten, etwa eine Netzwerkanalyse der Figurenkonstellation, ist die automatische Erkennung von Referenzen auf Figuren in Erz{\"a}hltexten, ein Sonderfall des generischen NLP-Problems der Named Entity Recognition. Bestehende, auf Zeitungstexten trainierte Modelle sind f{\"u}r literarische Texte nur eingeschr{\"a}nkt brauchbar, da die Einbeziehung von Appellativen in die Named Entity-Definition und deren h{\"a}ufige Verwendung in Romantexten zu einem schlechten Ergebnis f{\"u}hrt. Dieses Paper stellt eine anhand eines manuell annotierten Korpus auf deutschsprachige Romane des 19. Jahrhunderts angepasste NER-Komponente vor.}, subject = {Digital Humanities}, language = {de} } @phdthesis{Bregenzer2015, author = {Bregenzer, J{\"u}rgen}, title = {Effizienter Einsatz von Multicore-Architekturen in der Steuerungstechnik}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-010-8 (Print)}, doi = {10.25972/WUP-978-3-95826-011-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-106239}, school = {W{\"u}rzburg University Press}, pages = {185}, year = {2015}, abstract = {Der Einsatz von Multicore-Prozessoren in der industriellen Steuerungstechnik birgt sowohl Chancen als auch Risiken. Die vorliegende Dissertation entwickelt und bewertet aus diesem Grund generische Strategien zur Nutzung dieser Prozessorarchitektur unter Ber{\"u}cksichtigung der spezifischen Rahmenbedingungen und Anforderungen dieser Dom{\"a}ne. Multicore-Prozessoren bieten die Chance zur Konsolidierung derzeit auf dedizierter Hardware ausgef{\"u}hrter heterogener Steuerungssubsysteme unter einer bisher nicht erreichbaren temporalen Isolation. In diesem Kontext definiert die vorliegende Dissertation die spezifischen Anforderungen, die eine integrierte Ausf{\"u}hrung in der Dom{\"a}ne der industriellen Automatisierung erf{\"u}llen muss. Eine Vorbedingung f{\"u}r ein derartiges Szenario stellt allerdings der Einsatz einer geeigneten Konsolidierungsl{\"o}sung dar. 
Mit einem virtualisierten und einem hybriden Konsolidierungsansatz werden deshalb zwei repr{\"a}sentative L{\"o}sungen f{\"u}r die Dom{\"a}ne eingebetteter Systeme vorgestellt, die schließlich hinsichtlich der zuvor definierten Kriterien evaluiert werden. Da die Taktraten von Prozessoren physikalische Grenzen erreicht haben, werden sich in der Steuerungstechnik signifikante Performanzsteigerungen zuk{\"u}nftig nur durch den Einsatz von Multicore-Prozessoren erzielen lassen. Dies hat zur Vorbedingung, dass die Firmware die Parallelit{\"a}t dieser Prozessorarchitektur in geeigneter Weise zu nutzen vermag. Leider entstehen bei der Parallelisierung eines komplexen Systems wie einer Automatisierungs-Firmware im Allgemeinen signifikante Aufw{\"a}nde. Infolgedessen sollten diesbez{\"u}gliche Entscheidungen nur auf Basis einer objektiven Abw{\"a}gung potentieller Alternativen getroffen werden. Allerdings macht die Systemkomplexit{\"a}t eine Absch{\"a}tzung der durch eine spezifische parallele Firmware-Architektur zu erwartenden Performanz zu einer anspruchsvollen Aufgabe. Dies gilt vor allem, da eine Parallelisierung gefordert wird, die f{\"u}r eine Vielzahl von Lastszenarien in Form gesteuerter Maschinen geeignet ist. Aus diesem Grund spezifiziert die vorliegende Dissertation eine anwendungsorientierte Methode zur Unterst{\"u}tzung von Entwurfsentscheidungen, die bei der Migration einer bestehenden Singlecore-Firmware auf eine homogene Multicore-Architektur zu treffen sind. Dies wird erreicht, indem in automatisierter Weise geeignete Firmware-Modelle auf Basis von dynamischem Profiling der Firmware unter mehreren repr{\"a}sentativen Lastszenarien erstellt werden. Im Anschluss daran werden diese Modelle um das Expertenwissen von Firmware-Entwicklern erweitert, bevor mittels multikriterieller genetischer Algorithmen der Entwurfsraum der Parallelisierungsalternativen exploriert wird. Schließlich kann eine spezifische L{\"o}sung der auf diese Weise hergeleiteten Pareto-Front auf Basis ihrer Bewertungsmetriken zur Implementierung durch einen Entwickler ausgew{\"a}hlt werden. Die vorliegende Arbeit schließt mit einer Fallstudie, welche die zuvor beschriebene Methode auf eine numerische Steuerungs-Firmware anwendet und dabei deren Potential f{\"u}r eine umfassende Unterst{\"u}tzung einer Firmware-Parallelisierung aufzeigt.}, subject = {Mehrkernprozessor}, language = {de} } @article{LauterbachBorrmannHessetal.2015, author = {Lauterbach, Helge A. and Borrmann, Dorit and Heß, Robin and Eck, Daniel and Schilling, Klaus and N{\"u}chter, Andreas}, title = {Evaluation of a Backpack-Mounted 3D Mobile Scanning System}, series = {Remote Sensing}, volume = {7}, journal = {Remote Sensing}, number = {10}, doi = {10.3390/rs71013753}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-126247}, pages = {13753-13781}, year = {2015}, abstract = {Recently, several backpack-mounted systems, also known as personal laser scanning systems, have been developed. They consist of laser scanners or cameras that are carried by a human operator to acquire measurements of the environment while walking. These systems were first designed to overcome the challenges of mapping indoor environments with doors and stairs. While the human operator inherently has the ability to open doors and to climb stairs, the flexible movements introduce irregularities of the trajectory to the system. To compete with other mapping systems, the accuracy of these systems has to be evaluated. 
In this paper, we present an extensive evaluation of our backpack mobile mapping system in indoor environments. It is shown that the system can deal with normal human walking motion, but has problems with irregular jittering. Moreover, we demonstrate the applicability of the backpack in a suitable urban scenario.}, language = {en} } @phdthesis{Wamser2015, author = {Wamser, Florian}, title = {Performance Assessment of Resource Management Strategies for Cellular and Wireless Mesh Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-11151}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-111517}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The rapid growth in the field of communication networks over the last decades has been remarkable. We are currently experiencing a continuation of this trend, with an increase in traffic and the emergence of new fields of application. The latter is particularly interesting since, due to advances in the networks and new devices, such as smartphones, tablet PCs, and all kinds of Internet-connected devices, additional applications arise from many different areas. What all these services have in common is that they come from very different directions and belong to different user groups. This results in a very heterogeneous application mix with different requirements and needs on the access networks. The applications within these networks typically use the network technology as a matter of course and expect it to work in all situations and for all sorts of purposes without any further intervention. Mobile TV, for example, assumes that the cellular networks support the streaming of video data. Likewise, mobile-connected electricity meters rely on the timely transmission of accounting data for electricity billing. From the perspective of the communication networks, this requires not only the technical realization for the individual case, but also a broad consideration of all circumstances and all requirements of the users' special devices and applications. Such a comprehensive consideration of all eventualities can only be achieved by a dynamic, customized, and intelligent management of the transmission resources. This management requires exploiting the theoretical capacity as much as possible while also taking the system and network architecture as well as user and application demands into account. Hence, for a high level of customer satisfaction, all requirements of the customers and the applications need to be considered, which requires a multi-faceted resource management. The prerequisite for supporting all devices and applications is consequently a holistic resource management at different levels. At the physical level, on the one hand, the technical possibilities provided by different access technologies, e.g., more transmission antennas, modulation and coding of data, possible cooperation between network elements, etc., need to be exploited. On the other hand, interference and changing network conditions have to be counteracted at the physical level. At the application and user level, the focus should be on the customer demands due to the currently increasing number of different devices and diverse applications (medical, hobby, entertainment, business, civil protection, etc.). The intention of this thesis is the development, investigation, and evaluation of a holistic resource management with respect to new application use cases and requirements for the networks.
Therefore, different communication layers are investigated and corresponding approaches are developed using simulation methods as well as practical emulation in testbeds. The new approaches are designed with respect to different complexity and implementation levels in order to cover the design space of resource management in a systematic way. Since the approaches cannot be evaluated generally for all types of access networks, network-specific use cases and evaluations are finally carried out in addition to the conceptual design and the modeling of the scenario. The first part is concerned with management of resources at the physical layer. We study distributed resource allocation approaches under different settings. Due to the ambiguous performance objectives, a high degree of spectrum reuse is employed in current cellular networks. This results in possible interference between cells that transmit on the same frequencies. The focus is on the identification of approaches that are able to mitigate such interference. Due to the heterogeneity of the applications in the networks, the networks increasingly face diverse application-specific requirements. Consequently, the second part shifts the focus from the optimization of network parameters to the consideration and integration of application and user needs when adjusting network parameters. Therefore, application-aware resource management is introduced to enable efficient and customized access networks. As indicated before, the approaches cannot be evaluated generally for all types of access networks. Consequently, the third contribution is the definition and realization of the application-aware paradigm in different access networks. First, we address multi-hop wireless mesh networks. The fourth contribution then focuses on cellular networks. Application-aware resource management is applied here to the air interface between the user device and the base station. Especially in cellular networks, the intense cost-driven competition among the different operators motivates the use of such resource management to provide cost-efficient networks that are customized with respect to the running applications.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{NavarroBullock2015, author = {Navarro Bullock, Beate}, title = {Privacy aware social information retrieval and spam filtering using folksonomies}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-120941}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Social interactions as introduced by Web 2.0 applications during the last decade have changed the way the Internet is used. Today, it is part of our daily lives to maintain contacts through social networks, to comment on the latest developments in microblogging services, or to save and share information snippets such as photos or bookmarks online. Social bookmarking systems are part of this development. Users can share links to interesting web pages by publishing bookmarks and providing descriptive keywords for them. The structure that evolves from the collection of annotated bookmarks is called a folksonomy. The sharing of interesting and relevant posts enables new ways of retrieving information from the Web. Users can search or browse the folksonomy, looking at resources related to specific tags or users. Ranking methods known from search engines have been adjusted to facilitate retrieval in social bookmarking systems.
Hence, social bookmarking systems have become an alternative or supplement to search engines. In order to better understand the commonalities and differences of social bookmarking systems and search engines, this thesis compares several aspects of the two systems' structure, usage behaviour and content. This includes the use of tags and query terms, the composition of the document collections and the rankings of bookmarks and search engine URLs. Searchers (recorded via session ids), their search terms, and the clicked URLs can be extracted from a search engine query logfile. They form links similar to those found in folksonomies, where a user annotates a resource with tags. We use this analogy to build a tripartite hypergraph from query logfiles (a logsonomy), and compare structural and semantic properties of log- and folksonomies. Overall, we have found similar behavioural, structural and semantic characteristics in both systems. Driven by this insight, we investigate whether folksonomy data can be of use in web information retrieval in a similar way to query log data: we construct training data from query logs and a folksonomy to build models for a learning-to-rank algorithm. First experiments show a positive correlation of ranking results generated from the ranking models of both systems. The research is based on various data collections from the social bookmarking systems BibSonomy and Delicious, Microsoft's search engine MSN (now Bing) and Google data. To maintain social bookmarking systems as a good source for information retrieval, providers need to fight spam. This thesis introduces and analyses different features derived from the specific characteristics of social bookmarking systems to be used in spam detection classification algorithms. The best results can be derived from a combination of profile, activity, semantic and location-based features. Based on the experiments, a spam detection framework that identifies and eliminates spam activities in the social bookmarking system BibSonomy has been developed. The storage and publication of user-related bookmarks and profile information raise questions about user data privacy. What kinds of personal information are collected, and how do systems handle user-related items? In order to answer these questions, the thesis looks into the handling of data privacy in the social bookmarking system BibSonomy. Legal guidelines about how to deal with the private data collected and processed in social bookmarking systems are also presented. Experiments show that the consideration of user data privacy in the process of feature design can be a first step towards strengthening data privacy.}, subject = {Information Retrieval}, language = {en} } @article{HamoudaOezkurSinhaetal.2015, author = {Hamouda, Khaled and Oezkur, Mehmet and Sinha, Bhanu and Hain, Johannes and Menkel, Hannah and Leistner, Marcus and Leyh, Rainer and Schimmer, Christoph}, title = {Different duration strategies of perioperative antibiotic prophylaxis in adult patients undergoing cardiac surgery: an observational study}, series = {Journal of Cardiothoracic Surgery}, volume = {10}, journal = {Journal of Cardiothoracic Surgery}, number = {25}, doi = {10.1186/s13019-015-0225-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-124977}, year = {2015}, abstract = {Background All international guidelines recommend that perioperative antibiotic prophylaxis (PAB) should be routinely administered to patients undergoing cardiac surgery.
However, the duration of PAB is heterogeneous and controversial. Methods Between 01.01.2011 and 31.12.2011, 1096 consecutive cardiac surgery patients were assigned to one of two groups receiving PAB with a second-generation cephalosporin for either 56 h (group I) or 32 h (group II). Patients' characteristics, intraoperative data, and the in-hospital follow-up were analysed. The primary endpoint was the incidence of surgical site infection (deep and superficial sternal wound infection, and vein harvesting site infection; DSWI/SSWI/VHSI). Secondary endpoints were the incidence of respiratory and urinary tract infections, as well as the mortality rate. Results 615/1096 patients (56.1\%) were enrolled (group I: n = 283 versus group II: n = 332). There were no significant differences with regard to patient characteristics, comorbidities, and procedure-related variables. No statistically significant differences were demonstrated concerning primary and secondary endpoints. The incidences of DSWI, SSWI, and VHSI were 4/283 (1.4\%), 5/283 (1.7\%), and 1/283 (0.3\%) in group I versus 6/332 (1.8\%), 9/332 (2.7\%), and 3/332 (0.9\%) in group II (p = 0.76/0.59/0.63). In univariate analyses, female gender, age, peripheral arterial obstructive disease, operating time, ICU duration, transfusion, and respiratory insufficiency were determinants of nosocomial infections (all p ≤ 0.05). Subgroup analyses of these high-risk patients did not show any differences between the two regimes (all p ≥ 0.05). Conclusions Reducing the duration of PAB from 56 h to 32 h in adult cardiac surgery patients was not associated with an increase in the nosocomial infection rate, but contributes to reducing antibiotic resistance and health care costs.}, language = {en} }