@phdthesis{Zink2024, author = {Zink, Johannes}, title = {Algorithms for Drawing Graphs and Polylines with Straight-Line Segments}, doi = {10.25972/OPUS-35475}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-354756}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Graphs provide a key means to model relationships between entities. They consist of vertices representing the entities, and edges representing relationships between pairs of entities. To make people conceive the structure of a graph, it is almost inevitable to visualize the graph. We call such a visualization a graph drawing. Moreover, we have a straight-line graph drawing if each vertex is represented as a point (or a small geometric object, e.g., a rectangle) and each edge is represented as a line segment between its two vertices. A polyline is a very simple straight-line graph drawing, where the vertices form a sequence according to which the vertices are connected by edges. An example of a polyline in practice is a GPS trajectory. The underlying road network, in turn, can be modeled as a graph. This book addresses problems that arise when working with straight-line graph drawings and polylines. In particular, we study algorithms for recognizing certain graphs representable with line segments, for generating straight-line graph drawings, and for abstracting polylines. In the first part, we first examine, how and in which time we can decide whether a given graph is a stick graph, that is, whether its vertices can be represented as vertical and horizontal line segments on a diagonal line, which intersect if and only if there is an edge between them. We then consider the visual complexity of graphs. Specifically, we investigate, for certain classes of graphs, how many line segments are necessary for any straight-line graph drawing, and whether three (or more) different slopes of the line segments are sufficient to draw all edges. 
Last, we study the question, how to assign (ordered) colors to the vertices of a graph with both directed and undirected edges such that no neighboring vertices get the same color and colors are ascending along directed edges. Here, the special property of the considered graph is that the vertices can be represented as intervals that overlap if and only if there is an edge between them. The latter problem is motivated by an application in automated drawing of cable plans with vertical and horizontal line segments, which we cover in the second part. We describe an algorithm that gets the abstract description of a cable plan as input, and generates a drawing that takes into account the special properties of these cable plans, like plugs and groups of wires. We then experimentally evaluate the quality of the resulting drawings. In the third part, we study the problem of abstracting (or simplifying) a single polyline and a bundle of polylines. In this problem, the objective is to remove as many vertices as possible from the given polyline(s) while keeping each resulting polyline sufficiently similar to its original course (according to a given similarity measure).}, subject = {Graphenzeichnen}, language = {en} } @phdthesis{Memmel2019, author = {Memmel, Simon}, title = {Automatisierte Algorithmen zur Analyse der Migration und der strahleninduzierten DNA-Sch{\"a}den humaner Glioblastomzellen nach kombinierter PI3K/mTOR/Hsp90-Inhibierung}, doi = {10.25972/OPUS-18571}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-185710}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Das hohe invasive Potential und die starke Resistenz gegen Radio-/Chemotherapie von Glioblastoma multiforme (GBM) Zellen machen sie zu dem t{\"o}dlichsten Tumor ihrer Art. Es ist deshalb von großem Interesse die Grundlagen, welche der Migrationsf{\"a}higkeit und DNA Reparatur zu Grunde liegen, besser zu verstehen. 
Im ersten Teil dieser Arbeit wurden zwei Algorithmen zur automatischen Analyse der Migration in der Einzelzellverfolgung und im Wundheilungsassay modifiziert. Die Auswertung der Daten konnte automatisch und somit schnell, effektiv und mit geringerem Arbeitsaufwand durchgef{\"u}hrt werden. Mit Hilfe dieser automatischen Algorithmen wurde die Migrationsf{\"a}higkeit von zwei GBM-Zelllinien (DK-MG und SNB19) untersucht. Zus{\"a}tzlich wurde die konfokale Laserscanning- sowie die hochaufl{\"o}sende dSTORM-Fluoreszenzmikroskopie verwendet um die, der Zellbewegung zu Grunde liegende, Struktur des F Aktin und der fokalen Adh{\"a}sionskinase (FAK) aufzul{\"o}sen und darzustellen. Unter Anwendung dieser genannten Methoden sind die Effekte des dualen PI3K/mTOR Inhibitors PI-103 alleine und in Kombination mit dem Hsp90 Inhibitor NVP AUY922 mit und ohne Bestrahlung auf die Bewegung untersucht worden. Es konnte festgestellt werden, dass sich beide Zelllinien deutlich in ihrem migratorischem Potential in vitro unterscheiden und zudem auch markante Unterschiede in ihrer Morphologie aufweisen. Die weniger invasiven DK MG-Zellen besitzen eine polarisierte Zellstruktur, wohingegen SNB19-Zellen sich durch multipolare ungerichtete Bewegung auszeichneten. Zudem wurde die Migration, durch PI3K/mTOR Inhibition mit PI-103 bei den DK-MG-Zellen (p53 wt, PTEN wt), sehr effektiv unterdr{\"u}ckt. Wohingegen sich die SNB19-Zellen (p53 mut, PTEN mut) resistent gegen diesen Inhibitor zeigten. Hsp90 Inhibition offenbarte in beiden Zelllinien einen starken inhibitorischen Effekt auf die Migration der Zellen sowie die Reorganisierung des F Aktinskelettes. In der zweiten H{\"a}lfte dieser Arbeit wurde ein Augenmerk auf die DNA-DSB-Reparatur der GBM Zellen nach ionisierender Strahlung gelegt. Zun{\"a}chst wurde eine automatische Analysesoftware „FocAn-3D" entwickelt, mit dessen Hilfe die DNA Doppelstrangbruchreparaturkinetik untersucht werden sollte. 
Diese Software erm{\"o}glicht es die gesamten Zellkerne mit ihren γH2AX-Foci in 3D-cLSM-Aufnahmen zu untersuchen. Es konnte somit eine Verbesserung der Genauigkeit in der Ausz{\"a}hlung der γH2AX-Foci erreicht werden, welche 2D beschr{\"a}nkter Software verwehrt bleibt. Mit FocAn-3D konnte der gesamte Verlauf der Induktions- und Abbauphase der γH2AX-Foci in DK MG- und SNB19-Zellen mit einem mathematischen Modell ausgewertet und dargestellt werden. Des Weiteren wurde die Nanometerstruktur von γH2AX- und pDNA-PKcs-Foci mittels hochaufl{\"o}sender dSTORM-Mikroskopie untersucht. Konventionelle Mikroskopiemethoden, begrenzt durch das Beugungslimit und einer Aufl{\"o}sung von ~200 nm, konnten die Nanometerstruktur (<100 nm) der Reparaturfoci bisher nicht darstellen. Mit Hilfe der beugungsunbegrenzten dSTORM-Mikroskopie war es m{\"o}glich in DK MG- und SNB19-Zellen die Nanometerstruktur genannten Reparaturproteine in den Foci mit einer Aufl{\"o}sung von bis zu ~20 nm darzustellen. γH2AX-Foci zeigten sich als eine Verteilung aus einzelnen Untereinheiten („Nanofoci") mit einem Durchmesser von ~45 nm. Dies l{\"a}sst die Vermutung zu, dass es sich hier um die elementare Substruktur der Foci und somit der γH2AX enthaltenen Nukleosome handelt. DNA-PK-Foci wiesen hingegen eine diffusere Verteilung auf. Die in dieser Arbeit ermittelten Unterschiede im Migrationsverhalten der Zellen rechtfertigen eine weitere pr{\"a}klinische Untersuchung der verwendeten Inhibitoren als potentielle Zelltherapeutika f{\"u}r die Behandlung von GBM. Zudem konnte sich dSTORM als machtvolles Hilfsmittel, sowohl zur Analyse der Migration zugrundeliegenden Zytoskelettstruktur und der Effekte der Hsp90 Inhibierung, als auch, der Nanostruktur der DNA-DSB-Reparaturfoci herausstellen. Es ist anzunehmen, dass beugungsunbegrenzte Mikroskopiemethoden sich als bedeutende Werkzeuge in der medizinischen und biologischen Erforschung der DNA-Reparaturmechanismen herausstellen werden. 
Das in dieser Arbeit entwickelte ImageJ Plugin „FocAn-3D" bewies sich ebenfalls als ein vielversprechendes Werkzeug f{\"u}r die Analyse der Reparaturkinetik. Mit Hilfe von „FocAn-3D" sollte es somit m{\"o}glich sein u.a. den Einfluss gezielter Inhibition auf den zeitlichen Verlauf der Induktion und des Abbaus der DNA-Reparaturmaschinerie genauer zu studieren.}, subject = {Glioblastom}, language = {de} } @phdthesis{Malik2017, author = {Malik, Michelle Uta}, title = {Schockraumaufnahme schwerverletzter oder kritisch kranker Patienten - gesch{\"a}tzte und tats{\"a}chliche Eintreffzeit im Vergleich. Eine Untersuchung zur Analyse der Prozessqualit{\"a}t des Alarmierungsalgorithmus}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-155801}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Hintergrund: Die tats{\"a}chliche Ankunftszeit von schwerverletzten oder kritisch kranken Patienten im Schockraum einer Klinik stimmt nicht immer mit der von der Rettungsleitstelle angek{\"u}ndigten Ankunftszeit {\"u}berein. Im Rahmen einer retrospektiven Analyse an einem deutschen {\"u}berregionalen Traumazentrum wurde untersucht, ob der dortige Alarmierungsalgorithmus geeignet ist, Zeitabweichungen in der Patientenankunft zu kompensieren. Methode: Die Datenanalyse erfolgte retrospektiv. Es wurde die Differenz zwischen angek{\"u}ndigter und tats{\"a}chlicher Eintreffzeit aller {\"u}ber das Schockraumtelefon angek{\"u}ndigten und im Schockraum aufgenommenen Patienten von September 2010 bis M{\"a}rz 2011 ermittelt. Die Teamalarmierung erfolgte 10 Minuten vor angek{\"u}ndigter Patientenankunft. Ergebnisse: In die Untersuchung wurden 165 Patienten eingeschlossen. Bei 11\% aller Patienten und bei 9\% der prim{\"a}r {\"u}ber den Schockraum aufgenommenen Traumapatienten stimmten angek{\"u}ndigte und tats{\"a}chliche Ankunftszeit {\"u}berein. In 24\% aller F{\"a}lle lag die tats{\"a}chliche Ankunftszeit des Patienten vor der angek{\"u}ndigten Ankunftszeit. 
3\% des gesamten Patientenkollektives und 0\% aus der Gruppe der schwer betroffenen Traumapatienten kamen vor der Teamversammlung im Schockraum an. Zu Wartezeiten des Teams von {\"u}ber 20 Minuten kam es in 9\% aller F{\"a}lle. Schlussfolgerung: Bei einer Teamalarmierung 10 Minuten vor angek{\"u}ndigter Ankunftszeit kann eine vollst{\"a}ndige Versammlung des Schockraumteams vor Ankunft des Patienten in 97\% aller F{\"a}lle erreicht werden. Gleichzeitig resultieren akzeptable Wartezeiten f{\"u}r das Team.}, subject = {Polytrauma}, language = {de} } @phdthesis{Loeffler2021, author = {L{\"o}ffler, Andre}, title = {Constrained Graph Layouts: Vertices on the Outer Face and on the Integer Grid}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-146-4}, doi = {10.25972/WUP-978-3-95826-147-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215746}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {viii, 161}, year = {2021}, abstract = {Constraining graph layouts - that is, restricting the placement of vertices and the routing of edges to obey certain constraints - is common practice in graph drawing. In this book, we discuss algorithmic results on two different restriction types: placing vertices on the outer face and on the integer grid. For the first type, we look into the outer k-planar and outer k-quasi-planar graphs, as well as giving a linear-time algorithm to recognize full and closed outer k-planar graphs using Monadic Second-order Logic. 
For the second type, we consider the problem of transferring a given planar drawing onto the integer grid while preserving the original drawing's topology; we also generalize a variant of Cauchy's rigidity theorem for orthogonal polyhedra of genus 0 to those of arbitrary genus.}, subject = {Graphenzeichnen}, language = {en} } @phdthesis{Hahn2010, author = {Hahn, Tim}, title = {Integrating neurobiological markers of depression: an fMRI-based pattern classification approach}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49962}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {While depressive disorders are, to date, diagnosed based on behavioral symptoms and course of illness, the interest in neurobiological markers of psychiatric disorders has grown substantially in recent years. However, current classification approaches are mainly based on data from a single biomarker, making it difficult to predict diseases such as depression which are characterized by a complex pattern of symptoms. Accordingly, none of the previously investigated single biomarkers has shown sufficient predictive power for practical application. In this work, we therefore propose an algorithm which integrates neuroimaging data associated with multiple, symptom-related neural processes relevant in depression to improve classification accuracy. First, we identified the core-symptoms of depression from standard classification systems. Then, we designed and conducted three experimental paradigms probing psychological processes known to be related to these symptoms using functional Magnetic Resonance Imaging. In order to integrate the resulting 12 high-dimensional biomarkers, we developed a multi-source pattern recognition algorithm based on a combination of Gaussian Process Classifiers and decision trees. 
Applying this approach to a group of 30 healthy controls and 30 depressive in-patients who were on a variety of medications and displayed varying degrees of symptom-severity allowed for high-accuracy single-subject classification. Specifically, integrating biomarkers yielded an accuracy of 83\% while the best of the 12 single biomarkers alone classified a significantly lower number of subjects (72\%) correctly. Thus, integrated biomarker-based classification of a heterogeneous, real-life sample resulted in accuracy comparable to the highest ever achieved in previous single biomarker research. Furthermore, investigation of the final prediction model revealed that neural activation during the processing of neutral facial expressions, large rewards, and safety cues is most relevant for over-all classification. We conclude that combining brain activation related to the core-symptoms of depression using the multi-source pattern classification approach developed in this work substantially increases classification accuracy while providing a sparse relational biomarker-model for future prediction.}, subject = {Patientenklassifikation}, language = {en} } @phdthesis{Fleszar2018, author = {Fleszar, Krzysztof}, title = {Network-Design Problems in Graphs and on the Plane}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-076-4}, doi = {10.25972/WUP-978-3-95826-077-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-154904}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {xi, 204}, year = {2018}, abstract = {A network design problem defines an infinite set whose elements, called instances, describe relationships and network constraints. It asks for an algorithm that, given an instance of this set, designs a network that respects the given constraints and at the same time optimizes some given criterion. 
In my thesis, I develop algorithms whose solutions are optimum or close to an optimum value within some guaranteed bound. I also examine the computational complexity of these problems. Problems from two vast areas are considered: graphs and the Euclidean plane. In the Maximum Edge Disjoint Paths problem, we are given a graph and a subset of vertex pairs that are called terminal pairs. We are asked for a set of paths where the endpoints of each path form a terminal pair. The constraint is that any two paths share at most one inner vertex. The optimization criterion is to maximize the cardinality of the set. In the hard-capacitated k-Facility Location problem, we are given an integer k and a complete graph where the distances obey a given metric and where each node has two numerical values: a capacity and an opening cost. We are asked for a subset of k nodes, called facilities, and an assignment of all the nodes, called clients, to the facilities. The constraint is that the number of clients assigned to a facility cannot exceed the facility's capacity value. The optimization criterion is to minimize the total cost which consists of the total opening cost of the facilities and the total distance between the clients and the facilities they are assigned to. In the Stabbing problem, we are given a set of axis-aligned rectangles in the plane. We are asked for a set of horizontal line segments such that, for every rectangle, there is a line segment crossing its left and right edge. The optimization criterion is to minimize the total length of the line segments. In the k-Colored Non-Crossing Euclidean Steiner Forest problem, we are given an integer k and a finite set of points in the plane where each point has one of k colors. For every color, we are asked for a drawing that connects all the points of the same color. The constraint is that drawings of different colors are not allowed to cross each other. 
The optimization criterion is to minimize the total length of the drawings. In the Minimum Rectilinear Polygon for Given Angle Sequence problem, we are given an angle sequence of left (+90°) turns and right (-90°) turns. We are asked for an axis-parallel simple polygon where the angles of the vertices yield the given sequence when walking around the polygon in counter-clockwise manner. The optimization criteria considered are to minimize the perimeter, the area, and the size of the axis-parallel bounding box of the polygon.}, subject = {Euklidische Ebene}, language = {en} } @phdthesis{Atzmueller2006, author = {Atzm{\"u}ller, Martin}, title = {Knowledge-Intensive Subgroup Mining - Techniques for Automatic and Interactive Discovery}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-21004}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Data mining has proved its significance in various domains and applications. As an important subfield of the general data mining task, subgroup mining can be used, e.g., for marketing purposes in business domains, or for quality profiling and analysis in medical domains. The goal is to efficiently discover novel, potentially useful and ultimately interesting knowledge. However, in real-world situations these requirements often cannot be fulfilled, e.g., if the applied methods do not scale for large data sets, if too many results are presented to the user, or if many of the discovered patterns are already known to the user. This thesis proposes a combination of several techniques in order to cope with the sketched problems: We discuss automatic methods, including heuristic and exhaustive approaches, and especially present the novel SD-Map algorithm for exhaustive subgroup discovery that is fast and effective. 
For an interactive approach we describe techniques for subgroup introspection and analysis, and we present advanced visualization methods, e.g., the zoomtable that directly shows the most important parameters of a subgroup and that can be used for optimization and exploration. We also describe various visualizations for subgroup comparison and evaluation in order to support the user during these essential steps. Furthermore, we propose to include possibly available background knowledge that is easy to formalize into the mining process. We can utilize the knowledge in many ways: To focus the search process, to restrict the search space, and ultimately to increase the efficiency of the discovery method. We especially present background knowledge to be applied for filtering the elements of the problem domain, for constructing abstractions, for aggregating values of attributes, and for the post-processing of the discovered set of patterns. Finally, the techniques are combined into a knowledge-intensive process supporting both automatic and interactive methods for subgroup mining. The practical significance of the proposed approach strongly depends on the available tools. We introduce the VIKAMINE system as a highly-integrated environment for knowledge-intensive active subgroup mining. Also, we present an evaluation consisting of two parts: With respect to objective evaluation criteria, i.e., comparing the efficiency and the effectiveness of the subgroup discovery methods, we provide an experimental evaluation using generated data. For that task we present a novel data generator that allows a simple and intuitive specification of the data characteristics. The results of the experimental evaluation indicate that the novel SD-Map method outperforms the other described algorithms using data sets similar to the intended application concerning the efficiency, and also with respect to precision and recall for the heuristic methods. 
Subjective evaluation criteria include the user acceptance, the benefit of the approach, and the interestingness of the results. We present five case studies utilizing the presented techniques: The approach has been successfully implemented in medical and technical applications using real-world data sets. The method was very well accepted by the users that were able to discover novel, useful, and interesting knowledge.}, subject = {Data Mining}, language = {en} } @phdthesis{Appold2015, author = {Appold, Christian}, title = {Symbolische BDD-basierte Modellpr{\"u}fung asynchroner nebenl{\"a}ufiger Systeme}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-137029}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Today, information and communication systems are ubiquitous and consist very often of several interacting and communicating components. One reason is the widespread use of multi-core processors and the increasing amount of concurrent software for the efficient usage of multi-core processors. Also, the dissemination of distributed emergent technologies like sensor networks or the internet of things is growing. Additionally, a lot of internet protocols are client-server architectures with clients which execute computations in parallel and servers that can handle requests of several clients in parallel. Systems which consist of several interacting and communicating components are often very complex and due to their complexity also prone to errors. Errors in systems can have dramatic consequenses, especially in safety-critical areas where human life can be endangered by incorrect system behavior. Hence, it is inevitable to have methods that ensure the proper functioning of such systems. This thesis aims on improving the verifiability of asynchronous concurrent systems using symbolic model checking based on Binary Decision Diagrams (BDDs). 
An asynchronous concurrent system is a system that consists of several components, from which only one component can execute a transition at a time. Model checking is a formal verification technique. For a given system description and a set of desired properties, the validity of the properties for the system is decided in model checking automatically by software tools called model checkers. The main problem of model checking is the state-space explosion problem. One approach to reduce this problem is the use of symbolic model checking. There, system states and transitions are not stored explicitely as in explicit model checking. Instead, in symbolic model checking sets of states and sets of transitions are stored and also manipulated together. The data structure which is used in this thesis to store those sets are BDDs. BDD-based symbolic model checking has already been used successful in industry for several times. Nevertheless, BDD-based symbolic model checking still suffers from the state-space explosion problem and further improvements are necessary to improve its applicability. Central operations in BDD-based symbolic model checking are the computation of successor and predecessor states of a given set of states. Those computations are called image computations. They are applied repeatedly in BDD-based symbolic model checking to decide the validity of properties for a given system description. Hence, their efficient execution is crucial for the memory and runtime requirements of a model checker. In an image computation a BDD for a set of transitions and a BDD for a set of states are combined to compute a set of successor or predecessor states. Often, also the size of the BDDs to represent the transition relation is critical for the successful use of model checking. To further improve the applicability of symbolic model checking, we present in this thesis new data structures to store the transition relation of asynchronous concurrent systems. 
Additionally, we present new image computation algorithms. Both can lead to large runtime and memory reductions for BDD-based symbolic model checking. Asynchronous concurrent systems often contain symmetries. A technique to exploit those symmetries to diminish the state-space explosion problem is symmetry reduction. In this thesis we also present a new efficient algorithm for symmetry reduction in BDD-based symbolic model checking.}, subject = {Programmverifikation}, language = {de} }