@article{SchulzeTillichDandekaretal.2013, author = {Schulze, Katja and Tillich, Ulrich M. and Dandekar, Thomas and Frohme, Marcus}, title = {PlanktoVision - an automated analysis system for the identification of phytoplankton}, series = {BMC Bioinformatics}, journal = {BMC Bioinformatics}, doi = {10.1186/1471-2105-14-115}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96395}, year = {2013}, abstract = {Background Phytoplankton communities are often used as a marker for the determination of fresh water quality. The routine analysis, however, is very time consuming and expensive as it is carried out manually by trained personnel. The goal of this work is to develop a system for an automated analysis. Results A novel open source system for the automated recognition of phytoplankton by the use of microscopy and image analysis was developed. It integrates the segmentation of the organisms from the background, the calculation of a large range of features, and a neural network for the classification of imaged organisms into different groups of plankton taxa. The analysis of samples containing 10 different taxa showed an average recognition rate of 94.7\% and an average error rate of 5.5\%. The presented system has a flexible framework which easily allows expanding it to include additional taxa in the future. Conclusions The implemented automated microscopy and the new open source image analysis system - PlanktoVision - showed classification results that were comparable or better than existing systems and the exclusion of non-plankton particles could be greatly improved. 
The software package is published as free software and is available to anyone to help make the analysis of water quality more reproducible and cost effective.}, language = {en} } @article{DandekarLiangKrueger2013, author = {Dandekar, Thomas and Liang, Chunguang and Kr{\"u}ger, Beate}, title = {GoSynthetic database tool to analyse natural and engineered molecular processes}, series = {Database}, journal = {Database}, doi = {10.1093/database/bat043}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97023}, year = {2013}, abstract = {An essential topic for synthetic biologists is to understand the structure and function of biological processes and involved proteins and plan experiments accordingly. Remarkable progress has been made in recent years towards this goal. However, efforts to collect and present all information on processes and functions are still cumbersome. The database tool GoSynthetic provides a new, simple and fast way to analyse biological processes applying a hierarchical database. Four different search modes are implemented. Furthermore, protein interaction data, cross-links to organism-specific databases (17 organisms including six model organisms and their interactions), COG/KOG, GO and IntAct are warehoused. The built in connection to technical and engineering terms enables a simple switching between biological concepts and concepts from engineering, electronics and synthetic biology. The current version of GoSynthetic covers more than one million processes, proteins, COGs and GOs. 
It is illustrated by various application examples probing process differences and designing modifications.}, language = {en} } @phdthesis{Wolter2014, author = {Wolter, Steve}, title = {Single-molecule localization algorithms in super-resolution microscopy}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-109370}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Lokalisationsmikroskopie ist eine Methodenklasse der superaufl{\"o}senden Fluoreszenzmikroskopie, deren Methoden sich durch stochastische zeitliche Isolation der Fluoreszenzemission auszeichnen. Das Blinkverhalten von Fluorophoren wird so ver{\"a}ndert, dass gleichzeitige Aktivierung von einander nahen Fluorophoren unwahrscheinlich ist. Bekannte lokalisationsmikroskopische Methoden umfassen dSTORM, STORM, PALM, FPALM, oder GSDIM. Lokalisationsmikroskopie ist von hohem biologischem Interesse, weil sie die Aufl{\"o}sung des Fluoreszenzmikroskops bei minimalem technischem Aufwand um eine Gr{\"o}ßenordnung verbessert. Der verbundene Rechenaufwand ist allerdings erheblich, da Millionen von Fluoreszenzemissionen einzeln mit Nanometergenauigkeit lokalisiert werden m{\"u}ssen. Der Rechen- und Implementationsaufwand dieser Auswertung hat die Verbreitung der superaufl{\"o}senden Mikroskopie lange verz{\"o}gert. Diese Arbeit beschreibt meine algorithmische Grundstruktur f{\"u}r die Auswertung lokalisationsmikroskopischer Daten. Die Echtzeitf{\"a}higkeit, d.h. eine Auswertegeschwindigkeit oberhalb der Datenaufnahmegeschwindigkeit an normalen Messaufbauten, meines neuartigen und quelloffenen Programms wird demonstriert. Die Geschwindigkeit wird auf verbrauchermarktg{\"a}ngigen Prozessoren erreicht und dadurch spezialisierte Rechenzentren oder der Einsatz von Grafikkarten vermieden. Die Berechnung wird mit dem allgemein anerkannten Gaussschen Punktantwortmodell und einem Rauschmodell auf Basis der gr{\"o}ßten Poissonschen Wahrscheinlichkeit durchgef{\"u}hrt. 
Die algorithmische Grundstruktur wird erweitert, um robuste und optimale Zweifarbenauswertung zu realisieren und damit korrelative Mikroskopie zwischen verschiedenen Proteinen und Strukturen zu erm{\"o}glichen. Durch den Einsatz von kubischen Basissplines wird die Auswertung von dreidimensionalen Proben vereinfacht und stabilisiert, um pr{\"a}zisem Abbilden von mikrometerdicken Proben n{\"a}her zu kommen. Das Grenzverhalten von Lokalisationsalgorithmen bei hohen Emissionsdichten wird untersucht. Abschließend werden Algorithmen f{\"u}r die Anwendung der Lokalisationsmikroskopie auf verbreitete Probleme der Biologie aufgezeigt. Zellul{\"a}re Bewegung und Motilit{\"a}t werden anhand der in vitro Bewegung von Myosin-Aktin-Filamenten studiert. Lebendzellbildgebung mit hellen und stabilen organischen Fluorophoren wird mittels SNAP-tag-Fusionsproteinen realisiert. Die Analyse des Aufbaus von Proteinklumpen zeigt, wie Lokalisationsmikroskopie neue quantitative Ans{\"a}tze jenseits reiner Bildgebung bietet.}, subject = {Fluoreszenzmikroskopie}, language = {en} } @phdthesis{ZeeshangebMajeed2014, author = {Zeeshan [geb. Majeed], Saman}, title = {Implementation of Bioinformatics Methods for miRNA and Metabolic Modelling}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-102900}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Dynamic interactions and their changes are at the forefront of current research in bioinformatics and systems biology. This thesis focusses on two particular dynamic aspects of cellular adaptation: miRNA and metabolites. miRNAs have an established role in hematopoiesis and megakaryocytopoiesis, and platelet miRNAs have potential as tools for understanding basic mechanisms of platelet function. The thesis highlights the possible role of miRNAs in regulating protein translation in platelet lifespan with relevance to platelet apoptosis and identifying involved pathways and potential key regulatory molecules. 
Furthermore, corresponding miRNA/target mRNAs in murine platelets are identified. Moreover, key miRNAs involved in aortic aneurysm are predicted by similar techniques. The clinical relevance of miRNAs as biomarkers, targets, resulting later translational therapeutics, and tissue specific restrictors of genes expression in cardiovascular diseases is also discussed. In a second part of thesis we highlight the importance of scientific software solution development in metabolic modelling and how it can be helpful in bioinformatics tool development along with software feature analysis such as performed on metabolic flux analysis applications. We proposed the "Butterfly" approach to implement efficiently scientific software programming. Using this approach, software applications were developed for quantitative Metabolic Flux Analysis and efficient Mass Isotopomer Distribution Analysis (MIDA) in metabolic modelling as well as for data management. "LS-MIDA" allows easy and efficient MIDA analysis and, with a more powerful algorithm and database, the software "Isotopo" allows efficient analysis of metabolic flows, for instance in pathogenic bacteria (Salmonella, Listeria). All three approaches have been published (see Appendices).}, subject = {miRNS}, language = {en} } @phdthesis{Karl2016, author = {Karl, Stefan}, title = {Control Centrality in Non-Linear Biological Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150838}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Biological systems such as cells or whole organisms are governed by complex regulatory networks of transcription factors, hormones and other regulators which determine the behavior of the system depending on internal and external stimuli. In mathematical models of these networks, genes are represented by interacting "nodes" whose "value" represents the activity of the gene. Control processes in these regulatory networks are challenging to elucidate and quantify. 
Previous control centrality metrics, which aim to mathematically capture the ability of individual nodes to control biological systems, have been found to suffer from problems regarding biological plausibility. This thesis presents a new approach to control centrality in biological networks. Three types of network control are distinguished: Total control centrality quantifies the impact of gene mutations and identifies potential pharmacological targets such as genes involved in oncogenesis (e.g. zinc finger protein GLI2 or bone morphogenetic proteins in chondrocytes). Dynamic control centrality describes relaying functions as observed in signaling cascades (e.g. control in mouse colon stem cells). Value control centrality measures the direct influence of the value of the node on the network (e.g. Indian hedgehog as an essential regulator of proliferation in chondrocytes). Well-defined network manipulations define all three centralities not only for nodes, but also for the interactions between them, enabling detailed insights into network pathways. The calculation of the new metrics is made possible by substantial computational improvements in the simulation algorithms for several widely used mathematical modeling paradigms for genetic regulatory networks, which are implemented in the regulatory network simulation framework Jimena created for this thesis. Applying the new metrics to biological networks and artificial random networks shows how these mathematical concepts correspond to experimentally verified gene functions and signaling pathways in immunity and cell differentiation. In contrast to controversial previous results even from the Barab{\'a}si group, all results indicate that the ability to control biological networks resides in only a few driver nodes characterized by a high number of connections to the rest of the network. Autoregulatory loops strongly increase the controllability of the network, i.e. 
its ability to control itself, and biological networks are characterized by high controllability in conjunction with high robustness against mutations, a combination that can be achieved best in sparsely connected networks with densities (i.e. connections to nodes ratios) around 2.0 - 3.0. The new concepts are thus considerably narrowing the gap between network science and biology and can be used in various areas such as system modeling, plausibility trials and system analyses. Medical applications discussed in this thesis include the search for oncogenes and pharmacological targets, as well as their functional characterization.}, subject = {Bioinformatik}, language = {en} } @phdthesis{Wolf2017, author = {Wolf, Beat}, title = {Reducing the complexity of OMICS data analysis}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153687}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {The field of genetics faces a lot of challenges and opportunities in both research and diagnostics due to the rise of next generation sequencing (NGS), a technology that allows to sequence DNA increasingly fast and cheap. NGS is not only used to analyze DNA, but also RNA, which is a very similar molecule also present in the cell, in both cases producing large amounts of data. The big amount of data raises both infrastructure and usability problems, as powerful computing infrastructures are required and there are many manual steps in the data analysis which are complicated to execute. Both of those problems limit the use of NGS in the clinic and research, by producing a bottleneck both computationally and in terms of manpower, as for many analyses geneticists lack the required computing skills. Over the course of this thesis we investigated how computer science can help to improve this situation to reduce the complexity of this type of analysis. 
We looked at how to make the analysis more accessible to increase the number of people that can perform OMICS data analysis (OMICS groups various genomics data-sources). To approach this problem, we developed a graphical NGS data analysis pipeline aimed at a diagnostics environment while still being useful in research in close collaboration with the Human Genetics Department at the University of W{\"u}rzburg. The pipeline has been used in various research papers on covering subjects, including works with direct author participation in genomics, transcriptomics as well as epigenomics. To further validate the graphical pipeline, a user survey was carried out which confirmed that it lowers the complexity of OMICS data analysis. We also studied how the data analysis can be improved in terms of computing infrastructure by improving the performance of certain analysis steps. We did this both in terms of speed improvements on a single computer (with notably variant calling being faster by up to 18 times), as well as with distributed computing to better use an existing infrastructure. The improvements were integrated into the previously described graphical pipeline, which itself also was focused on low resource usage. As a major contribution and to help with future development of parallel and distributed applications, for the usage in genetics or otherwise, we also looked at how to make it easier to develop such applications. Based on the parallel object programming model (POP), we created a Java language extension called POP-Java, which allows for easy and transparent distribution of objects. Through this development, we brought the POP model to the cloud, Hadoop clusters and present a new collaborative distributed computing model called FriendComputing. 
The advances made in the different domains of this thesis have been published in various works specified in this document.}, subject = {Bioinformatik}, language = {en} } @phdthesis{PradaSalcedo2018, author = {Prada Salcedo, Juan Pablo}, title = {Image Processing and other bioinformatic tools for Neurobiology}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-157721}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {Neurobiology is widely supported by bioinformatics. Due to the big amount of data generated from the biological side a computational approach is required. This thesis presents four different cases of bioinformatic tools applied to the service of Neurobiology. The first two tools presented belong to the field of image processing. In the first case, we make use of an algorithm based on the wavelet transformation to assess calcium activity events in cultured neurons. We designed an open source tool to assist neurobiology researchers in the analysis of calcium imaging videos. Such analysis is usually done manually which is time consuming and highly subjective. Our tool speeds up the work and offers the possibility of an unbiased detection of the calcium events. Even more important is that our algorithm not only detects the neuron spiking activity but also local spontaneous activity which is normally discarded because it is considered irrelevant. We showed that this activity is determinant in the calcium dynamics in neurons and it is involved in important functions like signal modulation and memory and learning. The second project is a segmentation task. In our case we are interested in segmenting the neuron nuclei in electron microscopy images of c.elegans. Marking these structures is necessary in order to reconstruct the connectome of the organism. C.elegans is a great study case due to the simplicity of its nervous system (only 502 neurons). This worm, despite its simplicity has taught us a lot about neuronal mechanisms. 
There is still a lot of information we can extract from the c.elegans, therein lies the importance of reconstructing its connectome. There is a current version of the c.elegans connectome but it was done by hand and on a single subject which leaves a big room for errors. By automatizing the segmentation of the electron microscopy images we guarantee an unbiased approach and we will be able to verify the connectome on several subjects. For the third project we moved from image processing applications to biological modeling. Because of the high complexity of even small biological systems it is necessary to analyze them with the help of computational tools. The term in silico was coined to refer to such computational models of biological systems. We designed an in silico model of the TNF (Tumor necrosis factor) ligand and its two principal receptors. This biological system is of high relevance because it is involved in the inflammation process. Inflammation is of most importance as protection mechanism but it can also lead to complicated diseases (e.g. cancer). Chronic inflammation processes can be particularly dangerous in the brain. In order to better understand the dynamics that govern the TNF system we created a model using the BioNetGen language. This is a rule based language that allows one to simulate systems where multiple agents are governed by a single rule. Using our model we characterized the TNF system and hypothesized about the relation of the ligand with each of the two receptors. Our hypotheses can be later used to define drug targets in the system or possible treatments for chronic inflammation or lack of the inflammatory response. The final project deals with the protein folding problem. In our organism proteins are folded all the time, because only in their folded conformation are proteins capable of doing their job (with some very few exceptions). 
This folding process presents a great challenge for science because it has been shown to be an NP problem. NP means non deterministic Polynomial time problem. This basically means that this kind of problems cannot be efficiently solved. Nevertheless, somehow the body is capable of folding a protein in just milliseconds. This phenomenon puzzles not only biologists but also mathematicians. In mathematics NP problems have been studied for a long time and it is known that given the solution to one NP problem we could solve many of them (i.e. NP-complete problems). If we manage to understand how nature solves the protein folding problem then we might be able to apply this solution to many other problems. Our research intends to contribute to this discussion. Unfortunately, not to explain how nature solves the protein folding problem, but to explain that it does not solve the problem at all. This seems contradictory since I just mentioned that the body folds proteins all the time, but our hypothesis is that the organisms have learned to solve a simplified version of the NP problem. Nature does not solve the protein folding problem in its full complexity. It simply solves a small instance of the problem. An instance which is as simple as a convex optimization problem. We formulate the protein folding problem as an optimization problem to illustrate our claim and present some toy examples to illustrate the formulation. If our hypothesis is true, it means that protein folding is a simple problem. So we just need to understand and model the conditions of the vicinity inside the cell at the moment the folding process occurs. Once we understand this starting conformation and its influence in the folding process we will be able to design treatments for amyloid diseases such as Alzheimer's and Parkinson's. In summary this thesis project contributes to the neurobiology research field from four different fronts. 
Two are practical contributions with immediate benefits, such as the calcium imaging video analysis tool and the TNF in silico model. The neuron nuclei segmentation is a contribution for the near future. A step towards the full annotation of the c.elegans connectome and later for the reconstruction of the connectome of other species. And finally, the protein folding project is a first impulse to change the way we conceive the protein folding process in nature. We try to point future research in a novel direction, where the amino code is not the most relevant characteristic of the process but the conditions within the cell.}, subject = {Bildverarbeitung}, language = {en} } @phdthesis{Breitenbach2019, author = {Breitenbach, Tim}, title = {A mathematical optimal control based approach to pharmacological modulation with regulatory networks and external stimuli}, doi = {10.25972/OPUS-17436}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174368}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {In this work models for molecular networks consisting of ordinary differential equations are extended by terms that include the interaction of the corresponding molecular network with the environment that the molecular network is embedded in. These terms model the effects of the external stimuli on the molecular network. The usability of this extension is demonstrated with a model of a circadian clock that is extended with certain terms and reproduces data from several experiments at the same time. Once the model including external stimuli is set up, a framework is developed in order to calculate external stimuli that have a predefined desired effect on the molecular network. For this purpose the task of finding appropriate external stimuli is formulated as a mathematical optimal control problem for which in order to solve it a lot of mathematical methods are available. 
Several methods are discussed and worked out in order to calculate a solution for the corresponding optimal control problem. The application of the framework to find pharmacological intervention points or effective drug combinations is pointed out and discussed. Furthermore the framework is related to existing network analysis tools and their combination for network analysis in order to find dedicated external stimuli is discussed. The total framework is verified with biological examples by comparing the calculated results with data from literature. For this purpose platelet aggregation is investigated based on a corresponding gene regulatory network and associated receptors are detected. Furthermore a transition from one to another type of T-helper cell is analyzed in a tumor setting where missing agents are calculated to induce the corresponding switch in vitro. Next a gene regulatory network of a myocardiocyte is investigated where it is shown how the presented framework can be used to compare different treatment strategies with respect to their beneficial effects and side effects quantitatively. Moreover a constitutively activated signaling pathway, which thus causes maleficent effects, is modeled and intervention points with corresponding treatment strategies are determined that steer the gene regulatory network from a pathological expression pattern to physiological one again.}, subject = {Bioinformatik}, language = {en} } @phdthesis{Yu2019, author = {Yu, Sung-Huan}, title = {Development and application of computational tools for RNA-Seq based transcriptome annotations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176468}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {In order to understand the regulation of gene expression in organisms, precise genome annotation is essential. In recent years, RNA-Seq has become a potent method for generating and improving genome annotations. 
However, this approach is time-consuming and often inconsistently performed when done manually. In particular, the discovery of non-coding RNAs benefits strongly from the application of RNA-Seq data but requires significant amounts of expert knowledge and is labor-intensive. As a part of my doctoral study, I developed a modular tool called ANNOgesic that can detect numerous transcribed genomic features, including non-coding RNAs, based on RNA-Seq data in a precise and automatic fashion with a focus on bacterial and archaeal species. The software performs numerous analyses and generates several visualizations. It can generate high-resolution annotations that are hard to produce using traditional annotation tools that are based only on genome sequences. ANNOgesic can detect numerous novel genomic features like UTR-derived small non-coding RNAs for which no other tool has been developed before. ANNOgesic is available under an open source license (ISCL) at https://github.com/Sung-Huan/ANNOgesic. My doctoral work not only includes the development of ANNOgesic but also its application to annotate the transcriptome of Staphylococcus aureus HG003 - a strain which has been an insightful model in infection biology. Despite its potential as a model, a complete genome sequence and annotations have been lacking for HG003. In order to fill this gap, the annotations of this strain, including sRNAs and their functions, were generated using ANNOgesic by analyzing differential RNA-Seq data from 14 different samples (two media conditions with seven time points), as well as RNA-Seq data generated after transcript fragmentation. ANNOgesic was also applied to annotate several bacterial and archaeal genomes, and as part of this its high performance was demonstrated. In summary, ANNOgesic is a powerful computational tool for RNA-Seq based annotations and has been successfully applied to several species.}, subject = {Genom}, language = {en} }