@article{SteiningerKobsDavidsonetal.2021, author = {Steininger, Michael and Kobs, Konstantin and Davidson, Padraig and Krause, Anna and Hotho, Andreas}, title = {Density-based weighting for imbalanced regression}, series = {Machine Learning}, volume = {110}, journal = {Machine Learning}, number = {8}, issn = {1573-0565}, doi = {10.1007/s10994-021-06023-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269177}, pages = {2187-2211}, year = {2021}, abstract = {In many real-world settings, imbalanced data impedes the model performance of learning algorithms, like neural networks, mostly for rare cases. This is especially problematic for tasks focusing on these rare occurrences. For example, when estimating precipitation, extreme rainfall events are scarce but important considering their potential consequences. While there are numerous well-studied solutions for classification settings, most of them cannot be applied to regression easily. Of the few solutions for regression tasks, barely any have explored cost-sensitive learning, which is known to have advantages compared to sampling-based methods in classification tasks. In this work, we propose a sample weighting approach for imbalanced regression datasets called DenseWeight and a cost-sensitive learning approach for neural network regression with imbalanced data called DenseLoss, based on our weighting scheme. DenseWeight weights data points according to their target value rarities through kernel density estimation (KDE). DenseLoss adjusts each data point's influence on the loss according to DenseWeight, giving rare data points more influence on model training compared to common data points. We show on multiple differently distributed datasets that DenseLoss significantly improves model performance for rare data points through its density-based weighting scheme. Additionally, we compare DenseLoss to the state-of-the-art method SMOGN, finding that our method mostly yields better performance. Our approach provides more control over model training, as it enables us to actively decide on the trade-off between focusing on common or rare cases through a single hyperparameter, allowing the training of better models for rare data points.}, language = {en} }
@phdthesis{Klein2014, author = {Klein, Dominik Werner}, title = {Design and Evaluation of Components for Future Internet Architectures}, issn = {1432-8801}, doi = {10.25972/OPUS-9313}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-93134}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {The current Internet architecture was not conceived and developed in a planned process; rather, it has undergone an evolution-like development. The triggers for the individual evolutionary steps were mostly emerging applications that placed new requirements on the underlying network architecture. To meet these requirements, novel services or protocols were frequently specified and integrated into the existing architecture. This process, however, usually involves considerable effort and is therefore very sluggish, which impairs the development and spread of innovative services. Currently discussed concepts such as Software-Defined Networking (SDN) or network virtualization (NV) are regarded as one way to resolve the legacy burdens of the existing Internet architecture. Common to both concepts is the idea of operating logical networks on top of the physical substrate. These logical networks are highly dynamic and can thus be adapted flexibly to the requirements of the respective applications. In particular, the concept of virtualization enables more intelligent network nodes, which makes innovative new use cases possible. A use case frequently discussed in this context is the mobility of both end devices and of services themselves. Service mobility is exploited to reduce the access delay or the occupied network resources, for example by migrating services into data centers that are geographically close to the user. Besides the pure mechanisms for service and end-device mobility, suitable monitoring solutions that can assess the quality of service perceived by the user are also relevant in this context. Such solutions provide important decision support for the migration or monitor possible effects of the migration on the quality of service experienced by the user. In the case of video streaming, such a use case enables the flexible adaptation of the streaming topology for mobile users, so that the video quality can be maintained independently of the access network. Within this doctoral thesis, the described use case is analyzed in detail using the example of a video streaming application, and the arising challenges are discussed. Furthermore, solution approaches are presented and evaluated with respect to their efficiency. In detail, the thesis deals with the performance analysis of mechanisms for service mobility and develops an architecture for optimizing service mobility. In the area of end-device mobility, improvements are developed that reduce the latency between end device and service or guarantee connectivity independently of the access network. In the last part, a solution for monitoring video quality in the network is developed and analyzed with respect to its accuracy.}, subject = {Leistungsbewertung}, language = {en} }
@unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Design and Implementation of a Model-driven XML-based Integrated System Architecture for Assisting Analysis, Understanding, and Retention of Religious Texts: The Case of The Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65737}, year = {2011}, abstract = {Learning a book generally involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. Once deeper understanding is achieved, one would like to organize and manage one's knowledge in such a way that it can be easily remembered and efficiently transmitted to others. This paper discusses modeling religious texts using semantic XML markup based on frame-based knowledge representation, with the purpose of assisting understanding, retention, and sharing of the knowledge they contain. In this study, books organized in terms of chapters made up of verses are considered as the source of knowledge to model. Some metadata representing the multiple perspectives of knowledge modeling are assigned to each chapter and verse. Chapters and verses with their metadata form a meta-model, which is represented using frames, and published on a web mashup.
An XML-based annotation and visualization system has been developed, equipped with user interfaces for creating static and dynamic metadata, annotating chapters' contents according to user-selected semantics, and with templates for publishing the generated knowledge on the Internet. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts in order to support analysis, understanding, and retention of the texts.}, subject = {Wissensrepr{\"a}sentation}, language = {en} }
@unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Design and Implementation of Architectures for Interactive Textual Documents Collation Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56601}, year = {2011}, abstract = {One of the main purposes of textual documents collation is to identify a base text or the closest witness to the base text by analyzing and interpreting differences, also known as types of changes, that might exist between those documents. Based on this fact, it is reasonable to argue that explicit identification of types of changes such as deletions, additions, transpositions, and mutations should be part of the collation process. The identification could be carried out by an interpretation module after alignment has taken place. Unfortunately, existing collation software such as CollateX and Juxta's collation engine does not have interpretation modules. In fact, they implement the Gothenburg model [1] for the collation process, which does not include an interpretation unit. Currently, neither CollateX nor Juxta's collation engine distinguishes in its critical apparatus between the types of changes, and neither offers statistics about those changes. This paper presents a model for both integrated and distributed collation processes that improves the Gothenburg model. The model introduces an interpretation component for computing and distinguishing between the types of changes that documents could have undergone. Moreover, two architectures implementing the model in order to solve the problem of interactive collation are discussed as well. Each architecture uses the CollateX library and provides, on the one hand, preprocessing functions for transforming input documents into the CollateX input format and, on the other hand, a post-processing module for enabling interactive collation. Finally, simple algorithms for distinguishing between types of changes and for linking collated source documents with the collation results are also introduced.}, subject = {Softwarearchitektur}, language = {en} }
@misc{Kaempgen2009, type = {Master Thesis}, author = {Kaempgen, Benedikt}, title = {Deskriptives Data-Mining f{\"u}r Entscheidungstr{\"a}ger: Eine Mehrfachfallstudie}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46343}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {The potential of knowledge discovery in data is often not exploited, which is mainly due to barriers between the development team and the end users of the data mining. This thesis presents a transparent approach for describing and explaining data to decision makers. In decision-maker-centered tasks, the project requirements are defined and the results are compiled into a story. A requirement consists of a tabular report and, where applicable, patterns in its content, each understandable for a decision maker. The technical tasks consist of a data check, the integration of the data into a data warehouse, and the generation of reports and the discovery of patterns as described in the requirements. Several data mining projects can benefit from one another through knowledge management and a suitable infrastructure. The approach was applied in two projects using exclusively open-source software.}, subject = {Data Mining}, language = {de} }
@article{WienrichCarolus2021, author = {Wienrich, Carolin and Carolus, Astrid}, title = {Development of an Instrument to Measure Conceptualizations and Competencies About Conversational Agents on the Example of Smart Speakers}, series = {Frontiers in Computer Science}, volume = {3}, journal = {Frontiers in Computer Science}, doi = {10.3389/fcomp.2021.685277}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260198}, year = {2021}, abstract = {The concept of digital literacy has been introduced as a new cultural technique, which is regarded as essential for successful participation in a (future) digitized world. Given the increasing importance of AI, literacy concepts need to be extended to account for AI-related specifics. The easy handling of the systems results in increased usage, which contrasts with limited conceptualizations (e.g., imagination of future importance) and competencies (e.g., knowledge about functional principles). With reference to voice-based conversational agents as a concrete application of AI, the present paper aims at the development of an instrument to assess the conceptualizations and competencies about conversational agents. In a first step, a theoretical framework of "AI literacy" is transferred to the context of conversational agent literacy. Second, the "conversational agent literacy scale" (short CALS) is developed, constituting the first attempt to measure interindividual differences in the "(il)literate" usage of conversational agents. 29 items were derived, which 170 participants answered. An exploratory factor analysis identified five factors, leading to five subscales to assess CAL: storage and transfer of the smart speaker's data input; the smart speaker's functional principles; the smart speaker's intelligent functions and learning abilities; the smart speaker's reach and potential; and the smart speaker's technological (surrounding) infrastructure. Preliminary insights into the construct validity and reliability of CALS showed satisfactory results. Third, using the newly developed instrument, a student sample's CAL was assessed, revealing intermediate values. Remarkably, owning a smart speaker did not lead to higher CAL scores, confirming our basic assumption that usage of systems does not guarantee enlightened conceptualizations and competencies.
In sum, the paper contributes first insights into the operationalization and understanding of CAL as a specific subdomain of AI-related competencies.}, language = {en} }
@phdthesis{Binder2006, author = {Binder, Andreas}, title = {Die stochastische Wissenschaft und zwei Teilsysteme eines Web-basierten Informations- und Anwendungssystems zu ihrer Etablierung}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26146}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Stochastic thinking, Bernoulli stochastics, and its information-technological implementation, named Stochastikon, form the basis for understanding and successfully using a stochastic science. This thesis clarifies the notion of stochastic thinking, gives an illustrative presentation of the Bernoulli stochastics developed by Elart von Collani, and describes Stochastikon. Both the overall concept of Stochastikon and the goals, tasks, and realization of its two subsystems, named Mentor and Encyclopedia, are presented. Stochastic thinking allows a realistic view of things, i.e., a view that is consistent with human observations and experiences and thus takes into account the uncertainty about future developments. The notion of uncertainty used in this context refers exclusively to future developments and manifests itself in variability. The sources of uncertainty are, on the one hand, human ignorance and, on the other hand, randomness. Ignorance here means a person's lack of knowledge about the unknown but fixed facts that represent the initial conditions of the future development. Bernoulli stochastics provides a set of rules and enables the development of a quantitative model for describing uncertainty that explicitly incorporates the two sources ignorance and randomness. The model bears the name Bernoulli space and forms the basis for deriving quantitative procedures for obtaining reliable and precise statements both about the non-existent random future (prediction procedures) and about the unknown but fixed past (measurement procedures). The software system Stochastikon implements Bernoulli stochastics as a set of self-sufficient, intercommunicating subsystems. The goal of the subsystem Encyclopedia is the provision and evaluation of stochastic knowledge. The subsystem Mentor supports the user in finding solutions to problems by identifying a correct model, i.e., a correct Bernoulli space. The solution-finding process itself contains no uncertainty at all. All the uncertainty resides in the solution, i.e., in the Bernoulli space, which explicitly covers the existing lack of knowledge (ignorance) and the prevailing randomness.}, subject = {Stochastik}, language = {de} }
@article{KruegerFriedrichFoersteretal.2012, author = {Krueger, Beate and Friedrich, Torben and F{\"o}rster, Frank and Bernhardt, J{\"o}rg and Gross, Roy and Dandekar, Thomas}, title = {Different evolutionary modifications as a guide to rewire two-component systems}, series = {Bioinformatics and Biology Insights}, volume = {6}, journal = {Bioinformatics and Biology Insights}, doi = {10.4137/BBI.S9356}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-123647}, pages = {97-128}, year = {2012}, abstract = {Two-component systems (TCS) are short signalling pathways generally occurring in prokaryotes. They frequently regulate prokaryotic stimulus responses and thus are also of interest for engineering in biotechnology and synthetic biology. The aim of this study is to better understand and describe the rewiring of TCS while investigating different evolutionary scenarios. Based on large-scale screens of TCS in different organisms, this study gives detailed data, concrete alignments, and structure analysis on three general modification scenarios, where TCS were rewired for new responses and functions: (i) exchanges in the sequence within single TCS domains, (ii) exchange of whole TCS domains, and (iii) addition of new components modulating TCS function. As a result, the replacement of stimulus and promoter cassettes to rewire TCS is well defined by exploiting the alignments given here. The diverged TCS examples are non-trivial and the design is challenging. Designed connector proteins may also be useful to modify TCS in selected cases.}, language = {en} }
@article{PetschkeStaab2018, author = {Petschke, Danny and Staab, Torsten E.M.}, title = {DLTPulseGenerator: a library for the simulation of lifetime spectra based on detector-output pulses}, series = {SoftwareX}, volume = {7}, journal = {SoftwareX}, doi = {10.1016/j.softx.2018.04.002}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176883}, pages = {122-128}, year = {2018}, abstract = {The quantitative analysis of lifetime spectra relevant in both life and materials sciences presents one of the ill-posed inverse problems and, hence, leads to the most stringent requirements on the hardware specifications and the analysis algorithms. Here we present DLTPulseGenerator, a library written in native C++11, which provides the simulation of lifetime spectra according to the measurement setup. The simulation is based on pairs of non-TTL detector output pulses. Those pulses require the constant fraction discrimination (CFD) principle for the determination of the exact timing signal and, thus, for the calculation of the time difference, i.e., the lifetime. To verify the functionality, simulation results were compared to experimentally obtained data using Positron Annihilation Lifetime Spectroscopy (PALS) on pure tin.}, language = {en} }
@phdthesis{Houshiar2017, author = {Houshiar, Hamidreza}, title = {Documentation and mapping with 3D point cloud processing}, isbn = {978-3-945459-14-0}, doi = {10.25972/OPUS-14449}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-144493}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {3D point clouds are a de facto standard for 3D documentation and modelling. The advances in laser scanning technology broaden the usability of and access to 3D measurement systems. 3D point clouds are used in many disciplines such as robotics, 3D modelling, archeology and surveying.
Scanners are able to acquire up to a million points per second, representing the captured environment with a dense point cloud at a very high degree of detail. The combination of laser scanning technology with photography adds color information to the point clouds, so the environment is represented more realistically. Full 3D models of environments, without any occlusion, require multiple scans. Merging point clouds is a challenging process. This thesis presents methods for point cloud registration based on the panorama images generated from the scans. Image representation of point clouds introduces 2D image processing methods to 3D point clouds. Several projection methods for the generation of panorama maps of point clouds are presented in this thesis. Additionally, methods for point cloud reduction and compression based on the panorama maps are proposed. Due to the large amounts of data generated by 3D measurement systems, these methods are necessary to improve point cloud processing, transmission, and archiving. This thesis introduces point cloud processing methods as a novel framework for the digitisation of archeological excavations. The framework replaces the conventional documentation methods for excavation sites. It employs point clouds for the generation of the digital documentation of an excavation with the help of an archeologist on-site. The 3D point cloud is used not only for data representation but also for analysis and knowledge generation. Finally, this thesis presents an autonomous indoor mobile mapping system. The mapping system focuses on the sensor placement planning method. Capturing a complete environment requires several scans. The sensor placement planning method solves for the minimum number of scans required to digitise large environments. Combining this method with a navigation system on a mobile robot platform enables the robot to acquire data fully autonomously. This thesis introduces a novel hole detection method for point clouds to detect obscured parts of a captured environment. The sensor placement planning method selects the next scan position with the most coverage of the obscured environment. This reduces the required number of scans. The navigation system on the robot platform consists of path planning, path following, and obstacle avoidance. This guarantees the safe navigation of the mobile robot platform between the scan positions. The sensor placement planning method is designed as a stand-alone process that could be used with a mobile robot platform for autonomous mapping of an environment or as an assistive tool for surveyors on scanning projects.}, subject = {3D Punktwolke}, language = {en} }
@unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Doing Webservices Composition by Content-based Mashup: Example of a Web-based Simulator for Itinerary Planning}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50036}, year = {2010}, abstract = {Web services composition is traditionally carried out using composition technologies such as Business Process Execution Language (BPEL) [1] and Web Service Choreography Interface (WSCI) [2]. The composition technology involves the process of web service discovery, invocation, and composition. However, these technologies are not easy and flexible enough because they are mainly developer-centric. Moreover, the majority of websites have not yet embarked on the world of web services, although they have very important and useful information to offer. Is it because they have not understood the usefulness of web services, or is it because of the costs? Whatever the answers to these questions might be, time and money are definitely required in order to create and offer web services. To avoid these expenditures, wrappers [7] that automatically generate web services from websites would be a cheaper and easier solution. Mashups offer a different way of doing web services composition. In a web environment, a mashup is a web application that brings together data from several sources using web services, APIs, wrappers, and so on, in order to create an entirely new application that was not provided before. This paper first presents an overview of mashups and the process of web service invocation and composition based on mashups, and then describes an example of a web-based simulator for a navigation system in Germany.}, subject = {Mashup}, language = {en} }
@article{BuchinBuchinByrkaetal.2012, author = {Buchin, Kevin and Buchin, Maike and Byrka, Jaroslaw and N{\"o}llenburg, Martin and Okamoto, Yoshio and Silveira, Rodrigo I. and Wolff, Alexander}, title = {Drawing (Complete) Binary Tanglegrams}, series = {Algorithmica}, volume = {62}, journal = {Algorithmica}, doi = {10.1007/s00453-010-9456-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-124622}, pages = {309-332}, year = {2012}, abstract = {A binary tanglegram is a drawing of a pair of rooted binary trees whose leaf sets are in one-to-one correspondence; matching leaves are connected by inter-tree edges. For applications, for example, in phylogenetics, it is essential that both trees are drawn without edge crossings and that the inter-tree edges have as few crossings as possible. It is known that finding a tanglegram with the minimum number of crossings is NP-hard and that the problem is fixed-parameter tractable with respect to that number. We prove that under the Unique Games Conjecture there is no constant-factor approximation for binary trees. We show that the problem is NP-hard even if both trees are complete binary trees. For this case we give an O(n^3)-time 2-approximation and a new, simple fixed-parameter algorithm. We show that the maximization version of the dual problem for binary trees can be reduced to a version of MaxCut for which the algorithm of Goemans and Williamson yields a 0.878-approximation.}, language = {en} }
@article{DumicBjeloperaNuechter2021, author = {Dumic, Emil and Bjelopera, Anamaria and N{\"u}chter, Andreas}, title = {Dynamic point cloud compression based on projections, surface reconstruction and video compression}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {1}, issn = {1424-8220}, doi = {10.3390/s22010197}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-252231}, year = {2021}, abstract = {In this paper, we present a new dynamic point cloud compression method based on different projection types and bit depths, combined with a surface reconstruction algorithm and video compression for the obtained geometry and texture maps. Texture maps have been compressed after creating Voronoi diagrams. The video compression used is specific to geometry (FFV1) and texture (H.265/HEVC). Decompressed point clouds are reconstructed using a Poisson surface reconstruction algorithm. Comparison with the original point clouds was performed using point-to-point and point-to-plane measures.
Comprehensive experiments show better performance for some projection maps: cylindrical, Miller, and Mercator projections.}, language = {en} }
@phdthesis{Kaussner2003, author = {Kaußner, Armin}, title = {Dynamische Szenerien in der Fahrsimulation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8286}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {This thesis presents a new concept for driving simulator databases. Using a simple scripting language, the user designs a database tailored to his or her research question. The road network is represented on a topological level. In every simulation step, the geometric representation within the driver's field of view is computed from it. The parts of the road network that are invisible to the driver can be modified during the simulation. These modifications can depend on the driver's route or on the measurements collected during the simulation. In addition, the user can modify the road network interactively. The presented concept offers numerous possibilities for creating reproducible scenarios for experiments in driving simulators.}, subject = {Straßenverkehr}, language = {de} }
@article{NaglerNaegeleGillietal.2018, author = {Nagler, Matthias and N{\"a}gele, Thomas and Gilli, Christian and Fragner, Lena and Korte, Arthur and Platzer, Alexander and Farlow, Ashley and Nordborg, Magnus and Weckwerth, Wolfram}, title = {Eco-Metabolomics and Metabolic Modeling: Making the Leap From Model Systems in the Lab to Native Populations in the Field}, series = {Frontiers in Plant Science}, volume = {9}, journal = {Frontiers in Plant Science}, number = {1556}, issn = {1664-462X}, doi = {10.3389/fpls.2018.01556}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-189560}, year = {2018}, abstract = {Experimental high-throughput analysis of molecular networks is a central approach to characterize the adaptation of plant metabolism to the environment. However, recent studies have demonstrated that it is hardly possible to predict in situ metabolic phenotypes from experiments under controlled conditions, such as growth chambers or greenhouses. This is particularly due to the high molecular variance of in situ samples induced by environmental fluctuations. An approach for functional metabolome interpretation of field samples would be desirable in order to identify and trace back the impact of environmental changes on plant metabolism. To test the applicability of metabolomics studies for a characterization of plant populations in the field, we have identified and analyzed in situ samples of nearby grown natural populations of Arabidopsis thaliana in Austria. A. thaliana is the primary molecular biological model system in plant biology, with one of the best functionally annotated genomes, representing a reference system for all other plant genome projects. The genomes of these novel natural populations were sequenced and phylogenetically compared to a comprehensive genome database of A. thaliana ecotypes. Experimental results on primary and secondary metabolite profiling and genotypic variation were functionally integrated by a data mining strategy, which combines the statistical output of metabolomics data with genome-derived biochemical pathway reconstruction and metabolic modeling.
Correlations of biochemical model predictions and population-specific genetic variation indicated varying strategies of metabolic regulation on a population level, which enabled the direct comparison, differentiation, and prediction of metabolic adaptation of the same species to different habitats. These differences were most pronounced in organic and amino acid metabolism as well as at the interface of primary and secondary metabolism and allowed for the direct classification of population-specific metabolic phenotypes within geographically contiguous sampling sites.}, language = {en} }
@techreport{OdhahGrassKraemer2022, type = {Working Paper}, author = {Odhah, Najib and Grass, Eckhard and Kraemer, Rolf}, title = {Effective Rate of URLLC with Short Block-Length Information Theory}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28085}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280859}, pages = {4}, year = {2022}, abstract = {Shannon channel capacity estimation, based on large packet lengths, is used in traditional Radio Resource Management (RRM) optimization. This is good for the normal transmission of data in a wired or wireless system. For industrial automation and control, rather short packets are used due to the low-latency requirements. Using Shannon's formula in this case leads to inaccurate RRM solutions; thus, another formula should be used to optimize radio resources in short block-length packet transmission, which is the basis of Ultra-Reliable Low-Latency Communications (URLLC). The stringent delay Quality of Service (QoS) requirements of URLLC call for a link-level channel model rather than a physical-level channel model. After finding a basic and accurate formula for the achievable rate of short block-length packet transmission, the RRM optimization problem can be accurately formulated and solved under the new constraints of URLLC. In this short paper, the current mathematical models, which are used in formulating the effective transmission rate of URLLC, are briefly explained. Then, the use of this rate in RRM for URLLC is discussed.}, subject = {Datennetz}, language = {en} }
@article{MadeiraGromerLatoschiketal.2021, author = {Madeira, Octavia and Gromer, Daniel and Latoschik, Marc Erich and Pauli, Paul}, title = {Effects of Acrophobic Fear and Trait Anxiety on Human Behavior in a Virtual Elevated Plus-Maze}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.635048}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258709}, year = {2021}, abstract = {The Elevated Plus-Maze (EPM) is a well-established apparatus to measure anxiety in rodents, i.e., animals exhibiting an increased relative time spent in the closed vs. the open arms are considered anxious. To examine whether such anxiety-modulated behaviors are conserved in humans, we re-translated this paradigm to a human setting using virtual reality in a Cave Automatic Virtual Environment (CAVE) system. In two studies, we examined whether the EPM exploration behavior of humans is modulated by their trait anxiety and also assessed the individuals' levels of acrophobia (fear of heights), claustrophobia (fear of confined spaces), sensation seeking, and the reported anxiety when on the maze.
First, we constructed an exact virtual copy of the animal EPM adjusted to human proportions. In analogy to animal EPM studies, participants (N = 30) freely explored the EPM for 5 min. In the second study (N = 61), we redesigned the EPM to make it more human-adapted and to differentiate influences of trait anxiety and acrophobia by introducing various floor textures and lowering the walls of the closed arms to the height of standard handrails. In the first experiment, hierarchical regression analyses of exploration behavior revealed the expected association between open arm avoidance and trait anxiety, and an even stronger association with acrophobic fear. In the second study, results revealed that acrophobia was associated with avoidance of open arms with mesh-floor texture, whereas for trait anxiety, claustrophobia, and sensation seeking, no effect was detected. Also, subjects' fear ratings were moderated by all psychometrics but trait anxiety. In sum, both studies consistently indicate that humans show no general open arm avoidance analogous to rodents and that human EPM behavior is modulated most strongly by acrophobic fear, whereas trait anxiety plays a subordinate role. Thus, we conclude that the criteria for cross-species validity are insufficiently met in this case. Despite their exploratory nature, our studies provide in-depth insights into human exploration behavior on the virtual EPM.}, language = {en} }
@phdthesis{Menth2004, author = {Menth, Michael}, title = {Efficient admission control and routing for resilient communication networks}, doi = {10.25972/OPUS-846}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9949}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {This work is subdivided into two main areas: resilient admission control and resilient routing. The work gives an overview of the state of the art of quality of service mechanisms in communication networks and proposes a categorization of admission control (AC) methods. These approaches are investigated regarding performance, more precisely, regarding the potential resource utilization, by dimensioning the capacity for a network with a given topology, traffic matrix, and required flow blocking probability. In case of a failure, the affected traffic is rerouted over backup paths, which increases the traffic rate on the respective links. To guarantee the effectiveness of admission control also in failure scenarios, the increased traffic rate must be taken into account for capacity dimensioning; this leads to resilient AC. Capacity dimensioning is not feasible for existing networks with already given link capacities. For the application of resilient AC in this case, the size of distributed AC budgets must be adapted according to the traffic matrix in such a way that the maximum blocking probability for all flows is minimized and that the capacity of all links is not exceeded by the admissible traffic rate in any failure scenario. Several algorithms for the solution of that problem are presented and compared regarding their efficiency and fairness. A prototype for resilient AC was implemented in the laboratories of Siemens AG in Munich within the scope of the project KING. Resilience requires additional capacity on the backup paths for failure scenarios. The amount of this backup capacity depends on the routing and can be minimized by routing optimization. New protection switching mechanisms are presented that quickly divert the traffic around outage locations.
They are simple and can be implemented, e.g., by MPLS technology. The Self-Protecting Multi-Path (SPM) is a multi-path consisting of disjoint partial paths. The traffic is distributed over all faultless partial paths according to an optimized load balancing function, both in the working case and in failure scenarios. Performance studies show that the network topology and the traffic matrix also influence the amount of required backup capacity significantly. The example of the COST-239 network illustrates that conventional shortest path routing may need 50\% more capacity than the optimized SPM if all single link and node failures are protected.}, subject = {Kommunikation}, language = {en} }
@article{KraftBirkReichertetal.2020, author = {Kraft, Robin and Birk, Ferdinand and Reichert, Manfred and Deshpande, Aniruddha and Schlee, Winfried and Langguth, Berthold and Baumeister, Harald and Probst, Thomas and Spiliopoulou, Myra and Pryss, R{\"u}diger}, title = {Efficient processing of geospatial mHealth data using a scalable crowdsensing platform}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {12}, issn = {1424-8220}, doi = {10.3390/s20123456}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-207826}, year = {2020}, abstract = {Smart sensors and smartphones are becoming increasingly prevalent. Both can be used to gather environmental data (e.g., noise). Importantly, these devices can be connected to each other as well as to the Internet to collect large amounts of sensor data, which leads to many new opportunities. In particular, mobile crowdsensing techniques can be used to capture phenomena of common interest. Especially valuable insights can be gained if the collected data are additionally related to the time and place of the measurements. However, many technical solutions still use monolithic backends that are not capable of processing crowdsensing data in a flexible, efficient, and scalable manner. In this work, an architectural design was conceived with the goal of managing geospatial data in challenging crowdsensing healthcare scenarios. It will be shown how the proposed approach can be used to provide users with an interactive map of environmental noise, allowing tinnitus patients and other health-conscious people to avoid locations with harmful sound levels. Technically, the shown approach combines cloud-native applications with Big Data and stream processing concepts. In general, the presented architectural design shall serve as a foundation to implement practical and scalable crowdsensing platforms for various healthcare scenarios beyond the addressed use case.}, language = {en} }
@phdthesis{Wolz2003, author = {Wolz, Frank}, title = {Ein generisches Konzept zur Modellierung und Bewertung feldprogrammierbarer Architekturen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-7944}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {This thesis presents a first-of-its-kind, cross-architecture study of field-programmable logic devices for the implementation of synchronous circuits. First, a model for general field-programmable architectures based on periodic graphs is defined. Then, evaluation measures for architectures and circuit layouts are given to characterize structural properties with respect to chip area consumption and signal delay. Furthermore, a generic layout tool is developed that can compute and evaluate implementations for arbitrary architectures and circuits. Finally, nine resource-minimal architectures with mesh and island structures are compared with one another.}, subject = {Gate-Array-Bauelement}, language = {de} }
@unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Empirical Study on Screen Scraping Web Service Creation: Case of Rhein-Main-Verkehrsverbund (RMV)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49396}, year = {2010}, abstract = {The Internet is the biggest database that science and technology have ever produced. The World Wide Web is a large repository of information that cannot be used for automation by many applications due to its limited target audience. One of the solutions to the automation problem is to develop wrappers. Wrapping is a process whereby unstructured extracted information is transformed into a more structured format such as XML, which can be provided as a web service to other applications. A web service is a web page whose content is well structured so that a computer program can consume it automatically. This paper describes the steps involved in constructing wrappers manually in order to automatically generate web services.}, subject = {HTML}, language = {en} }
@techreport{GrossmannHomeyer2023, type = {Working Paper}, author = {Großmann, Marcel and Homeyer, Tobias}, title = {Emulation of Multipath Transmissions in P4 Networks with Kathar{\´a}}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32209}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322095}, pages = {4}, year = {2023}, abstract = {Packets sent over a network can either get lost or reach their destination. Protocols like TCP try to solve this problem by resending the lost packets. However, retransmissions consume a lot of time and are cumbersome for the transmission of critical data. Multipath solutions are quite common to address this reliability issue and are available on almost every layer of the ISO/OSI model. We propose a solution based on a P4 network that duplicates packets in order to send them to their destination via multiple routes. The last network hop ensures that only a single copy of the traffic is forwarded on to its destination by adopting a concept similar to Bloom filters. Besides, if fast delivery is requested, we provide a P4 prototype which randomly forwards the packets over different transmission paths. For reproducibility, we implement our approach in a container-based network emulation system called Kathar{\´a}.}, language = {en} }
@article{OberdoerferHeidrichBirnstieletal.2021, author = {Oberd{\"o}rfer, Sebastian and Heidrich, David and Birnstiel, Sandra and Latoschik, Marc Erich}, title = {Enchanted by Your Surrounding? Measuring the Effects of Immersion and Design of Virtual Environments on Decision-Making},
series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.679277}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260101}, pages = {679277}, year = {2021}, abstract = {Impaired decision-making leads to the inability to distinguish between advantageous and disadvantageous choices. The impairment of a person's decision-making is a common goal of gambling games. Given the recent trend of gambling using immersive Virtual Reality, it is crucial to investigate the effects of both immersion and the virtual environment (VE) on decision-making. In a novel user study, we measured decision-making using three virtual versions of the Iowa Gambling Task (IGT). The versions differed with regard to the degree of immersion and the design of the virtual environment. Since emotions affect decision-making, we further measured participants' positive and negative affect. A higher visual angle on a stimulus leads to an increased emotional response; thus, we kept the visual angle on the Iowa Gambling Task the same between our conditions. Our results revealed no significant impact of immersion or the VE on the IGT. We further found no significant difference between the conditions with regard to positive and negative affect. This suggests that neither the medium used nor the design of the VE causes an impairment of decision-making. However, in combination with a recent study, we provide first evidence that a higher visual angle on the IGT leads to an effect of impairment.}, language = {en} }
@article{RodriguesWeissHewigetal.2021, author = {Rodrigues, Johannes and Weiß, Martin and Hewig, Johannes and Allen, John J. B.}, title = {EPOS: EEG Processing Open-Source Scripts}, series = {Frontiers in Neuroscience}, volume = {15}, journal = {Frontiers in Neuroscience}, issn = {1662-453X}, doi = {10.3389/fnins.2021.660449}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240221}, year = {2021}, abstract = {Background: Since the replication crisis, standardization has become even more important in psychological science and neuroscience. As a result, many methods are being reconsidered, and researchers' degrees of freedom in these methods are being discussed as a potential source of inconsistencies across studies. New Method: With the aim of addressing these subjectivity issues, we have been working on a tutorial-like EEG (pre-)processing pipeline to achieve an automated method based on the semi-automated analysis proposed by Delorme and Makeig. Results: Two scripts are presented and explained step-by-step to perform basic, informed ERP and frequency-domain analyses, including data export to statistical programs and visual representations of the data. The open-source software EEGlab in MATLAB is used as the data handling platform, but scripts based on code provided by Mike Cohen (2014) are also included. Comparison with existing methods: This accompanying tutorial-like article explains and shows how the processing of our automated pipeline affects the data and especially addresses beginners in EEG analysis, as other (pre-)processing chains mostly target rather informed users in specialized areas or cover only parts of a complete procedure. In this context, we compared our pipeline with a selection of existing approaches.
Conclusion: The need for standardization and replication is evident, yet it is equally important to control the plausibility of the suggested solution by data exploration. Here, we provide the community with a tool to enhance the understanding and capability of EEG analysis. We aim to contribute to comprehensive and reliable analyses for neuroscientific research.}, language = {en} }
@article{GehrkeBalbachRauchetal.2019, author = {Gehrke, Alexander and Balbach, Nico and Rauch, Yong-Mi and Degkwitz, Andreas and Puppe, Frank}, title = {Erkennung von handschriftlichen Unterstreichungen in Alten Drucken}, series = {Bibliothek Forschung und Praxis}, volume = {43}, journal = {Bibliothek Forschung und Praxis}, number = {3}, issn = {1865-7648}, doi = {10.1515/bfp-2019-2083}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193377}, pages = {447-452}, year = {2019}, abstract = {The recognition of handwritten artifacts such as underlines in printed books allows conclusions about reception behavior and provenance history and is also required for OCR. Here, handwritten underlines are to be distinguished from horizontal lines in the print (e.g., separator lines), since the latter should not be marked up. The paper presents an approach based on a neural network with U-Net architecture trained on underlines, whose results are post-processed in a second step with heuristic rules. The evaluations show that underlines are recognized very well as long as the binarization of the scans does not lose too many pixels of the underline due to low contrast. In future work, the words above the underline are to be transcribed with OCR, and other artifacts such as handwritten notes in old prints are to be recognized as well.}, language = {de} }
@techreport{SertbasBuelbuelErgencFischer2022, type = {Working Paper}, author = {Sertbas B{\"u}lb{\"u}l, Nurefsan and Ergenc, Doganalp and Fischer, Mathias}, title = {Evaluating Dynamic Path Reconfiguration for Time Sensitive Networks}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28074}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280743}, pages = {5}, year = {2022}, abstract = {In time-sensitive networks (TSN) based on 802.1Qbv, i.e., the Time-Aware Shaper (TAS) protocol, precise transmission schedules and paths are used to ensure end-to-end deterministic communication. Such resource reservations for data flows are usually established at the startup time of an application and remain untouched until the flow ends. There is no way to easily migrate existing flows to alternative paths without inducing additional delay or wasting resources. Therefore, some of the new flows cannot be embedded due to capacity limitations on certain links, which leads to sub-optimal flow assignment. As future networks will need to support a large number of low-latency flows, accommodating new flows at runtime and adapting existing flows accordingly becomes a challenging problem. In this extended abstract, we summarize a previously published paper of ours [1]. We combine software-defined networking (SDN), which provides better control of network flows, with TSN to be able to seamlessly migrate time-sensitive flows.
For that, we formulate an optimization problem and propose different dynamic path configuration strategies under deterministic communication requirements. Our simulation results indicate that regularly reconfiguring the flow assignments can improve the latency of time-sensitive flows and can increase the number of flows embedded in the network by around 4\% in worst-case scenarios, while still satisfying individual flow deadlines.}, subject = {Datennetz}, language = {en} }
@article{AliMontenegro2016, author = {Ali, Qasim and Montenegro, Sergio}, title = {Explicit Model Following Distributed Control Scheme for Formation Flying of Mini UAVs}, series = {IEEE Access}, volume = {4}, journal = {IEEE Access}, pages = {397-406}, doi = {10.1109/ACCESS.2016.2517203}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146061}, year = {2016}, abstract = {A centralized heterogeneous formation flight position control scheme has been formulated using an explicit model-following design, based on a Linear Quadratic Regulator Proportional Integral (LQR PI) controller. The leader quadcopter is a stable reference model with desired dynamics whose output is perfectly tracked by the two wingmen quadcopters. The leader itself is controlled through the pole placement control method with desired stability characteristics, while the two followers are controlled through a robust and adaptive LQR PI control method. Selected 3-D formation geometry and static stability are maintained under a number of possible perturbations. With this control scheme, formation geometry may also be switched to any arbitrary shape during flight, provided a suitable collision avoidance mechanism is incorporated. In case of communication loss between the leader and any of the followers, the other follower provides the data, received from the leader, to the affected follower. The stability of the closed-loop system has been analyzed using singular values. The proposed approach for the tightly coupled formation flight of mini unmanned aerial vehicles has been validated with the help of extensive simulations using MATLAB/Simulink, which provided promising results.}, language = {en} }
@article{LodaKrebsDanhofetal.2019, author = {Loda, Sophia and Krebs, Jonathan and Danhof, Sophia and Schreder, Martin and Solimando, Antonio G. and Strifler, Susanne and Rasche, Leo and Kort{\"u}m, Martin and Kerscher, Alexander and Knop, Stefan and Puppe, Frank and Einsele, Hermann and Bittrich, Max}, title = {Exploration of artificial intelligence use with ARIES in multiple myeloma research}, series = {Journal of Clinical Medicine}, volume = {8}, journal = {Journal of Clinical Medicine}, number = {7}, issn = {2077-0383}, doi = {10.3390/jcm8070999}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197231}, pages = {999}, year = {2019}, abstract = {Background: Natural language processing (NLP) is a powerful tool supporting the generation of Real-World Evidence (RWE). There is no NLP system that enables the extensive querying of parameters specific to multiple myeloma (MM) out of unstructured medical reports. We therefore created an MM-specific ontology to accelerate the information extraction (IE) out of unstructured text. Methods: Our MM ontology consists of extensive MM-specific and hierarchically structured attributes and values. We implemented "A Rule-based Information Extraction System" (ARIES), which uses this ontology. We evaluated ARIES on 200 randomly selected medical reports of patients diagnosed with MM.
Results: Our system achieved a high F1-Score of 0.92 on the evaluation dataset with a precision of 0.87 and recall of 0.98. Conclusions: Our rule-based IE system enables the comprehensive querying of medical reports. The IE accelerates the extraction of data and enables clinicians to faster generate RWE on hematological issues. RWE helps clinicians to make decisions in an evidence-based manner. Our tool easily accelerates the integration of research evidence into everyday clinical practice.}, language = {en} } @article{WienrichLatoschik2021, author = {Wienrich, Carolin and Latoschik, Marc Erich}, title = {eXtended Artificial Intelligence: New Prospects of Human-AI Interaction Research}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.686783}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260296}, year = {2021}, abstract = {Artificial Intelligence (AI) covers a broad spectrum of computational problems and use cases. Many of those implicate profound and sometimes intricate questions of how humans interact or should interact with AIs. Moreover, many users or future users do have abstract ideas of what AI is, significantly depending on the specific embodiment of AI applications. Human-centered-design approaches would suggest evaluating the impact of different embodiments on human perception of and interaction with AI. An approach that is difficult to realize due to the sheer complexity of application fields and embodiments in reality. However, here XR opens new possibilities to research human-AI interactions. The article's contribution is twofold: First, it provides a theoretical treatment and model of human-AI interaction based on an XR-AI continuum as a framework for and a perspective of different approaches of XR-AI combinations. It motivates XR-AI combinations as a method to learn about the effects of prospective human-AI interfaces and shows why the combination of XR and AI fruitfully contributes to a valid and systematic investigation of human-AI interactions and interfaces. Second, the article provides two exemplary experiments investigating the aforementioned approach for two distinct AI-systems. The first experiment reveals an interesting gender effect in human-robot interaction, while the second experiment reveals an Eliza effect of a recommender system. Here the article introduces two paradigmatic implementations of the proposed XR testbed for human-AI interactions and interfaces and shows how a valid and systematic investigation can be conducted. In sum, the article opens new perspectives on how XR benefits human-centered AI design and development.}, language = {en} } @phdthesis{Niebler2019, author = {Niebler, Thomas}, title = {Extracting and Learning Semantics from Social Web Data}, doi = {10.25972/OPUS-17866}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178666}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Making machines understand natural language is a dream of mankind that existed since a very long time. Early attempts at programming machines to converse with humans in a supposedly intelligent way with humans relied on phrase lists and simple keyword matching. However, such approaches cannot provide semantically adequate answers, as they do not consider the specific meaning of the conversation. Thus, if we want to enable machines to actually understand language, we need to be able to access semantically relevant background knowledge. 
For this, it is possible to query so-called ontologies, which are large networks containing knowledge about real-world entities and their semantic relations. However, creating such ontologies is a tedious task, as it often requires extensive expert knowledge. Thus, we need to find ways to automatically construct and update ontologies that fit human intuition of semantics and semantic relations. More specifically, we need to determine semantic entities and find relations between them. While this is usually done on large corpora of unstructured text, previous work has shown that we can at least facilitate the first issue of extracting entities by considering special data such as tagging data or human navigational paths. Here, we do not need to detect the actual semantic entities, as they are already provided because of the way those data are collected. Thus, we can mainly focus on the problem of assessing the degree of semantic relatedness between tags or web pages. However, several issues need to be overcome if we want to approximate human intuition of semantic relatedness. For this, it is necessary to represent words and concepts in a way that allows easy and highly precise semantic characterization. This also largely depends on the quality of the data from which these representations are constructed. In this thesis, we extract semantic information from both tagging data created by users of social tagging systems and human navigation data in different semantics-driven social web systems. Our main goal is to construct high-quality and robust vector representations of words which can then be used to measure the relatedness of semantic concepts. First, we show that navigation in the social media systems Wikipedia and BibSonomy is driven by a semantic component. After this, we discuss and extend methods to model the semantic information in tagging data as low-dimensional vectors. Furthermore, we show that tagging pragmatics influences different facets of tagging semantics. We then investigate the usefulness of human navigational paths in several different settings on Wikipedia and BibSonomy for measuring semantic relatedness. Finally, we propose a metric-learning-based algorithm to adapt pre-trained word embeddings to datasets containing human judgments of semantic relatedness. This work contributes to the field of studying semantic relatedness between words by proposing methods to extract semantic relatedness from web navigation, to learn high-quality and low-dimensional word representations from tagging data, and to learn semantic relatedness from any kind of vector representation by exploiting human feedback. Applications first and foremost lie in ontology learning for the Semantic Web, but also in semantic search and query expansion.}, subject = {Semantik}, language = {en} } @phdthesis{Budig2018, author = {Budig, Benedikt}, title = {Extracting Spatial Information from Historical Maps: Algorithms and Interaction}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-092-4}, doi = {10.25972/WUP-978-3-95826-093-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-160955}, school = {W{\"u}rzburg University Press}, pages = {viii, 160}, year = {2018}, abstract = {Historical maps are fascinating documents and a valuable source of information for scientists of various disciplines.
Many of these maps are available as scanned bitmap images, but in order to make them searchable in useful ways, a structured representation of the contained information is desirable. This book deals with the extraction of spatial information from historical maps. This cannot be expected to be solved fully automatically (since it involves difficult semantics), but is also too tedious to be done manually at scale. The methodology used in this book combines the strengths of both computers and humans: it describes efficient algorithms to largely automate information extraction tasks and pairs these algorithms with smart user interactions to handle what is not understood by the algorithm. The effectiveness of this approach is shown for various kinds of spatial documents from the 16th to the early 20th century.}, subject = {Karte}, language = {en} } @article{KrenzerMakowskiHekaloetal.2022, author = {Krenzer, Adrian and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Fast machine learning annotation in the medical domain: a semi-automated video annotation tool for gastroenterologists}, series = {BioMedical Engineering OnLine}, volume = {21}, journal = {BioMedical Engineering OnLine}, number = {1}, doi = {10.1186/s12938-022-01001-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300231}, year = {2022}, abstract = {Background Machine learning, especially deep learning, is becoming more and more relevant in research and development in the medical domain. For all supervised deep learning applications, data is the most critical factor in securing successful implementation and sustaining the progress of the machine learning model. Gastroenterological data in particular, which often involve endoscopic videos, are cumbersome to annotate. Domain experts are needed to interpret and annotate the videos. To support those domain experts, we developed a framework. With this framework, instead of annotating every frame in the video sequence, experts only perform key annotations at the beginning and the end of sequences with pathologies, e.g., visible polyps. Subsequently, non-expert annotators supported by machine learning add the missing annotations for the frames in-between. Methods In our framework, an expert reviews the video and annotates a few video frames to verify the object's annotations for the non-expert. In a second step, a non-expert has visual confirmation of the given object and can annotate all following and preceding frames with AI assistance. After the expert has finished, relevant frames will be selected and passed on to an AI model. This information allows the AI model to detect and mark the desired object on all following and preceding frames with an annotation. The non-expert can then adjust and modify the AI predictions and export the results, which can be used to train the AI model. Results Using this framework, we were able to reduce the workload of domain experts on average by a factor of 20 on our data. This is primarily due to the structure of the framework, which is designed to minimize the workload of the domain expert. Pairing this framework with a state-of-the-art semi-automated AI model enhances the annotation speed further. Through a prospective study with 10 participants, we show that semi-automated annotation using our tool doubles the annotation speed of non-expert annotators compared to a well-known state-of-the-art annotation tool.
Conclusion In summary, we introduce a framework for fast expert annotation for gastroenterologists, which reduces the workload of the domain expert considerably while maintaining a very high annotation quality. The framework incorporates a semi-automated annotation system utilizing trained object detection models. The software and framework are open-source.}, language = {en} } @techreport{DworzakGrossmannLe2023, type = {Working Paper}, author = {Dworzak, Manuel and Großmann, Marcel and Le, Duy Thanh}, title = {Federated Learning for Service Placement in Fog and Edge Computing}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32219}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322193}, pages = {4}, year = {2023}, abstract = {Service orchestration requires enormous attention and remains a struggle nowadays. Of course, virtualization provides a base level of abstraction for services to be deployable on a lot of infrastructures. With container virtualization, the trend of migrating applications to the micro-service level so that they can be executed in Fog and Edge Computing environments rapidly increases manageability and maintenance efforts. Similarly, network virtualization adds the effort of calibrating IP flows for Software-Defined Networks and eventually routing them by means of Network Function Virtualization. Nevertheless, there are concepts like MAPE-K to support micro-service distribution in next-generation cloud and network environments. We want to explore how a service distribution can be improved by adopting machine learning concepts for infrastructure or service changes. Therefore, we show how federated machine learning is integrated into a cloud-to-fog continuum without burdening single nodes.}, language = {en} } @article{KaltdorfSchulzeHelmprobstetal.2017, author = {Kaltdorf, Kristin Verena and Schulze, Katja and Helmprobst, Frederik and Kollmannsberger, Philip and Dandekar, Thomas and Stigloher, Christian}, title = {Fiji macro 3D ART VeSElecT: 3D automated reconstruction tool for vesicle structures of electron tomograms}, series = {PLoS Computational Biology}, volume = {13}, journal = {PLoS Computational Biology}, number = {1}, doi = {10.1371/journal.pcbi.1005317}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-172112}, year = {2017}, abstract = {Automatic image reconstruction is critical to cope with steadily increasing data from advanced microscopy. We describe here the Fiji macro 3D ART VeSElecT which we developed to study synaptic vesicles in electron tomograms. We apply this tool to quantify vesicle properties (i) in embryonic Danio rerio 4 and 8 days post fertilization (dpf) and (ii) to compare Caenorhabditis elegans N2 neuromuscular junctions (NMJ) of the wild-type and its septin mutant (unc-59(e261)). We demonstrate development-specific and mutant-specific changes in synaptic vesicle pools in both models. We confirm the functionality of our macro by applying 3D ART VeSElecT to zebrafish NMJ, showing smaller vesicles in 8 dpf embryos than in 4 dpf embryos, which was validated by manual reconstruction of the vesicle pool. Furthermore, we analyze the impact of the C. elegans septin mutant unc-59(e261) on vesicle pool formation and vesicle size.
Automated vesicle registration and characterization was implemented in Fiji as two macros (registration and measurement). This flexible arrangement allows, in particular, reducing false positives through an optional manual revision step. Preprocessing and contrast enhancement work on image stacks with a resolution of 1 nm/pixel in the x and y directions. Semi-automated cell selection was integrated. 3D ART VeSElecT removes interfering components, detects vesicles by 3D segmentation and calculates vesicle volume and diameter (spherical approximation, inner/outer diameter). Results are collected in color using the RoiManager plugin, including the possibility of manually removing non-matching confounder vesicles. The detailed evaluation considered performance (detected vesicles) and specificity (true vesicles) as well as precision and recall. We furthermore show a gain in segmentation and morphological filtering compared to learning-based methods and a large time gain compared to manual segmentation. 3D ART VeSElecT shows small error rates and can be up to 68 times faster than manual annotation. Both automatic and semi-automatic modes are explained, including a tutorial.}, language = {en} } @article{ToepferCorovicFetteetal.2015, author = {Toepfer, Martin and Corovic, Hamo and Fette, Georg and Kl{\"u}gl, Peter and St{\"o}rk, Stefan and Puppe, Frank}, title = {Fine-grained information extraction from German transthoracic echocardiography reports}, series = {BMC Medical Informatics and Decision Making}, volume = {15}, journal = {BMC Medical Informatics and Decision Making}, number = {91}, doi = {10.1186/s12911-015-0215-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-125509}, year = {2015}, abstract = {Background Information extraction techniques that get structured representations out of unstructured data make a large amount of clinically relevant information about patients accessible for semantic applications. These methods typically rely on standardized terminologies that guide this process. Many languages and clinical domains, however, lack appropriate resources and tools, as well as evaluations of their applications, especially if detailed conceptualizations of the domain are required. For instance, German transthoracic echocardiography reports have not been targeted sufficiently before, despite their importance for clinical trials. This work therefore aimed at the development and evaluation of an information extraction component with a fine-grained terminology that enables the recognition of almost all relevant information stated in German transthoracic echocardiography reports at the University Hospital of W{\"u}rzburg. Methods A domain expert validated and iteratively refined an automatically inferred base terminology. The terminology was used by an ontology-driven information extraction system that outputs attribute-value pairs. The final component has been mapped to the central elements of a standardized terminology, and it has been evaluated on documents with different layouts. Results The final system achieved state-of-the-art precision (micro average .996) and recall (micro average .961) on 100 test documents that represent more than 90 \% of all reports. In particular, principal aspects as defined in a standardized external terminology were recognized with f1 = .989 (micro average) and f1 = .963 (macro average).
As a result of keyword matching and restrained concept extraction, the system also obtained high precision on unstructured or exceptionally short documents and on documents with uncommon layouts. Conclusions The developed terminology and the proposed information extraction system make it possible to extract fine-grained information from German semi-structured transthoracic echocardiography reports with very high precision and high recall on the majority of documents at the University Hospital of W{\"u}rzburg. Extracted results populate a clinical data warehouse which supports clinical research.}, language = {en} } @phdthesis{Glasser2001, author = {Glaßer, Christian}, title = {Forbidden-Patterns and Word Extensions for Concatenation Hierarchies}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-1179153}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Starfree regular languages can be built up from alphabet letters by using only Boolean operations and concatenation. The complexity of these languages can be measured with the so-called dot-depth. This measure leads to concatenation hierarchies like the dot-depth hierarchy (DDH) and the closely related Straubing-Th{\´e}rien hierarchy (STH). The question of whether the individual levels of these hierarchies are decidable is still open and is known as the dot-depth problem. In this thesis we prove/reprove the decidability of some lower levels of both hierarchies. More precisely, we characterize these levels in terms of patterns in finite automata (subgraphs in the transition graph) that are not allowed. Therefore, such characterizations are called forbidden-pattern characterizations. The main results of the thesis are as follows: (i) a forbidden-pattern characterization for level 3/2 of the DDH (this implies the decidability of this level), (ii) the decidability of the Boolean hierarchy over level 1/2 of the DDH, and (iii) the definition of decidable hierarchies having close relations to the DDH and STH. Moreover, we prove/reprove the decidability of the levels 1/2 and 3/2 of both hierarchies in terms of forbidden-pattern characterizations. We show the decidability of the Boolean hierarchies over level 1/2 of the DDH and over level 1/2 of the STH. A technique which uses word extensions plays the central role in the proofs of these results. With this technique it is possible to treat the levels 1/2 and 3/2 of both hierarchies in a uniform way. Furthermore, it can be used to prove the decidability of the mentioned Boolean hierarchies. Among other things, we provide a combinatorial tool that allows partitioning words of arbitrary length into factors of bounded length such that every second factor u leads to a loop with label u in a given finite automaton.}, subject = {Automatentheorie}, language = {en} } @phdthesis{Reith2001, author = {Reith, Steffen}, title = {Generalized Satisfiability Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-74}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {In the last 40 years, complexity theory has grown into a rich and powerful field of theoretical computer science. The main task of complexity theory is the classification of problems with respect to their consumption of resources (e.g., running time or required memory). To study the computational complexity (i.e., consumption of resources) of problems, similar problems are grouped into so-called complexity classes. During the systematic study of numerous problems of practical relevance, no efficient algorithms were found for a great number of the studied problems.
Moreover, it was unclear whether such algorithms exist. A major breakthrough in this situation was the introduction of the complexity classes P and NP and the identification of the hardest problems in NP. These hardest problems of NP are nowadays known as NP-complete problems. One prominent example of an NP-complete problem is the satisfiability problem of propositional formulas (SAT). Here, a propositional formula is given as input, and it must be decided whether there exists an assignment of the propositional variables that satisfies the given formula. The intensive study of NP led to numerous related classes, e.g., the classes of the polynomial-time hierarchy PH, P, \#P, PP, NL, L and \#L. During the study of these classes, problems related to propositional formulas were often identified to be complete problems for these classes. Hence some questions arise: Why is SAT so hard to solve? Are there modifications of SAT which are complete for other well-known complexity classes? In the context of these questions, a result by E. Post is extremely useful. He identified and characterized all classes of Boolean functions that are closed under superposition. Using this result, it is possible to study problems connected to generalized propositional logic, which was done in this thesis. Hence, many different problems connected to propositional logic were studied and classified with respect to their computational complexity, clarifying the borderline between easy and hard problems.}, subject = {Erf{\"u}llbarkeitsproblem}, language = {en} } @article{AppelScholzMuelleretal.2015, author = {Appel, Mirjam and Scholz, Claus-J{\"u}rgen and M{\"u}ller, Tobias and Dittrich, Marcus and K{\"o}nig, Christian and Bockstaller, Marie and Oguz, Tuba and Khalili, Afshin and Antwi-Adjei, Emmanuel and Schauer, Tamas and Margulies, Carla and Tanimoto, Hiromu and Yarali, Ayse}, title = {Genome-Wide Association Analyses Point to Candidate Genes for Electric Shock Avoidance in Drosophila melanogaster}, series = {PLoS ONE}, volume = {10}, journal = {PLoS ONE}, number = {5}, doi = {10.1371/journal.pone.0126986}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-152006}, pages = {e0126986}, year = {2015}, abstract = {Electric shock is a common stimulus for nociception research and the most widely used reinforcement in aversive associative learning experiments. Yet, nothing is known about the mechanisms it recruits at the periphery. To help fill this gap, we undertook a genome-wide association analysis using 38 inbred Drosophila melanogaster strains, which avoided shock to varying extents. We identified 514 genes whose expression levels and/or sequences covaried with shock avoidance scores. We independently scrutinized 14 of these genes using mutants, validating the effect of 7 of them on shock avoidance. This emphasizes the value of our candidate gene list as a guide for follow-up research. In addition, by integrating our association results with external protein-protein interaction data we obtained a shock-avoidance-associated network of 38 genes. Both this network and the original candidate list contained a substantial number of genes that affect mechanosensory bristles, which are hairlike organs distributed across the fly's body. These results may point to a potential role for mechanosensory bristles in shock sensation.
Thus, we not only provide a first list of candidate genes for shock avoidance, but also point to an interesting new hypothesis on nociceptive mechanisms.}, language = {en} } @article{Puppe2022, author = {Puppe, Frank}, title = {Gesellschaftliche Perspektiven einer fachspezifischen KI f{\"u}r automatisierte Entscheidungen}, series = {Informatik Spektrum}, volume = {45}, journal = {Informatik Spektrum}, number = {2}, issn = {0170-6012}, doi = {10.1007/s00287-022-01443-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324197}, pages = {88-95}, year = {2022}, abstract = {Artificial intelligence (AI) is developing rapidly and has already achieved impressive successes, including superhuman competence in most games and many quiz shows, intelligent search engines, individualized advertising, speech recognition, speech synthesis and translation at a very high level, and excellent performance in image processing, among others in medicine, optical character recognition and autonomous driving, but also in recognizing people in images and videos or in creating deep fakes of photos and videos. It can be expected that AI will also surpass humans in decision making; an old dream of expert systems that is coming within reach through learning methods, big data, and access to the collected knowledge on the web. This article, however, is concerned less with the technical developments than with the possible societal consequences of a specialized, competent AI for various areas of autonomous, i.e. not merely assistive, decision making: as a football referee, in medicine, for judicial decisions, and, very speculatively, in the political arena. Advantages and disadvantages of these scenarios are discussed from a societal point of view.}, subject = {K{\"u}nstliche Intelligenz}, language = {de} } @article{MaiwaldBruschkeSchneideretal.2023, author = {Maiwald, Ferdinand and Bruschke, Jonas and Schneider, Danilo and Wacker, Markus and Niebling, Florian}, title = {Giving historical photographs a new perspective: introducing camera orientation parameters as new metadata in a large-scale 4D application}, series = {Remote Sensing}, volume = {15}, journal = {Remote Sensing}, number = {7}, issn = {2072-4292}, doi = {10.3390/rs15071879}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-311103}, year = {2023}, abstract = {The ongoing digitization of historical photographs in archives allows investigating the quality, quantity, and distribution of these images. However, the exact interior and exterior camera orientations of these photographs are usually lost during the digitization process. The proposed method uses content-based image retrieval (CBIR) in combination with metadata information to filter exterior images of single buildings. The retrieved photographs are automatically processed in an adapted structure-from-motion (SfM) pipeline to determine the camera parameters. In an interactive georeferencing process, the calculated camera positions are transferred into a global coordinate system. As all image and camera data are efficiently stored in the proposed 4D database, they can be conveniently accessed afterward to georeference newly digitized images by using photogrammetric triangulation and spatial resection. The results show that the CBIR and the subsequent SfM are robust methods for various kinds of buildings and different quantities of data.
The absolute accuracy of the camera positions after georeferencing lies in the range of a few meters, an error likely introduced by the inaccurate LOD2 models used for the transformation. The proposed photogrammetric method, the database structure, and the 4D visualization interface enable adding historical urban photographs and 3D models from other locations.}, language = {en} } @phdthesis{Schaefer2003, author = {Sch{\"a}fer, Dirk}, title = {Globale Selbstlokalisation autonomer mobiler Roboter - Ein Schl{\"u}sselproblem der Service-Robotik}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-7601}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {The dissertation addresses the problem of global self-localization of autonomous mobile robots, which can be described as follows: a mobile robot operating in a building may, under certain circumstances, lose the knowledge of its own position. It is assumed that a map of the building is available to the robot as a model. Using a laser range finder, the mobile device can acquire new information and, given a correct matching to the model map, determine suitable hypothetical positions. In general, however, these positions will be ambiguous. By moving intelligently through its environment, the robot can verify the original sensor data and, in the best case, determine its actual position. For this problem, a new solution approach is presented in theory and practice, which maps the current local map, and thus all sensor data, onto the model of the environment by means of feature-based matching methods. During the movement phase, an exploration algorithm autonomously steers the robot to sensor points that provide new information. In this phase, the existing hypothetical positions are reinforced or weakened, so that after a short time one dominant position, the actual robot position, remains.}, subject = {Mobiler Roboter}, language = {de} } @phdthesis{Schmidt2011, author = {Schmidt, Marco}, title = {Ground Station Networks for Efficient Operation of Distributed Small Satellite Systems}, isbn = {978-3-923959-77-8}, doi = {10.25972/OPUS-4984}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64999}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The field of small satellite formations and constellations has attracted growing attention, based on recent advances in small satellite engineering. The utilization of distributed space systems allows the realization of innovative applications and will enable improved temporal and spatial resolution in observation scenarios. On the other hand, this new paradigm imposes a variety of research challenges. In this monograph, new networking concepts for space missions are presented, using networks of ground stations. The developed approaches combine ground station resources in a coordinated way to achieve more robust and efficient communication links. Within this thesis, the following topics were elaborated to improve the performance of distributed space missions: Appropriate scheduling of contact windows in a distributed ground system is a necessary process to avoid low utilization of ground stations. The theoretical basis for the novel concept of redundant scheduling was elaborated in detail.
In addition to the presented algorithm, a scheduling system was implemented; its performance was tested extensively on real-world scheduling problems. In the scope of data management, a system was developed which autonomously synchronizes data frames in ground station networks and uses this information to detect and correct transmission errors. The system was validated with hardware-in-the-loop experiments, demonstrating the benefits of the developed approach.}, subject = {Kleinsatellit}, language = {en} } @phdthesis{Duelli2012, author = {Duelli, Michael}, title = {Heuristic Design and Provisioning of Resilient Multi-Layer Networks}, doi = {10.25972/OPUS-5600}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69433}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {To jointly provide different services/technologies, like IP and Ethernet or IP and SDH/SONET, in a single network, equipment of multiple technologies needs to be deployed to the sites/Points of Presence (PoP) and interconnected with each other. Therein, a technology may provide transport functionality to other technologies and increase the number of available resources by using multiplexing techniques. By providing its own switching functionality, each technology creates connections in a logical layer, which leads to the notion of multi-layer networks. The design of such networks comprises the deployment and interconnection of components to suit given traffic demands. To prevent traffic loss due to failures of networking equipment, protection mechanisms need to be established. In multi-layer networks, protection can usually be applied in any of the considered layers. In turn, the hierarchical structure of multi-layer networks also bears shared risk groups (SRG). To achieve a cost-optimal resilient network, an appropriate combination of multiplexing techniques, technologies, and their interconnections needs to be found. Thus, network design is a combinatorial problem with a large parameter and solution space. After the design stage, the resources of a multi-layer network can be provided to traffic demands. In particular, dynamic capacity provisioning requires the interaction of sites and layers, as well as the accurate retrieval of constraint information. In recent years, generalized multiprotocol label switching (GMPLS) and path computation elements (PCE) have emerged as possible approaches to these challenges. Like the design, the provisioning of multi-layer networks comprises a variety of optimization parameters, such as blocking probability, resilience, and energy efficiency. In this work, we introduce several efficient heuristics to approach the considered optimization problems. We perform capital expenditure (CAPEX)-aware design of multi-layer networks from scratch, based on the IST NOBEL phase 2 project's cost and equipment data. We incorporate traffic and resilience requirements in different and multiple layers as well as different network architectures. On top of the designed networks, we consider the dynamic provisioning of multi-layer traffic based on the GMPLS and PCE architecture. We evaluate different PCE deployments, information retrieval strategies, and re-optimization.
Finally, we show how information about provisioning utilization can be used to provide feedback for network design.}, subject = {Mehrschichtsystem}, language = {en} } @techreport{FundaKonheiserGermanetal.2023, type = {Working Paper}, author = {Funda, Christoph and Konheiser, Tobias and German, Reinhard and Hielscher, Kai-Steffen}, title = {How to Model and Predict the Scalability of a Hardware-In-The-Loop Test Bench for Data Re-Injection?}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32215}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322150}, pages = {4}, year = {2023}, abstract = {This paper describes a novel application of an empirical network calculus model based on measurements of a hardware-in-the-loop (HIL) test system. The aim is to predict the performance of a HIL test bench for open-loop re-injection in the context of scalability. HIL test benches are distributed computer systems including software, hardware, and networking devices. They are used to validate complex technical systems, but have not yet been the system under study themselves. Our approach is to use measurements from the HIL system to create an empirical model for arrival and service curves. We predict the performance and design the previously unknown parameters of the HIL simulator with network calculus (NC), namely the buffer sizes and the minimum needed pre-buffer time for the playback buffer. We furthermore show that it is possible to estimate the CPU load from arrival and service curves based on the utilization theorem, and hence to estimate the scalability of the HIL system with respect to the number of sensor streams.}, language = {en} } @phdthesis{Aschenbrenner2017, author = {Aschenbrenner, Doris}, title = {Human Robot Interaction Concepts for Human Supervisory Control and Telemaintenance Applications in an Industry 4.0 Environment}, isbn = {978-3-945459-18-8}, doi = {10.25972/OPUS-15052}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150520}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {While teleoperation of technically highly sophisticated systems has long been a wide field of research, especially for space and robotics applications, the automation industry has not yet benefited from its results. Besides the established fields of application, production lines with industrial robots and the surrounding plant components also need to be remotely accessible. This is especially critical for maintenance or if an unexpected problem cannot be solved by the local specialists. Special machine manufacturers, especially robotics companies, sell their technology worldwide. Some factories, for example in emerging economies, lack qualified personnel for repair and maintenance tasks. When a severe failure occurs, an expert of the manufacturer needs to fly there, which leads to long downtimes of the machine or even the whole production line. With the development of data networks, a huge part of these travels can be omitted if appropriate teleoperation equipment is provided. This thesis describes the development of a telemaintenance system, which was established in an active production line for research purposes.
The customer production site of Braun in Marktheidenfeld, a factory which belongs to Procter \& Gamble, consists of a six-axis Cartesian industrial robot by KUKA Industries, a two-component injection molding system and an assembly unit. The plant produces plastic parts for electric toothbrushes. In the research projects "MainTelRob" and "Bayern.digital", during which this plant was utilized, the Zentrum f{\"u}r Telematik e.V. (ZfT) and its project partners developed novel technical approaches and procedures for modern telemaintenance. The term "telemaintenance" hereby refers to the integration of computer science and communication technologies into the maintenance strategy. It is particularly interesting for high-grade capital-intensive goods like industrial robots. Typical telemaintenance tasks are, for example, the analysis of a robot failure or difficult repair operations. The service department of KUKA Industries is responsible for customers distributed worldwide who own more than one robot. Currently, such tasks are handled via phone support and service staff who travel abroad. KUKA wants to expand its service activities to telemaintenance and struggles with the high demands of teleoperation, especially regarding security infrastructure. In addition, the facility in Marktheidenfeld has to keep up with the high international standards of Procter \& Gamble and wants to minimize machine downtimes. Like 71.6 \% of all German companies, P\&G sees a huge potential in early information on its production system, but complains about the insufficient quality and the lack of currentness of data. The main research focus of this work lies on the human-machine interface for all human tasks in a telemaintenance setup. This thesis presents original work on the use of a mobile device in the context of maintenance, describes new tools for asynchronous remote analysis, and puts all parts together in an integrated telemaintenance infrastructure. With the help of Augmented Reality, user performance and satisfaction could be raised. Special regard is given to the situation awareness of the remote expert, realized by different camera viewpoints. In detail, the work consists of:
- Support of maintenance tasks with a mobile device
- Development and evaluation of a context-aware inspection tool
- Comparison of a new touch-based mobile robot programming device to the former teach pendant
- Study on Augmented Reality support for repair tasks with a mobile device
- Condition monitoring for a specific plant with an industrial robot
- Human-computer interaction for remote analysis of a single plant cycle
- A big data analysis tool for a multitude of cycles and similar plants
- 3D process visualization for a specific plant cycle with additional virtual information
- Network architecture in hardware, software and network infrastructure
- Mobile-device computer-supported collaborative work for telemaintenance
- A motor exchange telemaintenance example in a running production environment
- Augmented-Reality-supported remote plant visualization for better situation awareness}, subject = {Fernwartung}, language = {en} } @phdthesis{Selbach2011, author = {Selbach, Stefan}, title = {Hybride bitparallele Volltextsuche}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66476}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The great advantage of a q-gram index is that it makes it possible to search for arbitrary character strings in a document collection.
A drawback, however, is that with growing amounts of data this index tends to become very large, which is associated with a significant drop in performance. This work presents a novel technique that increases the performance of a q-gram index by means of additional M-matrices for each q-gram and by combining it with an inverted index. An M-matrix is a bit matrix that contains information about the positions of a q-gram. When two or more q-grams are combined, these M-matrices also provide information about the positions of the combination. This can be used to reduce the complexity of merging the q-gram hit lists for a given search query, and it improves the performance of the n-gram inverted index. The combination with a term-based inverted index additionally speeds up the average search time and unites the advantages of both index formats. Redundant information in the q-gram index is reduced, and further functionality is added, such as ranking hits by relevance, searching for concepts, or creating index partitions according to the importance of the contained terms.}, subject = {Information Retrieval}, language = {de} } @article{AndronicShirakashiPickeletal.2015, author = {Andronic, Joseph and Shirakashi, Ryo and Pickel, Simone U. and Westerling, Katherine M. and Klein, Teresa and Holm, Thorge and Sauer, Markus and Sukhorukov, Vladimir L.}, title = {Hypotonic Activation of the Myo-Inositol Transporter SLC5A3 in HEK293 Cells Probed by Cell Volumetry, Confocal and Super-Resolution Microscopy}, series = {PLoS One}, volume = {10}, journal = {PLoS One}, number = {3}, doi = {10.1371/journal.pone.0119990}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-126408}, year = {2015}, abstract = {Swelling-activated pathways for myo-inositol, one of the most abundant organic osmolytes in mammalian cells, have not yet been identified. The present study explores the SLC5A3 protein as a possible transporter of myo-inositol in hypotonically swollen HEK293 cells. To address this issue, we examined the relationship between the hypotonicity-induced changes in plasma membrane permeability to myo-inositol Pino [m/s] and the expression/localization of SLC5A3. Pino values were determined by cell volumetry over a wide tonicity range (100-275 mOsm) in myo-inositol-substituted solutions. While being negligible under mild hypotonicity (200-275 mOsm), Pino grew rapidly at osmolalities below 200 mOsm to reach a maximum of ∼3 nm/s at 100-125 mOsm, as indicated by fast cell swelling due to myo-inositol influx. The increase in Pino most likely resulted from the hypotonicity-mediated incorporation of cytosolic SLC5A3 into the plasma membrane, as revealed by confocal fluorescence microscopy of cells expressing EGFP-tagged SLC5A3 and super-resolution imaging of immunostained SLC5A3 by direct stochastic optical reconstruction microscopy (dSTORM). dSTORM in hypotonic cells revealed a surface density of membrane-associated SLC5A3 proteins of 200-2000 localizations/μm². Assuming SLC5A3 to be the major path for myo-inositol, a turnover rate of 80-800 myo-inositol molecules per second for a single transporter protein was estimated from combined volumetric and dSTORM data.
Hypotonic stress also caused a significant upregulation of SLC5A3 gene expression, as detected by semiquantitative RT-PCR and Western blot analysis. In summary, our data provide the first evidence for swelling-mediated activation of SLC5A3, thus suggesting a functional role of this transporter in the hypotonic volume regulation of mammalian cells.}, language = {en} } @article{WeissSchultz2015, author = {Weiß, Clemens Leonard and Schultz, J{\"o}rg}, title = {Identification of divergent WH2 motifs by HMM-HMM alignments}, series = {BMC Research Notes}, volume = {8}, journal = {BMC Research Notes}, number = {18}, doi = {10.1186/s13104-015-0981-7}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-126413}, year = {2015}, abstract = {Background The actin cytoskeleton is a hallmark of eukaryotic cells. Its regulation as well as its interaction with other proteins is carefully orchestrated by actin interaction domains. One of the key players is the WH2 motif, which enables binding to actin monomers and filaments and is involved in the regulation of actin nucleation. In contrast to conserved domains, the identification of this motif in protein sequences is challenging, as it is short and poorly conserved. Findings To identify divergent members, we combined Hidden Markov Model (HMM) to HMM alignments with orthology predictions. Thereby, we identified nearly 500 proteins containing so far unannotated WH2 motifs. This included shootin-1, an actin-binding protein involved in neuron polarization. Among others, WH2 motifs of 'proximal to raf' (ptr) orthologs, which are described in the literature but not annotated in genome databases, were identified. Conclusion In summary, we substantially increased the number of WH2-motif-containing proteins. This identification of candidate regions for actin interaction could steer their experimental characterization. Furthermore, the approach outlined here can easily be adapted to the identification of divergent members of further domain families.}, language = {en} } @article{BugaScholzKumaretal.2012, author = {Buga, Ana-Maria and Scholz, Claus J{\"u}rgen and Kumar, Senthil and Herndon, James G. and Alexandru, Dragos and Cojocaru, Gabriel Radu and Dandekar, Thomas and Popa-Wagner, Aurel}, title = {Identification of New Therapeutic Targets by Genome-Wide Analysis of Gene Expression in the Ipsilateral Cortex of Aged Rats after Stroke}, series = {PLoS One}, volume = {7}, journal = {PLoS One}, number = {12}, doi = {10.1371/journal.pone.0050985}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-130657}, pages = {e50985}, year = {2012}, abstract = {Background: Because most human stroke victims are elderly, studies of experimental stroke in the aged rather than the young rat model may be optimal for identifying clinically relevant cellular responses, as well as for pinpointing beneficial interventions. Methodology/Principal Findings: We employed the Affymetrix platform to analyze the whole-gene transcriptome following temporary ligation of the middle cerebral artery in aged and young rats. The correspondence, heat map, and dendrogram analyses independently suggest a differential, age-group-specific behaviour of major gene clusters after stroke. Overall, the pattern of gene expression strongly suggests that the response of the aged rat brain is qualitatively rather than quantitatively different from the young, i.e., the total number of regulated genes is comparable in the two age groups, but the aged rats had great difficulty in mounting a timely response to stroke.
Our study indicates that four genes related to neuropathic syndrome, stress, anxiety disorders and depression (Acvr1c, Cort, Htr2b and Pnoc) may have an impaired response to stroke in aged rats. New therapeutic options in aged rats may also include Calcrl, Cyp11b1, Prcp, Cebpa, Cfd, Gpnmb, Fcgr2b, Fcgr3a, Tnfrsf26, Adam17 and Mmp14. An unexpected target in aged rats is the enzyme 3-hydroxy-3-methylglutaryl-Coenzyme A synthase 1, a key enzyme of the cholesterol synthesis pathway. Post-stroke axonal growth was compromised in both age groups. Conclusion/Significance: We suggest that a multi-stage, multimodal treatment in aged animals may be more likely to produce positive results. Such a therapeutic approach should be focused on tissue restoration but should also address other aspects of post-stroke patient therapy such as neuropathic syndrome, stress, anxiety disorders, depression, neurotransmission and blood pressure.}, language = {en} } @techreport{VomhoffGeisslerHossfeld2022, type = {Working Paper}, author = {Vomhoff, Viktoria and Geißler, Stefan and Hoßfeld, Tobias}, title = {Identification of Signaling Patterns in Mobile IoT Signaling Traffic}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28081}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280819}, pages = {4}, year = {2022}, abstract = {We attempt to identify sequences of signaling dialogs in order to strengthen our understanding of the signaling behavior of IoT devices, examining a dataset containing over 270,000 distinct IoT devices whose signaling traffic has been observed over a 31-day period in a 2G network [4]. We propose a set of rules that allows the assembly of signaling dialogs into so-called sessions in order to identify common patterns and lay the foundation for future research in the areas of traffic modeling and anomaly detection.}, subject = {Datennetz}, language = {en} }
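To illustrate the general idea of assembling signaling dialogs into sessions described in the last abstract above (Vomhoff et al.), the following minimal Python sketch groups per-device dialog timestamps using a simple inactivity-gap rule. This is only an illustrative assumption: the function name assemble_sessions, the gap_threshold parameter, and the 300-second default are hypothetical and do not reproduce the rule set proposed in the paper.

from collections import defaultdict

def assemble_sessions(dialogs, gap_threshold=300.0):
    """Group signaling dialogs into sessions per device.

    dialogs: iterable of (device_id, start_time) tuples, times in seconds.
    A new session starts whenever the gap to the previous dialog of the
    same device exceeds gap_threshold. This inactivity-gap rule is an
    illustrative assumption, not the authors' actual rule set.
    """
    per_device = defaultdict(list)
    for device_id, t in dialogs:
        per_device[device_id].append(t)

    sessions = defaultdict(list)  # device_id -> list of sessions (lists of times)
    for device_id, times in per_device.items():
        current = []
        for t in sorted(times):
            # Close the current session if the inactivity gap is exceeded.
            if current and t - current[-1] > gap_threshold:
                sessions[device_id].append(current)
                current = []
            current.append(t)
        if current:
            sessions[device_id].append(current)
    return sessions

# Example: device "a" produces two sessions separated by more than 300 s.
example = [("a", 0.0), ("a", 40.0), ("a", 1000.0), ("b", 5.0)]
print(dict(assemble_sessions(example)))
# {'a': [[0.0, 40.0], [1000.0]], 'b': [[5.0]]}

Once sessions are assembled this way, recurring session shapes (e.g., dialog counts and inter-dialog gaps) could be compared across devices, which is the kind of pattern mining the abstract names as a foundation for traffic modeling and anomaly detection.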