@inproceedings{OPUS4-4233, title = {9. Fachgespr{\"a}ch Sensornetze der GI/ITG Fachgruppe Kommunikation und Verteilte Systeme}, editor = {Kolla, Reiner}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51106}, year = {2010}, abstract = {J{\"a}hrliches Fachgespr{\"a}ch zu Sensornetzen der GI/ITG Fachgruppe Kommunikation und Verteilte Systeme, 16. - 17. September 2010, Universit{\"a}t W{\"u}rzburg}, subject = {Drahtloses Sensorsystem}, language = {mul} } @article{StaigerCadotKooteretal.2012, author = {Staiger, Christine and Cadot, Sidney and Kooter, Raul and Dittrich, Marcus and M{\"u}ller, Tobias and Klau, Gunnar W. and Wessels, Lodewyk F. A.}, title = {A Critical Evaluation of Network and Pathway-Based Classifiers for Outcome Prediction in Breast Cancer}, series = {PLoS One}, volume = {7}, journal = {PLoS One}, number = {4}, doi = {10.1371/journal.pone.0034796}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-131323}, pages = {e34796}, year = {2012}, abstract = {Recently, several classifiers that combine primary tumor data, like gene expression data, and secondary data sources, such as protein-protein interaction networks, have been proposed for predicting outcome in breast cancer. In these approaches, new composite features are typically constructed by aggregating the expression levels of several genes. The secondary data sources are employed to guide this aggregation. Although many studies claim that these approaches improve classification performance over single genes classifiers, the gain in performance is difficult to assess. This stems mainly from the fact that different breast cancer data sets and validation procedures are employed to assess the performance. Here we address these issues by employing a large cohort of six breast cancer data sets as benchmark set and by performing an unbiased evaluation of the classification accuracies of the different approaches. Contrary to previous claims, we find that composite feature classifiers do not outperform simple single genes classifiers. We investigate the effect of (1) the number of selected features; (2) the specific gene set from which features are selected; (3) the size of the training set and (4) the heterogeneity of the data set on the performance of composite feature and single genes classifiers. Strikingly, we find that randomization of secondary data sources, which destroys all biological information in these sources, does not result in a deterioration in performance of composite feature classifiers. Finally, we show that when a proper correction for gene set size is performed, the stability of single genes sets is similar to the stability of composite feature sets. Based on these results there is currently no reason to prefer prognostic classifiers based on composite features over single genes classifiers for predicting outcome in breast cancer.}, language = {en} } @article{BayerPruckner2023, author = {Bayer, Daniel and Pruckner, Marco}, title = {A digital twin of a local energy system based on real smart meter data}, series = {Energy Informatics}, volume = {6}, journal = {Energy Informatics}, doi = {10.1186/s42162-023-00263-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357456}, year = {2023}, abstract = {The steadily increasing usage of smart meters generates a valuable amount of high-resolution data about the individual energy consumption and production of local energy systems. Private households install more and more photovoltaic systems, battery storage and big consumers like heat pumps. 
Thus, our vision is to augment these collected smart meter time series of a complete system (e.g., a city, town or complex institutions like airports) with the previously named components added by simulation. We, therefore, propose a novel digital twin of such an energy system based solely on a complete set of smart meter data including additional building data. Based on the additional geospatial data, the twin is intended to represent the addition of the abovementioned components as realistically as possible. Outputs of the twin can be used as a decision support for either system operators where to strengthen the system or for individual households where and how to install photovoltaic systems and batteries. Meanwhile, the first local energy system operators have had such smart meter data of almost all residential consumers for several years. We acquire the data of an exemplary operator and discuss a case study presenting some features of our digital twin and highlighting the value of the combination of smart meter and geospatial data.}, language = {en} } @inproceedings{DaviesDewellHarvey2021, author = {Davies, Richard and Dewell, Nathan and Harvey, Carlo}, title = {A framework for interactive, autonomous and semantic dialogue generation in games}, series = {Proceedings of the 1st Games Technology Summit}, booktitle = {Proceedings of the 1st Games Technology Summit}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-246023}, pages = {16-28}, year = {2021}, abstract = {Immersive virtual environments provide users with the opportunity to escape from the real world, but scripted dialogues can disrupt the presence within the world the user is trying to escape into. Both Non-Playable Character (NPC) to Player and NPC to NPC dialogue can be non-natural, and the reliance on responding with pre-defined dialogue does not always meet the player's emotional expectations or provide responses appropriate to the given context or world states. This paper investigates the application of Artificial Intelligence (AI) and Natural Language Processing to generate dynamic human-like responses within a themed virtual world. Each thematic has been analysed against human-generated responses for the same seed and demonstrates invariance of rating across a range of model sizes, but shows an effect of theme and the size of the corpus used for fine-tuning the context for the game world.}, language = {en} } @article{SchmidSchindelinCardonaetal.2010, author = {Schmid, Benjamin and Schindelin, Johannes and Cardona, Albert and Longair, Martin and Heisenberg, Martin}, title = {A high-level 3D visualization API for Java and ImageJ}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-67851}, year = {2010}, abstract = {Background: Current imaging methods such as Magnetic Resonance Imaging (MRI), Confocal microscopy, Electron Microscopy (EM) or Selective Plane Illumination Microscopy (SPIM) yield three-dimensional (3D) data sets in need of appropriate computational methods for their analysis. The reconstruction, segmentation and registration are best approached from the 3D representation of the data set. Results: Here we present a platform-independent framework based on Java and Java 3D for accelerated rendering of biological images. Our framework is seamlessly integrated into ImageJ, a free image processing package with a vast collection of community-developed biological image analysis tools.
Our framework enriches the ImageJ software libraries with methods that greatly reduce the complexity of developing image analysis tools in an interactive 3D visualization environment. In particular, we provide high-level access to volume rendering, volume editing, surface extraction, and image annotation. The ability to rely on a library that removes the low-level details enables concentrating software development efforts on the algorithm implementation parts. Conclusions: Our framework enables biomedical image software to be developed with 3D visualization capabilities with very little effort. We offer the source code and convenient binary packages along with extensive documentation at http://3dviewer.neurofly.de.}, subject = {Visualisierung}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {A Knowledge-based Hybrid Statistical Classifier for Reconstructing the Chronology of the Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-54712}, year = {2011}, abstract = {Computationally categorizing the Quran's chapters has been mainly confined to the determination of chapters' revelation places. However, this broad classification is not sufficient to effectively and thoroughly understand and interpret the Quran. The chronology of revelation would not only improve comprehension of the philosophy of Islam, but also the ease of implementing and memorizing its laws and recommendations. This paper attempts to estimate the chapters' possible dates of revelation through their lexical frequency profiles. A hybrid statistical classifier consisting of stemming and clustering algorithms for comparing lexical frequency profiles of chapters and deriving dates of revelation has been developed. The classifier is trained using some chapters with known dates of revelation. Then it classifies chapters with uncertain dates of revelation by computing their proximity to the training ones. The results reported here indicate that the proposed methodology yields usable results in estimating dates of revelation of the Quran's chapters based on their lexical contents.}, subject = {Text Mining}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {A Rule-based Statistical Classifier for Determining a Base Text and Ranking Witnesses In Textual Documents Collation Process}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-57465}, year = {2011}, abstract = {Given a collection of diverging documents about some lost original text, any person interested in the text would try reconstructing it from the diverging documents. Whether it is eclecticism, stemmatics, or copy-text, one is expected to explicitly or indirectly select one of the documents as a starting point or as a base text, which could be emended through comparison with the remaining documents, so that a text that could be designated as the original document is generated. Unfortunately, the process of giving priority to one of the documents, also known as witnesses, is a subjective approach. In fact, even Cladistics, which could be considered a computer-based approach to implementing stemmatics, does not suggest or recommend that users select a certain witness as a starting point for the process of reconstructing the original document. In this study, a computational method using a rule-based Bayesian classifier is used to assist text scholars in their attempts to reconstruct a non-existing document from some available witnesses.
The method developed in this study consists of successively selecting a base text and collating it with the remaining documents. Each completed collation cycle stores the selected base text and its closest witness, along with a weighted score of their similarities and differences. At the end of the collation process, the witness selected most often by the majority of base texts is considered the probable base text of the collection. Witnesses' scores are weighted using a weighting system based on the effects of the types of textual modifications on the process of reconstructing original documents. Users have the possibility to select between baseless and base text collation. If a base text is selected, the task is reduced to ranking the witnesses with respect to the base text; otherwise a base text as well as a ranking of the witnesses with respect to the base text are computed and displayed on a histogram.}, subject = {Textvergleich}, language = {en} } @inproceedings{AliMontenegro2015, author = {Ali, Qasim and Montenegro, Sergio}, title = {A Simple Approach to Quadrocopter Formation Flying Test Setup for Education and Development}, series = {INTED2015 Proceedings}, booktitle = {INTED2015 Proceedings}, publisher = {International Academy of Technology, Education and Development (IATED)}, isbn = {978-84-606-5763-7}, issn = {2340-1079}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-114495}, pages = {2776 -- 2784}, year = {2015}, abstract = {A simple test setup has been developed at the Institute of Aerospace Information Technology, University of W{\"u}rzburg, Germany, to realize basic functionalities for formation flight of quadrocopters. The test environment is planned to be utilized for developing and validating the algorithms for formation flying capability in a real environment as well as for education purposes. An already existing test bed for a single quadrocopter was extended with the necessary inter-communication and distributed control mechanism to test the algorithms for formation flights in 2 degrees of freedom (roll / pitch). This study encompasses the domains of communication, control engineering and embedded systems programming. The Bluetooth protocol has been used for inter-communication between two quadrocopters. A simple approach of PID control in combination with a Kalman filter has been exploited. The MATLAB Instrument Control Toolbox has been used for data display, plotting and analysis. Plots can be drawn in real-time and received information can also be stored in the form of files for later use and analysis. The test setup has been developed indigenously and at considerably low cost. Emphasis has been placed on simplicity to facilitate the students' learning process. Several lessons have been learnt during the course of development of this setup.
The proposed setup is quite flexible and can be modified as per changing requirements.}, subject = {Flugk{\"o}rper}, language = {en} } @article{PrantlZeckBaueretal.2022, author = {Prantl, Thomas and Zeck, Timo and Bauer, Andre and Ten, Peter and Prantl, Dominik and Yahya, Ala Eddine Ben and Ifflaender, Lukas and Dmitrienko, Alexandra and Krupitzer, Christian and Kounev, Samuel}, title = {A Survey on Secure Group Communication Schemes With Focus on IoT Communication}, series = {IEEE Access}, volume = {10}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2022.3206451}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300257}, pages = {99944 -- 99962}, year = {2022}, abstract = {A key feature for the Internet of Things (IoT) is to control what content is available to each user. To handle this access management, encryption schemes can be used. Due to the diverse usage of encryption schemes, there are various realizations of 1-to-1, 1-to-n, and n-to-n schemes in the literature. This multitude of encryption methods with a wide variety of properties presents developers with the challenge of selecting the optimal method for a particular use case, which is further complicated by the fact that there is no overview of existing encryption schemes. To fill this gap, we envision a cryptography encyclopedia providing such an overview of existing encryption schemes. In this survey paper, we take a first step towards such an encyclopedia by creating a sub-encyclopedia for secure group communication (SGC) schemes, which belong to the n-to-n category. We extensively surveyed the state-of-the-art and classified 47 different schemes. More precisely, we provide (i) a comprehensive overview of the relevant security features, (ii) a set of relevant performance metrics, (iii) a classification for secure group communication schemes, and (iv) workflow descriptions of the 47 schemes. Moreover, we perform a detailed performance and security evaluation of the 47 secure group communication schemes. Based on this evaluation, we create a guideline for the selection of secure group communication schemes.}, language = {en} } @article{HeinWienrichLatoschik2021, author = {Hein, Rebecca M. and Wienrich, Carolin and Latoschik, Marc E.}, title = {A systematic review of foreign language learning with immersive technologies (2001-2020)}, series = {AIMS Electronics and Electrical Engineering}, volume = {5}, journal = {AIMS Electronics and Electrical Engineering}, number = {2}, doi = {10.3934/electreng.2021007}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-268811}, pages = {117-145}, year = {2021}, abstract = {This study provides a systematic literature review of research (2001-2020) in the field of teaching and learning a foreign language and intercultural learning using immersive technologies. Based on 2507 sources, 54 articles were selected according to predefined selection criteria. The review is aimed at providing information about which immersive interventions are being used for foreign language learning and teaching and where potential research gaps exist. The papers were analyzed and coded according to the following categories: (1) investigation form and education level, (2) degree of immersion and technology used, (3) predictors, and (4) criteria. The review identified key research findings relating to the use of immersive technologies for learning and teaching a foreign language and intercultural learning at cognitive, affective, and conative levels.
The findings revealed research gaps in the area of teachers as a target group, and virtual reality (VR) as a fully immersive intervention form. Furthermore, the studies reviewed rarely examined behavior and implicit measurements related to inter- and trans-cultural learning and teaching. Inter- and transcultural learning and teaching especially is an underrepresented subject of investigation. Finally, concrete suggestions for future research are given. The systematic review contributes to the challenge of interdisciplinary cooperation between pedagogy, foreign language didactics, and Human-Computer Interaction to achieve innovative teaching-learning formats and a successful digital transformation.}, language = {en} } @article{HalbigLatoschik2021, author = {Halbig, Andreas and Latoschik, Marc Erich}, title = {A systematic review of physiological measurements, factors, methods, and applications in virtual reality}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.694567}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260503}, year = {2021}, abstract = {Measurements of physiological parameters provide an objective, often non-intrusive, and (at least semi-)automatic evaluation and utilization of user behavior. In addition, specific hardware devices of Virtual Reality (VR) often ship with built-in sensors, i.e. eye-tracking and movement sensors. Hence, the combination of physiological measurements and VR applications seems promising. Several approaches have investigated the applicability and benefits of this combination for various fields of application. However, the range of possible application fields, coupled with potentially useful and beneficial physiological parameters, types of sensor, target variables and factors, and analysis approaches and techniques is manifold. This article provides a systematic overview and an extensive state-of-the-art review of the usage of physiological measurements in VR. We identified 1,119 works that make use of physiological measurements in VR. Within these, we identified 32 approaches that focus on the classification of characteristics of experience, common in VR applications. The first part of this review categorizes the 1,119 works by field of application, i.e. therapy, training, entertainment, and communication and interaction, as well as by the specific target factors and variables measured by the physiological parameters. An additional category summarizes general VR approaches applicable to all specific fields of application since they target typical VR qualities. In the second part of this review, we analyze the target factors and variables regarding the respective methods used for an automatic analysis and, potentially, classification. For example, we highlight which measurement setups have been proven to be sensitive enough to distinguish different levels of arousal, valence, anxiety, stress, or cognitive workload in the virtual realm.
This work may prove useful for all researchers who want to use physiological data in VR and seek a good overview of prior approaches taken, their benefits and potential drawbacks.}, language = {en} } @article{GrohmannHerbstChalbanietal.2020, author = {Grohmann, Johannes and Herbst, Nikolas and Chalbani, Avi and Arian, Yair and Peretz, Noam and Kounev, Samuel}, title = {A Taxonomy of Techniques for SLO Failure Prediction in Software Systems}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010010}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200594}, pages = {10}, year = {2020}, abstract = {Failure prediction is an important aspect of self-aware computing systems. Therefore, a multitude of different approaches has been proposed in the literature over the past few years. In this work, we propose a taxonomy for organizing works focusing on the prediction of Service Level Objective (SLO) failures. Our taxonomy classifies related work along the dimensions of the prediction target (e.g., anomaly detection, performance prediction, or failure prediction), the time horizon (e.g., detection or prediction, online or offline application), and the applied modeling type (e.g., time series forecasting, machine learning, or queueing theory). The classification is derived based on a systematic mapping of relevant papers in the area. Additionally, we give an overview of different techniques in each sub-group and address remaining challenges in order to guide future research.}, language = {en} } @inproceedings{SchlosserJarschelDuellietal.2010, author = {Schlosser, Daniel and Jarschel, Michael and Duelli, Michael and Hoßfeld, Tobias and Hoffmann, Klaus and Hoffmann, Marco and Morper, Hans Jochen and Jurca, Dan and Khan, Ashiq}, title = {A Use Case Driven Approach to Network Virtualization}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55611}, year = {2010}, abstract = {In today's Internet, services are very different in their requirements on the underlying transport network. In the future, this diversity will increase and it will be more difficult to accommodate all services in a single network. A possible approach to cope with this diversity within future networks is the introduction of support for running isolated networks for different services on top of a single shared physical substrate. This would also enable easy network management and ensure an economically sound operation. End-customers will readily adopt this approach as it enables new and innovative services without being expensive. In order to arrive at a concept that enables this kind of network, it needs to be designed around and constantly checked against realistic use cases. In this contribution, we present three use cases for future networks. We describe functional blocks of a virtual network architecture, which are necessary to support these use cases within the network.
Furthermore, we discuss the interfaces needed between the functional blocks and consider standardization issues that arise in order to achieve a globally consistent control and management structure of virtual networks.}, subject = {Virtualisierung}, language = {en} } @techreport{AlfredssonKasslerVestinetal.2022, type = {Working Paper}, author = {Alfredsson, Rebecka and Kassler, Andreas and Vestin, Jonathan and Pieska, Marcus and Amend, Markus}, title = {Accelerating a Transport Layer based 5G Multi-Access Proxy on SmartNIC}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28079}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280798}, pages = {4}, year = {2022}, abstract = {Utilizing multiple access technologies such as 5G, 4G, and Wi-Fi within a coherent framework is currently being standardized by 3GPP within 5G ATSSS. Indeed, distributing packets over multiple networks can lead to increased robustness, resiliency and capacity. A key part of such a framework is the multi-access proxy, which transparently distributes packets over multiple paths. As the proxy needs to serve thousands of customers, scalability and performance are crucial for operator deployments. In this paper, we leverage recent advancements in data plane programming, implement a multi-access proxy based on the MP-DCCP tunneling approach in P4 and hardware accelerate it by deploying the pipeline on a smartNIC. This is challenging due to the complex scheduling and congestion control operations involved. We present our pipeline and data structures design for congestion control and packet scheduling state management. Initial measurements in our testbed show that packet latency is in the range of 25 μs, demonstrating the feasibility of our approach.}, subject = {Datennetz}, language = {en} } @techreport{BrischKasslerVestinetal.2023, type = {Working Paper}, author = {Brisch, Fabian and Kassler, Andreas and Vestin, Jonathan and Pieska, Marcus and Amend, Markus}, title = {Accelerating Transport Layer Multipath Packet Scheduling for 5G-ATSSS}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32205}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322052}, pages = {4}, year = {2023}, abstract = {Utilizing multiple access networks such as 5G, 4G, and Wi-Fi simultaneously can lead to increased robustness, resiliency, and capacity for mobile users. However, transparently implementing packet distribution over multiple paths within the core of the network faces multiple challenges including scalability to a large number of customers, low latency, and high-capacity packet processing requirements. In this paper, we offload congestion-aware multipath packet scheduling to a smartNIC. However, such hardware acceleration faces multiple challenges due to programming language and platform limitations. We implement different multipath schedulers in P4 with different complexity in order to cope with dynamically changing path capacities.
Using testbed measurements, we show that our CMon scheduler, which monitors path congestion in the data plane and dynamically adjusts scheduling weights for the different paths based on path state information, can process more than 3.5 Mpps with 25 μs latency.}, language = {en} } @phdthesis{Baunach2012, author = {Baunach, Marcel}, title = {Advances in Distributed Real-Time Sensor/Actuator Systems Operation - Operating Systems, Communication, and Application Design Concepts -}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76489}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {This work takes a close look at several quite different research areas related to the design of networked embedded sensor/actuator systems. The variety of the topics illustrates the potential complexity of current sensor network applications, especially when enriched with actuators for proactivity and environmental interaction. Besides their conception, development, installation and long-term operation, we'll mainly focus on more "low-level" aspects: Compositional hardware and software design, task cooperation and collaboration, memory management, and real-time operation will be addressed from a local node perspective. In contrast, inter-node synchronization, communication, as well as sensor data acquisition, aggregation, and fusion will be discussed from a rather global network view. The diversity in the concepts was intentionally accepted to finally facilitate the reliable implementation of truly complex systems. In particular, these should go beyond the usual "sense and transmit of sensor data", but show how powerful today's networked sensor/actuator systems can be despite their low computational performance and constrained hardware: if their resources are only coordinated efficiently!}, subject = {Eingebettetes System}, language = {en} } @phdthesis{Eichelberger2005, author = {Eichelberger, Holger}, title = {Aesthetics and automatic layout of UML class diagrams}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-14831}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {In recent years, visual methods have been introduced in industrial software production and teaching of software engineering. In particular, the international standardization of a graphical software engineering language, the Unified Modeling Language (UML), was a reason for this tendency. Unfortunately, various problems exist in concrete realizations of tools, e.g. due to missing compliance with the standard. One problem is the automatic layout, which is required for a consistent automatic software design. The thesis derives reasons and criteria for an automatic layout method, which produces drawings of UML class diagrams according to the UML specification and issues of human computer interaction, e.g. readability. A unique set of aesthetic criteria is combined from four different disciplines involved in this topic. Based on these aesthetic rules, a hierarchical layout algorithm is developed, analyzed, measured by specialized measuring techniques and compared to related work. Then, the realization of the algorithm as a Java framework is given as an architectural description.
Finally, adaptations to anticipated future changes of the UML, improvements of the framework and example drawings of the implementation are given.}, subject = {URL}, language = {en} } @article{BartlWenningerWolfetal.2021, author = {Bartl, Andrea and Wenninger, Stephan and Wolf, Erik and Botsch, Mario and Latoschik, Marc Erich}, title = {Affordable but not cheap: a case study of the effects of two 3D-reconstruction methods of virtual humans}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.694617}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260492}, year = {2021}, abstract = {Realistic and lifelike 3D-reconstruction of virtual humans has various exciting and important use cases. Our and others' appearances have notable effects on ourselves and our interaction partners in virtual environments, e.g., on acceptance, preference, trust, believability, behavior (the Proteus effect), and more. Today, multiple approaches for the 3D-reconstruction of virtual humans exist. They significantly vary in terms of the degree of achievable realism, the technical complexities, and finally, the overall reconstruction costs involved. This article compares two 3D-reconstruction approaches with very different hardware requirements. The high-cost solution uses a typical complex and elaborate camera rig consisting of 94 digital single-lens reflex (DSLR) cameras. The recently developed low-cost solution uses a smartphone camera to create videos that capture multiple views of a person. Both methods use photogrammetric reconstruction and template fitting with the same template model and differ in their adaptation to the method-specific input material. Each method generates high-quality virtual humans ready to be processed, animated, and rendered by standard XR simulation and game engines such as Unreal or Unity. We compare the results of the two 3D-reconstruction methods in an immersive virtual environment against each other in a user study. Our results indicate that the virtual humans from the low-cost approach are perceived similarly to those from the high-cost approach regarding the perceived similarity to the original, human-likeness, beauty, and uncanniness, despite significant differences in the objectively measured quality. The perceived feeling of change of one's own body was higher for the low-cost virtual humans. Quality differences were perceived more strongly for one's own body than for other virtual humans.}, language = {en} } @techreport{GrigorjewDiederichHossfeldetal.2022, type = {Working Paper}, author = {Grigorjew, Alexej and Diederich, Philip and Hoßfeld, Tobias and Kellerer, Wolfgang}, title = {Affordable Measurement Setups for Networking Device Latency with Sub-Microsecond Accuracy}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28075}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280751}, pages = {5}, year = {2022}, abstract = {This document presents a networking latency measurement setup that focuses on affordability and universal applicability, and can provide sub-microsecond accuracy. It explains the prerequisites, hardware choices, and considerations to respect during measurement. In addition, it discusses the necessity for exhaustive latency measurements when dealing with high availability and low latency requirements.
Preliminary results show that the accuracy is within ±0.02 μs when used with the Intel I350-T2 network adapter.}, subject = {Datennetz}, language = {en} } @phdthesis{Herrler2007, author = {Herrler, Rainer}, title = {Agentenbasierte Simulation zur Ablaufoptimierung in Krankenh{\"a}usern und anderen verteilten, dynamischen Umgebungen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-24483}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Verteilte dynamische Systeme unter lokalen und globalen Gesichtspunkten zu optimieren ist eine schwierige Aufgabe. Zwar sind grunds{\"a}tzliche Auswirkungen einzelner Maßnahmen h{\"a}ufig bekannt, durch widerstrebende Ziele, Wechselwirkungen zwischen Prozessen und Nebenwirkungen von Maßnahmen ist ein analytisches Vorgehen bei der Optimierung nicht m{\"o}glich. Besonders schwierig wird es, wenn lokale Einheiten einerseits ihre Ziele und Autonomie behalten sollen, aber durch zentrale Vorgaben bzw. Anreize so gesteuert werden sollen, dass ein {\"u}bergeordnetes Ziel erreicht wird. Ein praktisches Beispiel dieses allgemeinen Optimierungsproblems findet sich im Gesundheitswesen. Das Management von modernen Kliniken ist stets mit dem Problem konfrontiert, die Qualit{\"a}t der Pflege zu gew{\"a}hrleisten und gleichzeitig kosteneffizient zu arbeiten. Hier gilt es unter gegeben Rahmenbedingungen und bei Respektierung der Autonomie der Funktionseinheiten, Optimierungsmaßnahmen zu finden und durchzuf{\"u}hren. Vorhandene Werkzeuge zur Simulation und Modellierung bieten f{\"u}r diese Aufgabe keine ausreichend guten Vorgehensmodelle und Modellierungsmechanismen. Die agentenbasierte Simulation erm{\"o}glicht die Abbildung solcher Systeme und die Durchf{\"u}hrung von Simulationsexperimenten zur Bewertung einzelner Maßnahmen. Es werden L{\"o}sungswege und Werkzeuge vorgestellt und evaluiert, die den Benutzer bei der Formalisierung des Wissens und der Modellierung solch komplexer Szenarien unterst{\"u}tzen und ein systematisches Vorgehen zur Optimierung erm{\"o}glichen.}, subject = {Simulation}, language = {de} } @phdthesis{Kluegl2000, author = {Kl{\"u}gl, Franziska}, title = {Aktivit{\"a}tsbasierte Verhaltensmodellierung und ihre Unterst{\"u}tzung bei Multiagentensimulationen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2874}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2000}, abstract = {Durch Zusammenf{\"u}hrung traditioneller Methoden zur individuenbasierten Simulation und dem Konzept der Multiagentensysteme steht mit der Multiagentensimulation eine Methodik zur Verf{\"u}gung, die es erm{\"o}glicht, sowohl technisch als auch konzeptionell eine neue Ebene an Detaillierung bei Modellbildung und Simulation zu erreichen. Ein Modell beruht dabei auf dem Konzept einer Gesellschaft: Es besteht aus einer Menge interagierender, aber in ihren Entscheidungen autonomen Einheiten, den Agenten. Diese {\"a}ndern durch ihre Aktionen ihre Umwelt und reagieren ebenso auf die f{\"u}r sie wahrnehmbaren {\"A}nderungen in der Umwelt. Durch die Simulation jedes Agenten zusammen mit der Umwelt, in der er "lebt", wird die Dynamik im Gesamtsystem beobachtbar. In der vorliegenden Dissertation wurde ein Repr{\"a}sentationsschema f{\"u}r Multiagentensimulationen entwickelt werden, das es Fachexperten, wie zum Beispiel Biologen, erm{\"o}glicht, selbst{\"a}ndig ohne traditionelles Programmieren Multiagentenmodelle zu implementieren und mit diesen Experimente durchzuf{\"u}hren. 
Dieses deklarative Schema beruht auf zwei Basiskonzepten: Der K{\"o}rper eines Agenten besteht aus Zustandsvariablen. Das Verhalten des Agenten kann mit Regeln beschrieben werden. Ausgehend davon werden verschiedene Strukturierungsans{\"a}tze behandelt. Das wichtigste Konzept ist das der "Aktivit{\"a}t", einer Art "Verhaltenszustand": W{\"a}hrend der Agent in einer Aktivit{\"a}t A verweilt, f{\"u}hrt er die zugeh{\"o}rigen Aktionen aus und dies solange, bis eine Regel feuert, die diese Aktivit{\"a}t beendet und eine neue Aktivit{\"a}t ausw{\"a}hlt. Durch Indizierung dieser Regeln bei den zugeh{\"o}rigen Aktivit{\"a}ten und Einf{\"u}hrung von abstrakten Aktivit{\"a}ten entsteht ein Schema f{\"u}r eine vielf{\"a}ltig strukturierbare Verhaltensbeschreibung. Zu diesem Schema wurde ein Interpreter entwickelt, der ein derartig repr{\"a}sentiertes Modell ausf{\"u}hrt und so Simulationsexperimente mit dem Multiagentenmodell erlaubt. Auf dieser Basis wurde die Modellierungs- und Experimentierumgebung SeSAm ("Shell f{\"u}r Simulierte Agentensysteme") entwickelt. Sie verwendet vorhandene Konzepte aus dem visuellen Programmieren. Mit dieser Umgebung wurden Anwendungsmodelle aus verschiedenen Dom{\"a}nen realisiert: Neben abstrakten Spielbeispielen waren dies vor allem Fragestellungen zu sozialen Insekten, z.B. zum Verhalten von Ameisen, Bienen oder der Interaktion zwischen Bienenv{\"o}lkern und Milbenpopulationen.}, subject = {Agent }, language = {de} } @phdthesis{Boehler2005, author = {B{\"o}hler, Elmar}, title = {Algebraic closures in complexity theory}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-16106}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {We use algebraic closures and structures which are derived from these in complexity theory. We classify problems with Boolean circuits and Boolean constraints according to their complexity. We transfer algebraic structures to structural complexity. We use the generation problem to classify important complexity classes.}, subject = {Komplexit{\"a}tstheorie}, language = {en} } @article{KammererGoesterReichertetal.2021, author = {Kammerer, Klaus and G{\"o}ster, Manuel and Reichert, Manfred and Pryss, R{\"u}diger}, title = {Ambalytics: a scalable and distributed system architecture concept for bibliometric network analyses}, series = {Future Internet}, volume = {13}, journal = {Future Internet}, number = {8}, issn = {1999-5903}, doi = {10.3390/fi13080203}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-244916}, year = {2021}, abstract = {A deep understanding about a field of research is valuable for academic researchers. In addition to technical knowledge, this includes knowledge about subareas, open research questions, and social communities (networks) of individuals and organizations within a given field. With bibliometric analyses, researchers can acquire quantitatively valuable knowledge about a research area by using bibliographic information on academic publications provided by bibliographic data providers. Bibliometric analyses include the calculation of bibliometric networks to describe affiliations or similarities of bibliometric entities (e.g., authors) and group them into clusters representing subareas or communities. Calculating and visualizing bibliometric networks is a nontrivial and time-consuming data science task that requires highly skilled individuals. 
In addition to domain knowledge, researchers must often provide statistical knowledge and programming skills or use software tools having limited functionality and usability. In this paper, we present the ambalytics bibliometric platform, which reduces the complexity of bibliometric network analysis and the visualization of results. It accompanies users through the process of bibliometric analysis and eliminates the need for individuals to have programming skills and statistical knowledge, while preserving advanced functionality, such as algorithm parameterization, for experts. As a proof-of-concept, and as an example of bibliometric analysis outcomes, the calculation of research fronts networks based on a hybrid similarity approach is shown. Being designed to scale, ambalytics makes use of distributed systems concepts and technologies. It is based on the microservice architecture concept and uses the Kubernetes framework for orchestration. This paper presents the initial building block of a comprehensive bibliometric analysis platform called ambalytics, which aims at a high usability for users as well as scalability.}, language = {en} } @article{TsouliasJoerissenNuechter2022, author = {Tsoulias, Nikos and J{\"o}rissen, Sven and N{\"u}chter, Andreas}, title = {An approach for monitoring temperature on fruit surface by means of thermal point cloud}, series = {MethodsX}, volume = {9}, journal = {MethodsX}, issn = {2215-0161}, doi = {10.1016/j.mex.2022.101712}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300270}, year = {2022}, abstract = {Heat and excessive solar radiation can produce abiotic stresses during apple maturation, affecting fruit quality. Therefore, monitoring the temperature on the fruit surface (FST) over the growing period can help to identify thresholds above which several physiological disorders such as sunburn may occur in apple. The current approaches neglect spatial variation of FST and have reduced repeatability, resulting in unreliable predictions. In this study, LiDAR laser scanning and thermal imaging were employed to detect the temperature on the fruit surface by means of a 3D point cloud. A process for calibrating the two sensors based on an active board target and producing a 3D thermal point cloud was suggested. After calibration, the sensor system was utilised to scan the fruit trees, while temperature values assigned in the corresponding 3D point cloud were based on the extrinsic calibration. A fruit detection algorithm was then performed to segment the FST of each apple. • The approach allows the calibration of a LiDAR laser scanner with a thermal camera in order to produce a 3D thermal point cloud. • The method can be applied in apple trees for segmenting FST in 3D, and the approach can be utilised to predict several physiological disorders including sunburn on the fruit surface.}, language = {en} } @phdthesis{Jarschel2014, author = {Jarschel, Michael}, title = {An Assessment of Applications and Performance Analysis of Software Defined Networking}, issn = {1432-8801}, doi = {10.25972/OPUS-10079}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-100795}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {With the introduction of OpenFlow by Stanford University in 2008, a process began in the area of network research that questions the predominant approach of fully distributed network control.
OpenFlow is a communication protocol that allows the network control plane to be externalized from the network devices, such as routers, and realized as a logically centralized entity in software. For this concept, the term "Software Defined Networking" (SDN) was coined during scientific discourse. For the network operators, this concept has several advantages. The two most important can be summarized under the headings of cost savings and flexibility. Firstly, it is possible through the uniform interface for network hardware ("Southbound API"), as implemented by OpenFlow, to combine devices and software from different manufacturers, which increases the innovation and price pressure on them. Secondly, the realization of the network control plane as freely programmable software with open interfaces ("Northbound API") provides the opportunity to adapt it to the individual circumstances of the operator's network and to exchange information with the applications it serves. This allows the network to be more flexible and to react more quickly to changing circumstances as well as to transport the traffic more effectively and tailored to the user's "Quality of Experience" (QoE). The approach of a separate network control layer for packet-based networks is not new and has already been proposed several times in the past. Therefore, the SDN approach has raised many questions about its feasibility in terms of efficiency and applicability. These questions are caused to some extent by the fact that there is no generally accepted definition of the SDN concept to date. It is therefore a part of this thesis to derive such a definition. In addition, several of the open issues are investigated. These investigations follow three aspects: performance evaluation of Software Defined Networking, applications on the SDN control layer, and the usability of the SDN Northbound-API for creating application-awareness in network operation. Performance Evaluation of Software Defined Networking: The question of the efficiency of an SDN-based system was from the beginning one of the most important. In this thesis, experimental measurements of the performance of OpenFlow-enabled switch hardware and control software were conducted for the purpose of answering this question. The results of these measurements were used as input parameters for establishing an analytical model of the reactive SDN approach. Through the model it could be determined that the performance of the software control layer, often called "Controller", is crucial for the overall performance of the system, but that the approach is generally viable. Based on this finding, a software tool for analyzing the performance of SDN controllers was developed. This software allows the emulation of the forwarding layer of an SDN network towards the control software and can thus determine its performance in different situations and configurations. The measurements with this software showed that there are quite significant differences in the behavior of different control software implementations. Among other things, it has been shown that some implementations show different characteristics for various switches, in particular in terms of message processing speed. Under certain circumstances this can lead to network failures. Applications on the SDN control layer: The core piece of software defined networking is the intelligent network applications that operate on the control layer.
However, their development is still in its infancy and little is known about the technical possibilities and their limitations. Therefore, the relationship between an SDN-based and a classical implementation of a network function is investigated in this thesis. This function is the monitoring of network links and the traffic they carry. A typical approach for this task has been built based on wiretapping and specialized measurement hardware and compared with an implementation based on OpenFlow switches and a special SDN control application. The results of the comparison show that the SDN version can compete with the traditional measurement set-up in terms of measurement accuracy for bandwidth and delay estimation. However, a compromise has to be found for measurements below the millisecond range. Another question regarding the SDN control applications is whether and how well they can solve existing problems in networks. Two programs have been developed based on SDN in this thesis to solve two typical network issues. The first is the tool "IPOM", which enables considerably more flexibility in the study of effects of network structures for a researcher who is confined to a fixed physical test network topology. The second software provides an interface between the Cloud Orchestration Software "OpenNebula" and an OpenFlow controller. The purpose of this software was to investigate experimentally whether a pre-notification of the network of an impending relocation of a virtual service in a data center is sufficient to ensure the continuous operation of that service. This was demonstrated using the example of a video service. Usability of the SDN Northbound API for creating application-awareness in network operation: Currently, the fact that the network and the applications that run on it are developed and operated separately leads to problems in network operation. With the Northbound-API, SDN offers an open interface that enables the exchange of information between both worlds during operation. One aim of this thesis was to investigate whether this interface can be exploited so that the QoE experienced by the user can be maintained at a high level. For this purpose, the QoE influence factors were determined for a challenging application by means of a subjective survey study. The application is cloud gaming, in which the calculation of video game environments takes place in the cloud and is transported via video over the network to the user. It was shown that apart from the most important factor influencing QoS, i.e., packet loss on the downlink, the type of game and its speed also play a role. This demonstrates that in addition to QoS the application state is important and should be communicated to the network. Since an implementation of such a state-conscious SDN for the example of Cloud Gaming was not possible due to its proprietary implementation, in this thesis the application "YouTube video streaming" was chosen as an alternative. For this application, status information is retrievable via the "Yomo" tool and can be used for network control.
It was shown that an SDN-based implementation of an application-aware network has distinct advantages over traditional network management methods and that the user quality can be maintained in spite of disturbances.}, subject = {Leistungsbewertung}, language = {en} } @article{GageikStrohmeierMontenegro2013, author = {Gageik, Nils and Strohmeier, Michael and Montenegro, Sergio}, title = {An Autonomous UAV with an Optical Flow Sensor for Positioning and Navigation}, series = {International Journal of Advanced Robotic Systems}, journal = {International Journal of Advanced Robotic Systems}, doi = {10.5772/56813}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96368}, year = {2013}, abstract = {A procedure to control all six DOF (degrees of freedom) of a UAV (unmanned aerial vehicle) without an external reference system and to enable fully autonomous flight is presented here. For 2D positioning the principle of optical flow is used. Together with the output of height estimation, fusing ultrasonic, infrared, inertial and pressure sensor data, the 3D position of the UAV can be computed, controlled and steered. All data processing is done on the UAV. An external computer with a pathway planning interface is for commanding purposes only. The presented system is part of the AQopterI8 project, which aims to develop an autonomous flying quadrocopter for indoor application. The focus of this paper is 2D positioning using an optical flow sensor. As a result of the performed evaluation, it can be concluded that for position hold, the standard deviation of the position error is 10 cm and after landing the position error is about 30 cm.}, language = {en} } @phdthesis{Peng2019, author = {Peng, Dongliang}, title = {An Optimization-Based Approach for Continuous Map Generalization}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-104-4}, doi = {10.25972/WUP-978-3-95826-105-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174427}, school = {W{\"u}rzburg University Press}, pages = {xv, 132}, year = {2019}, abstract = {Maps are the main tool to represent geographical information. Geographical information is usually scale-dependent, so users need to have access to maps at different scales. In our digital age, the access is realized by zooming. As discrete changes during the zooming tend to distract users, smooth changes are preferred. This is why some digital maps are trying to make the zooming as continuous as they can. The process of producing maps at different scales with smooth changes is called continuous map generalization. In order to produce maps of high quality, cartographers often take into account additional requirements. These requirements are transferred to models in map generalization. Optimization for map generalization is important not only because it finds optimal solutions in the sense of the models, but also because it helps us to evaluate the quality of the models. Optimization, however, becomes more delicate when we deal with continuous map generalization. In this area, there are requirements not only for a specific map but also for relations between maps at different scales. This thesis is about continuous map generalization based on optimization. First, we show the background of our research topics. Second, we find optimal sequences for aggregating land-cover areas. We compare the A* algorithm and integer linear programming in completing this task.
Third, we continuously generalize county boundaries to provincial boundaries based on compatible triangulations. We morph between the two sets of boundaries, using dynamic programming to compute the correspondence. Fourth, we continuously generalize buildings to built-up areas by aggregating and growing. In this work, we group buildings with the help of a minimum spanning tree. Fifth, we define vertex trajectories that allow us to morph between polylines. We require that both the angles and the edge lengths change linearly over time. As it is impossible to fulfill all of these requirements simultaneously, we mediate between them using least-squares adjustment. Sixth, we discuss the performance of some commonly used data structures for a specific spatial problem. Seventh, we conclude this thesis and present open problems.}, subject = {Generalisierung }, language = {en} } @unpublished{Dandekar2023, author = {Dandekar, Thomas}, title = {Analysing the phase space of the standard model and its basic four forces from a qubit phase transition perspective: implications for large-scale structure generation and early cosmological events}, doi = {10.25972/OPUS-29858}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-298580}, pages = {42}, year = {2023}, abstract = {The phase space for the standard model of the basic four forces for n quanta includes all possible ensemble combinations of their quantum states m, a total of n**m states. Neighbor states reach according to transition possibilities (S-matrix) with emergent time from entropic ensemble gradients. We replace the "big bang" by a condensation event (interacting qubits become decoherent) and inflation by a crystallization event - the crystal unit cell guarantees same symmetries everywhere. Interacting qubits solidify and form a rapidly growing domain where the n**m states become separated ensemble states, rising long-range forces stop ultimately further growth. After that very early events, standard cosmology with the hot fireball model takes over. Our theory agrees well with lack of inflation traces in cosmic background measurements, large-scale structure of voids and filaments, supercluster formation, galaxy formation, dominance of matter and life-friendliness. We prove qubit interactions to be 1,2,4 or 8 dimensional (agrees with E8 symmetry of our universe). Repulsive forces at ultrashort distances result from quantization, long-range forces limit crystal growth. Crystals come and go in the qubit ocean. This selects for the ability to lay seeds for new crystals, for self-organization and life-friendliness. We give energy estimates for free qubits vs bound qubits, misplacements in the qubit crystal and entropy increase during qubit decoherence / crystal formation. Scalar fields for color interaction and gravity derive from the permeating qubit-interaction field. Hence, vacuum energy gets low only inside the qubit crystal. Condensed mathematics may advantageously model free / bound qubits in phase space.}, language = {en} } @phdthesis{Hock2014, author = {Hock, David Rog{\´e}r}, title = {Analysis and Optimization of Resilient Routing in Core Communication Networks}, issn = {1432-8801}, doi = {10.25972/OPUS-10168}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-101681}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {175}, year = {2014}, abstract = {Routing is one of the most important issues in any communication network. It defines on which path packets are transmitted from the source of a connection to the destination. 
It allows controlling the distribution of flows between different locations in the network and thereby is a means to influence the load distribution or to meet certain constraints imposed by particular applications. As failures in communication networks appear regularly and cannot be completely avoided, routing is required to be resilient against such outages, i.e., routing still has to be able to forward packets on backup paths even if primary paths are not working any more. Throughout the years, various routing technologies have been introduced that are very different in their control structure, in their way of working, and in their ability to handle certain failure cases. Each of the different routing approaches opens up its own specific questions regarding configuration, optimization, and inclusion of resilience issues. This monograph investigates, with the example of three particular routing technologies, some concrete issues regarding the analysis and optimization of resilience. It thereby contributes to a better general, technology-independent understanding of these approaches and of their diverse potential for use in future network architectures. The first considered routing type is decentralized intra-domain routing based on administrative IP link costs and the shortest path principle. Typical examples are today's common intra-domain routing protocols OSPF and IS-IS. This type of routing includes automatic restoration abilities in case of failures, which makes it in general very robust even in the case of severe network outages including several failed components. Furthermore, special IP-Fast Reroute mechanisms allow for a faster reaction to outages. For routing based on link costs, traffic engineering, e.g. the optimization of the maximum relative link load in the network, can be done indirectly by changing the administrative link costs to adequate values. The second considered routing type, MPLS-based routing, is based on the a priori configuration of primary and backup paths, so-called Label Switched Paths. The routing layout of MPLS paths offers more freedom compared to IP-based routing as it is not restricted by any shortest path constraints, but arbitrary paths can be set up. However, this in general involves a higher configuration effort. Finally, in the third considered routing type, typically centralized routing using a Software Defined Networking (SDN) architecture, simple switches only forward packets according to routing decisions made by centralized controller units. SDN-based routing layouts offer the same freedom as explicit paths configured using MPLS. In case of a failure, new rules can be set up by the controllers to continue the routing in the reduced topology. However, new resilience issues arise, caused by the centralized architecture. If controllers are not reachable anymore, the forwarding rules in the single nodes cannot be adapted.
In severe failure scenarios, this might render a rerouting in case of connection problems infeasible.}, subject = {Leistungsbewertung}, language = {en} } @article{GreubelAndresHennecke2023, author = {Greubel, Andr{\´e} and Andres, Daniela and Hennecke, Martin}, title = {Analyzing reporting on ransomware incidents: a case study}, series = {Social Sciences}, volume = {12}, journal = {Social Sciences}, number = {5}, issn = {2076-0760}, doi = {10.3390/socsci12050265}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313746}, year = {2023}, abstract = {Knowledge about ransomware is important for protecting sensitive data and for participating in public debates about suitable regulation regarding its security. However, as of now, this topic has received little to no attention in most school curricula. As such, it is desirable to analyze what citizens can learn about this topic outside of formal education, e.g., from news articles. This analysis is relevant both for analyzing the public discourse about ransomware and for identifying what aspects of this topic should be included in the limited time available for this topic in formal education. Thus, this paper was motivated by both educational and media research. The central goal is to explore how the media reports on this topic and, additionally, to identify potential misconceptions that could stem from this reporting. To do so, we conducted an exploratory case study into the reporting of 109 media articles regarding a high-impact ransomware event: the shutdown of the Colonial Pipeline (located in the east of the USA). We analyzed how the articles introduced central terminology, what details were provided, what details were not, and what (mis-)conceptions readers might receive from them. Our results show that an introduction of the terminology and technical concepts of security is insufficient for a complete understanding of the incident. Most importantly, the articles may lead to four misconceptions about ransomware that are likely to lead to misleading conclusions about the responsibility for the incident and possible political and technical options to prevent such attacks in the future.}, language = {en} } @article{ZirkelCecilSchaeferetal.2012, author = {Zirkel, J. and Cecil, A. and Sch{\"a}fer, F. and Rahlfs, S. and Ouedraogo, A. and Xiao, K. and Sawadogo, S. and Coulibaly, B. and Becker, K. and Dandekar, T.}, title = {Analyzing Thiol-Dependent Redox Networks in the Presence of Methylene Blue and Other Antimalarial Agents with RT-PCR-Supported in silico Modeling}, series = {Bioinformatics and Biology Insights}, volume = {6}, journal = {Bioinformatics and Biology Insights}, doi = {10.4137/BBI.S10193}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-123751}, pages = {287-302}, year = {2012}, abstract = {BACKGROUND: In the face of growing resistance in malaria parasites to drugs, pharmacological combination therapies are important. There is accumulating evidence that methylene blue (MB) is an effective drug against malaria. Here we explore the biological effects of both MB alone and in combination therapy using modeling and experimental data. RESULTS: We built a model of the central metabolic pathways in P. falciparum. Metabolic flux modes and their changes under MB were calculated by integrating experimental data (RT-PCR data on mRNAs for redox enzymes) as constraints and results from the YANA software package for metabolic pathway calculations.
Several different lines of MB attack on Plasmodium redox defense were identified by analysis of the network effects. Next, chloroquine resistance based on pfmdr and pfcrt transporters, as well as pyrimethamine/sulfadoxine resistance (by mutations in DHF/DHPS), were modeled in silico. Further modeling shows that MB has a favorable synergism on antimalarial network effects with these commonly used antimalarial drugs. CONCLUSIONS: Theoretical and experimental results support that methylene blue should, because of its resistance-breaking potential, be further tested as a key component in drug combination therapy efforts in holoendemic areas.}, language = {en} } @phdthesis{Kindermann2016, author = {Kindermann, Philipp}, title = {Angular Schematization in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-020-7 (print)}, doi = {10.25972/WUP-978-3-95826-021-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112549}, school = {W{\"u}rzburg University Press}, pages = {184}, year = {2016}, abstract = {Graphs are a frequently used tool to model relationships among entities. A graph is a binary relation between objects, that is, it consists of a set of objects (vertices) and a set of pairs of objects (edges). Networks are common examples of modeling data as a graph. For example, relationships between persons in a social network, or network links between computers in a telecommunication network can be represented by a graph. The clearest way to illustrate the modeled data is to visualize the graphs. The field of Graph Drawing deals with the problem of finding algorithms to automatically generate graph visualizations. The task is to find a "good" drawing, which can be measured by different criteria such as the number of crossings between edges or the used area. In this thesis, we study Angular Schematization in Graph Drawing. By this, we mean drawings with large angles (for example, between the edges at common vertices or at crossing points). The thesis consists of three parts. First, we deal with the placement of boxes. Boxes are axis-parallel rectangles that can, for example, contain text. They can be placed on a map to label important sites, or can be used to describe semantic relationships between words in a word network. In the second part of the thesis, we consider graph drawings that visually guide the viewer. These drawings generally induce large angles between edges that meet at a vertex. Furthermore, the edges are drawn crossing-free and in a way that makes them easy to follow for the human eye. The third and final part is devoted to crossings with large angles. In drawings with crossings, it is important to have large angles between edges at their crossing point, preferably right angles.}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Assisting Analysis and Understanding of Quran Search Results with Interactive Scatter Plots and Tables}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55840}, year = {2011}, abstract = {The Quran is the holy book of Islam consisting of 6236 verses divided into 114 chapters called suras. Many verses are similar and even identical. Searching for similar texts (e.g. verses) could return thousands of verses that, when displayed completely or partly as a textual list, would make analysis and understanding difficult and confusing. Moreover, it would be visually impossible to instantly figure out the overall distribution of the retrieved verses in the Quran.
As a consequence, reading and analyzing the verses would be tedious and unintuitive. In this study, a combination of interactive scatter plots and tables has been developed to assist analysis and understanding of the search result. Retrieved verses are clustered by chapters, and a weight is assigned to each cluster according to the number of verses it contains, so that users could visually identify the most relevant areas and figure out the places of revelation of the verses. Users visualize the complete result and can select a region of the plot to zoom in, or click on a marker to display a table containing verses with their English translation side by side.}, subject = {Text Mining}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Assisting Understanding, Retention, and Dissemination of Religious Texts Knowledge with Modeling, and Visualization Techniques: The Case of The Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55927}, year = {2011}, abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. Once deeper understanding is achieved, one would like to organize and manage her/his knowledge in such a way that it could be easily remembered and efficiently transmitted to others. In this paper, books organized in terms of chapters consisting of verses are considered as the source of knowledge to be modeled. The knowledge model consists of verses with their metadata and semantic annotations. The metadata represent the multiple perspectives of knowledge modeling. Verses with their metadata and annotations form a meta-model, which will be published on a web Mashup. The meta-model with the linking between its elements constitutes a knowledge base. An XML-based annotation system that breaks down the learning process into specific tasks helps construct the desired meta-model. The system is made up of user interfaces for creating metadata, annotating chapters' contents according to user-selected semantics, and templates for publishing the generated knowledge on the Internet. The proposed software system improves comprehension and retention of knowledge contained in religious texts through modeling and visualization. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts. It is expected that this short ongoing study would motivate others to engage in devising and offering software systems for cross-religion learning.}, subject = {Wissensmanagement}, language = {en} } @article{MandelHoernleinIflandetal.2011, author = {Mandel, Alexander and H{\"o}rnlein, Alexander and Ifland, Marianus and L{\"u}neburg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Aufwandsanalyse f{\"u}r computerunterst{\"u}tzte Multiple-Choice Papierklausuren}, series = {GMS Journal for Medical Education}, volume = {28}, journal = {GMS Journal for Medical Education}, number = {4}, doi = {10.3205/zma000767}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134386}, pages = {1-15, Doc55}, year = {2011}, abstract = {Introduction: Multiple-choice examinations are still fundamental for assessment in medical degree programs. In addition to content-related research, the optimization of the technical procedure is an important question.
Medical examiners face three options: paper-based examinations with or without computer support, or completely electronic examinations. Critical aspects are the effort for formatting, the logistic effort during the actual examination, quality, promptness and effort of the correction, the time for making the documents available for inspection by the students, and the statistical analysis of the examination results. Methods: For three semesters, a computer program for the input and formatting of MC-questions in medical and other paper-based examinations has been used and continuously improved at Wuerzburg University. In the winter semester (WS) 2009/10 eleven, in the summer semester (SS) 2010 twelve, and in WS 2010/11 thirteen medical examinations were conducted with the program and automatically evaluated. For the last two semesters, the remaining manual workload was recorded. Results: The effort for the formatting and the subsequent analysis, including adjustments of the analysis, of an average examination with about 140 participants and about 35 questions was 5-7 hours for exams without complications in the winter semester 2009/2010, about 2 hours in SS 2010, and about 1.5 hours in the winter semester 2010/11. Including exams with complications, the average time was about 3 hours per exam in SS 2010 and 2.67 hours in WS 10/11. Discussion: For conventional multiple-choice exams, the computer-based formatting and evaluation of paper-based exams offers a significant time reduction for lecturers in comparison with the manual correction of paper-based exams; compared to purely electronically conducted exams, it needs a much simpler technological infrastructure and fewer staff during the exam.}, language = {de} } @article{WolffRutter2012, author = {Wolff, Alexander and Rutter, Ignaz}, title = {Augmenting the Connectivity of Planar and Geometric Graphs}, series = {Journal of Graph Algorithms and Applications}, journal = {Journal of Graph Algorithms and Applications}, doi = {10.7155/jgaa.00275}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97587}, year = {2012}, abstract = {In this paper we study connectivity augmentation problems. Given a connected graph G with some desirable property, we want to make G 2-vertex connected (or 2-edge connected) by adding edges such that the resulting graph keeps the property. The aim is to add as few edges as possible. The property that we consider is planarity, both in an abstract graph-theoretic and in a geometric setting, where vertices correspond to points in the plane and edges to straight-line segments. We show that it is NP-hard to find a minimum-cardinality augmentation that makes a planar graph 2-edge connected. For making a planar graph 2-vertex connected this was known. We further show that both problems are hard in the geometric setting, even when restricted to trees. The problems remain hard for higher degrees of connectivity. On the other hand we give polynomial-time algorithms for the special case of convex geometric graphs. We also study the following related problem. Given a planar (plane geometric) graph G, two vertices s and t of G, and an integer c, how many edges have to be added to G such that G is still planar (plane geometric) and contains c edge- (or vertex-) disjoint s-t paths? For the planar case we give a linear-time algorithm for c = 2.
For the plane geometric case we give optimal worst-case bounds for c = 2; for c = 3 we characterize the cases that have a solution.}, language = {en} } @article{KrenzerHeilFittingetal., author = {Krenzer, Adrian and Heil, Stefan and Fitting, Daniel and Matti, Safa and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Automated classification of polyps using deep learning architectures and few-shot learning}, series = {BMC Medical Imaging}, volume = {23}, journal = {BMC Medical Imaging}, doi = {10.1186/s12880-023-01007-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357465}, abstract = {Background Colorectal cancer is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is a colonoscopy. However, not all colon polyps have the risk of becoming cancerous. Therefore, polyps are classified using different classification systems. After the classification, further treatment and procedures are based on the classification of the polyp. Nevertheless, classification is not easy. Therefore, we suggest two novel automated classification systems assisting gastroenterologists in classifying polyps based on the NICE and Paris classification. Methods We build two classification systems. One classifies polyps based on their shape (Paris). The other classifies polyps based on their texture and surface patterns (NICE). A two-step process for the Paris classification is introduced: first, detecting and cropping the polyp on the image, and second, classifying the polyp based on the cropped area with a transformer network. For the NICE classification, we design a few-shot learning algorithm based on the Deep Metric Learning approach. The algorithm creates an embedding space for polyps, which allows classification from a few examples to account for the data scarcity of NICE annotated images in our database. Results For the Paris classification, we achieve an accuracy of 89.35 \%, surpassing all papers in the literature and establishing a new state-of-the-art and baseline accuracy for other publications on a public data set. For the NICE classification, we achieve a competitive accuracy of 81.13 \% and thereby demonstrate the viability of the few-shot learning paradigm in polyp classification in data-scarce environments. Additionally, we show different ablations of the algorithms. Finally, we further elaborate on the explainability of the system by showing heat maps of the neural network explaining neural activations. Conclusion Overall, we introduce two polyp classification systems to assist gastroenterologists. We achieve state-of-the-art performance in the Paris classification and demonstrate the viability of the few-shot learning paradigm in the NICE classification, addressing the prevalent data scarcity issues faced in medical machine learning.}, language = {en} } @article{BeckerCaminitiFiorellaetal.2013, author = {Becker, Martin and Caminiti, Saverio and Fiorella, Donato and Francis, Louise and Gravino, Pietro and Haklay, Mordechai (Muki) and Hotho, Andreas and Loreto, Vittorio and Mueller, Juergen and Ricchiuti, Ferdinando and Servedio, Vito D. P.
and Sirbu, Alina and Tria, Francesca}, title = {Awareness and Learning in Participatory Noise Sensing}, series = {PLOS ONE}, volume = {8}, journal = {PLOS ONE}, number = {12}, issn = {1932-6203}, doi = {10.1371/journal.pone.0081638}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-127675}, pages = {e81638}, year = {2013}, abstract = {The development of ICT infrastructures has facilitated the emergence of new paradigms for looking at society and the environment over the last few years. Participatory environmental sensing, i.e. directly involving citizens in environmental monitoring, is one example, which is hoped to encourage learning and enhance awareness of environmental issues. In this paper, an analysis of the behaviour of individuals involved in noise sensing is presented. Citizens have been involved in noise measuring activities through the WideNoise smartphone application. This application has been designed to record both objective (noise samples) and subjective (opinions, feelings) data. The application has been open for free use by anyone and has been widely employed worldwide. In addition, several test cases have been organised in European countries. Based on the information submitted by users, an analysis of emerging awareness and learning is performed. The data show that changes in the way the environment is perceived after repeated usage of the application do appear. Specifically, users learn how to recognise different noise levels they are exposed to. Additionally, the subjective data collected indicate an increased user involvement over time and a categorisation effect between pleasant and less pleasant environments.}, language = {en} } @article{WienrichDoellingerHein2021, author = {Wienrich, Carolin and D{\"o}llinger, Nina and Hein, Rebecca}, title = {Behavioral Framework of Immersive Technologies (BehaveFIT): How and why virtual reality can support behavioral change processes}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.627194}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258796}, year = {2021}, abstract = {The design and evaluation of assisting technologies to support behavior change processes have become an essential topic within the field of human-computer interaction research in general and the field of immersive intervention technologies in particular. The mechanisms and success of behavior change techniques and interventions are broadly investigated in the field of psychology. However, it is not always easy to adapt these psychological findings to the context of immersive technologies. The lack of theoretical foundation also leads to a lack of explanation as to why and how immersive interventions support behavior change processes. The Behavioral Framework for immersive Technologies (BehaveFIT) addresses this lack by 1) presenting an intelligible categorization and condensation of psychological barriers and immersive features, by 2) suggesting a mapping that shows why and how immersive technologies can help to overcome barriers, and finally by 3) proposing a generic prediction path that enables a structured, theory-based approach to the development and evaluation of immersive interventions. These three steps explain how BehaveFIT can be used, and include guiding questions for each step. Further, two use cases illustrate the usage of BehaveFIT.
Thus, the present paper contributes guidance for immersive intervention design and evaluation, showing that immersive interventions support behavior change processes and explaining and predicting 'why' and 'how' immersive interventions can bridge the intention-behavior gap.}, language = {en} } @unpublished{Dandekar2019, author = {Dandekar, Thomas}, title = {Biological heuristics applied to cosmology suggests a condensation nucleus as start of our universe and inflation cosmology replaced by a period of rapid Weiss domain-like crystal growth}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-183945}, pages = {24}, year = {2019}, abstract = {Cosmology often uses intricate formulas and mathematics to derive new theories and concepts. We do something different in this paper: We look at biological processes and derive heuristics from these, so that the revised cosmology agrees with astronomical observations but also agrees with standard biological observations. We show that we then have to replace any type of singularity at the start of the universe by a condensation nucleus and that the very early period of the universe usually assumed to be inflation has to be replaced by a period of rapid crystal growth as in Weiss magnetization domains. Impressively, these minor modifications agree well with astronomical observations, including the removal of the strong inflation perturbations, which were never observed in the recent BICEP2 experiments. Furthermore, looking at biological principles suggests that such a new theory with a condensation nucleus at the start and a first rapid phase of magnetization-like growth of the ordered, physical-laws-obeying lattice we live in is in fact the only convincing theory of the early phases of our universe that is also compatible with current observations. We show in detail in the following that such a process of crystal creation, breaking of new crystal seeds and ultimate evaporation of the present crystal readily leads over several generations to an evolution and selection of better, more stable and more self-organizing crystals. Moreover, this addresses the "fine-tuning" question of why our universe is fine-tuned to favor life: our Universe is self-organizing enough to have enough offspring, and the detailed physics involved is at the same time highly favorable for all self-organizing processes, including life. This biological theory contrasts with current standard inflation cosmologies. The latter do not perform well in explaining any phenomena of sophisticated structure creation or self-organization. As proteins can only thermodynamically fold by increasing the entropy in the solution around them, we suggest for cosmology that a condensation nucleus for a universe can form only in a "chaotic ocean" of string-soup or quantum foam if the entropy outside of the nucleus rapidly increases. We derive an interaction potential for 1 to n-dimensional strings or quantum-foams and show that they allow only 1D, 2D, 4D or octonion interactions. The latter is the richest structure and agrees with the E8 symmetry fundamental to particle physics; it is also compatible with the ten-dimensional string theory E8, which is part of M-theory. Interestingly, any interactions of other dimensionality can be ruled out using Hurwitz's composition theorem.
Crystallization also explains extremely well why we have only one macroscopic reality and where the worldlines of alternative trajectories exist: they are in other planes of the crystal, and for energy reasons they crystallize mostly at the same time, yielding a beautiful and stable crystal. This explains decoherence and allows determining the size of Planck's quantum h (very small, as the separation of crystal layers by energy is extremely strong). Ultimate dissolution of real crystals suggests an explanation for dark energy agreeing with estimates for the "big rip". The halo distribution of dark matter favoring galaxy formation is readily explained by a crystal seed starting with unit cells made of normal and dark matter. That we have only matter and not antimatter can be explained as there may be right-handed matter crystals and left-handed antimatter crystals. Similarly, real crystals are never perfect, and we argue that exactly such irregularities allow the formation of galaxies, clusters and superclusters. Finally, heuristics from genetics suggest looking for a systems perspective to derive correct vacuum and Higgs boson energies.}, language = {en} } @article{PfitznerMayNuechter2018, author = {Pfitzner, Christian and May, Stefan and N{\"u}chter, Andreas}, title = {Body weight estimation for dose-finding and health monitoring of lying, standing and walking patients based on RGB-D data}, series = {Sensors}, volume = {18}, journal = {Sensors}, number = {5}, doi = {10.3390/s18051311}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176642}, pages = {1311}, year = {2018}, abstract = {This paper describes the estimation of the body weight of a person in front of an RGB-D camera. A survey of different methods for body weight estimation based on depth sensors is given. First, an estimation of people standing in front of a camera is presented. Second, an approach based on a stream of depth images is used to obtain the body weight of a person walking towards a sensor. The algorithm first extracts features from a point cloud and forwards them to an artificial neural network (ANN) to obtain an estimation of body weight. Besides the algorithm for the estimation, this paper further presents an open-access dataset based on measurements from a trauma room in a hospital as well as data from visitors of a public event. In total, the dataset contains 439 measurements. The article illustrates the efficiency of the approach with experiments with persons lying down in a hospital, standing persons, and walking persons. Applicable scenarios for the presented algorithm are body weight-related dosing of emergency patients.}, language = {en} } @article{KirikkayisGallikWinteretal.2023, author = {Kirikkayis, Yusuf and Gallik, Florian and Winter, Michael and Reichert, Manfred}, title = {BPMNE4IoT: a framework for modeling, executing and monitoring IoT-driven processes}, series = {Future Internet}, volume = {15}, journal = {Future Internet}, number = {3}, issn = {1999-5903}, doi = {10.3390/fi15030090}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-304097}, year = {2023}, abstract = {The Internet of Things (IoT) enables a variety of smart applications, including smart home, smart manufacturing, and smart city. By enhancing Business Process Management Systems with IoT capabilities, the execution and monitoring of business processes can be significantly improved. Providing holistic support for modeling, executing and monitoring IoT-driven processes, however, constitutes a challenge.
Existing process modeling and process execution languages, such as BPMN 2.0, are unable to fully meet the IoT characteristics (e.g., asynchronicity and parallelism) of IoT-driven processes. In this article, we present BPMNE4IoT, a holistic framework for modeling, executing and monitoring IoT-driven processes. We introduce various artifacts and events based on the BPMN 2.0 metamodel that allow realizing the desired IoT awareness of business processes. The framework is evaluated based on two real-world scenarios from two different domains. Moreover, we present a user study for comparing BPMNE4IoT and BPMN 2.0. In particular, this study has confirmed that the BPMNE4IoT framework facilitates the support of IoT-driven processes.}, language = {en} } @article{LugrinLatoschikHabeletal.2016, author = {Lugrin, Jean-Luc and Latoschik, Marc Erich and Habel, Michael and Roth, Daniel and Seufert, Christian and Grafe, Silke}, title = {Breaking Bad Behaviors: A New Tool for Learning Classroom Management Using Virtual Reality}, series = {Frontiers in ICT}, volume = {3}, journal = {Frontiers in ICT}, number = {26}, doi = {10.3389/fict.2016.00026}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147945}, year = {2016}, abstract = {This article presents an immersive virtual reality (VR) system for training classroom management skills, with a specific focus on learning to manage disruptive student behavior in face-to-face, one-to-many teaching scenarios. The core of the system is a real-time 3D virtual simulation of a classroom populated by twenty-four semi-autonomous virtual students. The system has been designed as a companion tool for classroom management seminars in a syllabus for primary and secondary school teachers. This will allow lecturers to link theory with practice using the medium of VR. The system is therefore designed for two users: a trainee teacher and an instructor supervising the training session. The teacher is immersed in a real-time 3D simulation of a classroom by means of a head-mounted display and headphones. The instructor operates a graphical desktop console, which renders a view of the class and the teacher, whose avatar movements are captured by a markerless tracking system. This console includes a 2D graphics menu with convenient behavior and feedback control mechanisms to provide human-guided training sessions. The system is built using low-cost consumer hardware and software. Its architecture and technical design are described in detail. A first evaluation confirms its conformance to critical usability requirements (i.e., safety and comfort, believability, simplicity, acceptability, extensibility, affordability, and mobility). Our initial results are promising and constitute the necessary first step toward a possible investigation of the efficiency and effectiveness of such a system in terms of learning outcomes and experience.}, language = {en} } @article{DoellingerWienrichLatoschik2021, author = {D{\"o}llinger, Nina and Wienrich, Carolin and Latoschik, Marc Erich}, title = {Challenges and opportunities of immersive technologies for mindfulness meditation: a systematic review}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.644683}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259047}, pages = {644683}, year = {2021}, abstract = {Mindfulness is considered an important factor of an individual's subjective well-being.
Consequently, Human-Computer Interaction (HCI) has investigated approaches that strengthen mindfulness, i.e., by inventing multimedia technologies to support mindfulness meditation. These approaches often use smartphones, tablets, or consumer-grade desktop systems to allow everyday usage in users' private lives or in the scope of organized therapies. Virtual, Augmented, and Mixed Reality (VR, AR, MR; in short: XR) significantly extend the design space for such approaches. XR covers a wide range of potential sensory stimulation, perceptive and cognitive manipulations, content presentation, interaction, and agency. These facilities are linked to typical XR-specific perceptions that are conceptually closely related to mindfulness research, such as (virtual) presence and (virtual) embodiment. However, a successful exploitation of XR that strengthens mindfulness requires a systematic analysis of the potential interrelation and influencing mechanisms between XR technology, its properties, factors, and phenomena and existing models and theories of the construct of mindfulness. This article reports such a systematic analysis of XR-related research from HCI and life sciences to determine the extent to which existing research frameworks on HCI and mindfulness can be applied to XR technologies, the potential of XR technologies to support mindfulness, and open research gaps. Fifty papers of ACM Digital Library and National Institutes of Health's National Library of Medicine (PubMed) with and without empirical efficacy evaluation were included in our analysis. The results reveal that at the current time, empirical research on XR-based mindfulness support mainly focuses on therapy and therapeutic outcomes. Furthermore, most of the currently investigated XR-supported mindfulness interactions are limited to vocally guided meditations within nature-inspired virtual environments. While an analysis of empirical research on those systems did not reveal differences in mindfulness compared to non-mediated mindfulness practices, various design proposals illustrate that XR has the potential to provide interactive and body-based innovations for mindfulness practice. We propose a structured approach for future work to specify and further explore the potential of XR as mindfulness-support. The resulting framework provides design guidelines for XR-based mindfulness support based on the elements and psychological mechanisms of XR interactions.}, language = {en} } @techreport{NguyenLohHossfeld2023, type = {Working Paper}, author = {Nguyen, Kien and Loh, Frank and Hoßfeld, Tobias}, title = {Challenges of Serverless Deployment in Edge-MEC-Cloud}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32202}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322025}, pages = {4}, year = {2023}, abstract = {The emerging serverless computing may meet Edge Cloud in a beneficial manner as the two offer flexibility and dynamicity in optimizing finite hardware resources. However, the lack of proper study of a joint platform leaves a gap in literature about consumption and performance of such integration. 
To this end, this paper identifies the key questions and proposes a methodology to answer them.}, language = {en} } @phdthesis{Ullmann2015, author = {Ullmann, Tobias}, title = {Characterization of Arctic Environment by Means of Polarimetric Synthetic Aperture Radar (PolSAR) Data and Digital Elevation Models (DEM)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115719}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The ecosystem of the high northern latitudes is affected by the recently changing environmental conditions. The Arctic has undergone a significant climatic change over the last decades. The land coverage is changing and a phenological response to the warming is apparent. Remotely sensed data can assist the monitoring and quantification of these changes. The remote sensing of the Arctic was predominantly carried out by the usage of optical sensors but these encounter problems in the Arctic environment, e.g. the frequent cloud cover or the solar geometry. In contrast, the imaging of Synthetic Aperture Radar is not affected by the cloud cover and the acquisition of radar imagery is independent of the solar illumination. The objective of this work was to explore how polarimetric Synthetic Aperture Radar (PolSAR) data of TerraSAR-X, TanDEM-X, Radarsat-2 and ALOS PALSAR and interferometric-derived digital elevation model data of the TanDEM-X Mission can contribute to collect meaningful information on the actual state of the Arctic Environment. The study was conducted for Canadian sites of the Mackenzie Delta Region and Banks Island and in situ reference data were available for the assessment. The up-to-date analysis of the PolSAR data made the application of the Non-Local Means filtering and of the decomposition of co-polarized data necessary. The Non-Local Means filter showed a high capability to preserve the image values, to keep the edges and to reduce the speckle. This supported not only the suitability for the interpretation but also for the classification. The classification accuracies of Non-Local Means filtered data were in average +10\% higher compared to unfiltered images. The correlation of the co- and quad-polarized decomposition features was high for classes with distinct surface or double bounce scattering and a usage of the co-polarized data is beneficial for regions of natural land coverage and for low vegetation formations with little volume scattering. The evaluation further revealed that the X- and C-Band were most sensitive to the generalized land cover classes. It was found that the X-Band data were sensitive to low vegetation formations with low shrub density, the C-Band data were sensitive to the shrub density and the shrub dominated tundra. In contrast, the L-Band data were less sensitive to the land cover. Among the different dual-polarized data the HH/VV-polarized data were identified to be most meaningful for the characterization and classification, followed by the HH/HV-polarized and the VV/VH-polarized data. The quad-polarized data showed highest sensitivity to the land cover but differences to the co-polarized data were small. The accuracy assessment showed that spectral information was required for accurate land cover classification. The best results were obtained when spectral and radar information was combined. The benefit of including radar data in the classification was up to +15\% accuracy and most significant for the classes wetland and sparse vegetated tundra. 
The best classifications were realized with quad-polarized C-Band and multispectral data and with co-polarized X-Band and multispectral data. The overall accuracy was up to 80\% for unsupervised and up to 90\% for supervised classifications. The results indicated that the shortwave co-polarized data show promise for the classification of tundra land cover since the polarimetric information is sensitive to low vegetation and the wetlands. Furthermore, co-polarized data provide a higher spatial resolution than the quad-polarized data. The analysis of the intermediate digital elevation model data of the TanDEM-X showed a high potential for the characterization of the surface morphology. The basic and relative topographic features were shown to be of high relevance for the quantification of the surface morphology and an area-wide application is feasible. In addition, these data were of value for the classification and delineation of landforms. Such classifications will assist the delineation of geomorphological units and have the potential to identify locations of actual and future morphologic activity.}, subject = {Mackenzie-River-Delta}, language = {en} } @article{PawellekKrmarLeistneretal.2021, author = {Pawellek, Ruben and Krmar, Jovana and Leistner, Adrian and Djajić, Nevena and Otašević, Biljana and Protić, Ana and Holzgrabe, Ulrike}, title = {Charged aerosol detector response modeling for fatty acids based on experimental settings and molecular features: a machine learning approach}, series = {Journal of Cheminformatics}, volume = {13}, journal = {Journal of Cheminformatics}, number = {1}, doi = {10.1186/s13321-021-00532-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-261618}, year = {2021}, abstract = {The charged aerosol detector (CAD) is the latest representative of aerosol-based detectors that generate a response independent of the analytes' chemical structure. This study was aimed at accurately predicting the CAD response of homologous fatty acids under varying experimental conditions. Fatty acids from C12 to C18 were used as model substances due to semivolatile characteristics that caused non-uniform CAD behaviour. Considering both experimental conditions and molecular descriptors, a mixed quantitative structure-property relationship (QSPR) modeling was performed using Gradient Boosted Trees (GBT). The ensemble of 10 decision trees (learning rate set at 0.55, the maximal depth set at 5, and the sample rate set at 1.0) was able to explain approximately 99\% (Q\(^2\): 0.987, RMSE: 0.051) of the observed variance in CAD responses. Validation using an external test compound confirmed the high predictive ability of the established model (R\(^2\): 0.990, RMSEP: 0.050). With respect to the intrinsic attribute selection strategy, GBT used almost all independent variables during model building. Finally, it attributed the highest importance to the power function value, the flow rate of the mobile phase, evaporation temperature, the content of the organic solvent in the mobile phase and molecular descriptors such as molecular weight (MW), Radial Distribution Function-080/weighted by mass (RDF080m) and average coefficient of the last eigenvector from distance/detour matrix (Ve2_D/Dt). The identification of the factors most relevant to the CAD responsiveness has contributed to a better understanding of the underlying mechanisms of signal generation.
An increased CAD response that was obtained for acetone as organic modifier demonstrated its potential to replace the more expensive and environmentally harmful acetonitrile.}, language = {en} } @article{HentschelKobsHotho2022, author = {Hentschel, Simon and Kobs, Konstantin and Hotho, Andreas}, title = {CLIP knows image aesthetics}, series = {Frontiers in Artificial Intelligence}, volume = {5}, journal = {Frontiers in Artificial Intelligence}, issn = {2624-8212}, doi = {10.3389/frai.2022.976235}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-297150}, year = {2022}, abstract = {Most Image Aesthetic Assessment (IAA) methods use a pretrained ImageNet classification model as a base to fine-tune. We hypothesize that content classification is not an optimal pretraining task for IAA, since the task discourages the extraction of features that are useful for IAA, e.g., composition, lighting, or style. On the other hand, we argue that the Contrastive Language-Image Pretraining (CLIP) model is a better base for IAA models, since it has been trained using natural language supervision. Due to the rich nature of language, CLIP needs to learn a broad range of image features that correlate with sentences describing the image content, composition, environments, and even subjective feelings about the image. While it has been shown that CLIP extracts features useful for content classification tasks, its suitability for tasks that require the extraction of style-based features like IAA has not yet been shown. We test our hypothesis by conducting a three-step study, investigating the usefulness of features extracted by CLIP compared to features obtained from the last layer of a comparable ImageNet classification model. In each step, we get more computationally expensive. First, we engineer natural language prompts that let CLIP assess an image's aesthetic without adjusting any weights in the model. To overcome the challenge that CLIP's prompting only is applicable to classification tasks, we propose a simple but effective strategy to convert multiple prompts to a continuous scalar as required when predicting an image's mean aesthetic score. Second, we train a linear regression on the AVA dataset using image features obtained by CLIP's image encoder. The resulting model outperforms a linear regression trained on features from an ImageNet classification model. It also shows competitive performance with fully fine-tuned networks based on ImageNet, while only training a single layer. Finally, by fine-tuning CLIP's image encoder on the AVA dataset, we show that CLIP only needs a fraction of training epochs to converge, while also performing better than a fine-tuned ImageNet model. Overall, our experiments suggest that CLIP is better suited as a base model for IAA methods than ImageNet pretrained networks.}, language = {en} } @techreport{LeGrossmannKrieger2022, type = {Working Paper}, author = {Le, Duy Thanh and Großmann, Marcel and Krieger, Udo R.}, title = {Cloudless Resource Monitoring in a Fog Computing System Enabled by an SDN/NFV Infrastructure}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28072}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280723}, pages = {4}, year = {2022}, abstract = {Today's advanced Internet-of-Things applications raise technical challenges on cloud, edge, and fog computing. 
The design of an efficient, virtualized, context-aware, self-configuring orchestration system of a fog computing system constitutes a major development effort within this very innovative area of research. In this paper we describe the architecture and relevant implementation aspects of a cloudless resource monitoring system interworking with an SDN/NFV infrastructure. It realizes the basic monitoring component of the fundamental MAPE-K principles employed in autonomic computing. Here we present the hierarchical layering and functionality within the underlying fog nodes to generate a working prototype of an intelligent, self-managed orchestrator for advanced IoT applications and services. The latter system has the capability to monitor automatically various performance aspects of the resource allocation among multiple hosts of a fog computing system interconnected by SDN.}, subject = {Datennetz}, language = {en} } @article{SchokraieWarnkenHotzWagenblattetal.2012, author = {Schokraie, Elham and Warnken, Uwe and Hotz-Wagenblatt, Agnes and Grohme, Markus A. and Hengherr, Steffen and F{\"o}rster, Frank and Schill, Ralph O. and Frohme, Marcus and Dandekar, Thomas and Schn{\"o}lzer, Martina}, title = {Comparative proteome analysis of Milnesium tardigradum in early embryonic state versus adults in active and anhydrobiotic state}, series = {PLoS One}, volume = {7}, journal = {PLoS One}, number = {9}, doi = {10.1371/journal.pone.0045682}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134447}, pages = {e45682}, year = {2012}, abstract = {Tardigrades have fascinated researchers for more than 300 years because of their extraordinary capability to undergo cryptobiosis and survive extreme environmental conditions. However, the survival mechanisms of tardigrades are still poorly understood mainly due to the absence of detailed knowledge about the proteome and genome of these organisms. Our study was intended to provide a basis for the functional characterization of expressed proteins in different states of tardigrades. High-throughput, high-accuracy proteomics in combination with a newly developed tardigrade specific protein database resulted in the identification of more than 3000 proteins in three different states: early embryonic state and adult animals in active and anhydrobiotic state. This comprehensive proteome resource includes protein families such as chaperones, antioxidants, ribosomal proteins, cytoskeletal proteins, transporters, protein channels, nutrient reservoirs, and developmental proteins. A comparative analysis of protein families in the different states was performed by calculating the exponentially modified protein abundance index which classifies proteins in major and minor components. This is the first step to analyzing the proteins involved in early embryonic development, and furthermore proteins which might play an important role in the transition into the anhydrobiotic state.}, language = {en} } @article{HossfeldHeegaardKellerer2023, author = {Hossfeld, Tobias and Heegaard, Poul E. and Kellerer, Wolfgang}, title = {Comparing the scalability of communication networks and systems}, series = {IEEE Access}, volume = {11}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2023.3314201}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349403}, pages = {101474-101497}, year = {2023}, abstract = {Scalability is often mentioned in literature, but a stringent definition is missing. 
In particular, there is no general scalability assessment which clearly indicates whether a system scales or not, or whether a system scales better than another. The key contribution of this article is the definition of a scalability index (SI) which quantifies whether a system scales in comparison to another system, a hypothetical system (e.g., a linear system), or the theoretically optimal system. The suggested SI generalizes different metrics from the literature, which are specialized cases of our SI. The primary target of our scalability framework is, however, the benchmarking of two systems, which does not require any reference system. The SI is demonstrated and evaluated for different use cases, namely (1) the performance of an IoT load balancer depending on the system load, (2) the availability of a communication system depending on the size and structure of the network, (3) the scalability comparison of different location selection mechanisms in fog computing with respect to delays and energy consumption, and (4) the comparison of time-sensitive networking (TSN) mechanisms in terms of efficiency and utilization. Finally, we discuss how to use and how not to use the SI and give recommendations and guidelines in practice. To the best of our knowledge, this is the first work which provides a general SI for the comparison and benchmarking of systems, which is the primary target of our scalability analysis.}, language = {en} } @phdthesis{Spoerhase2009, author = {Spoerhase, Joachim}, title = {Competitive and Voting Location}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-52978}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {We consider competitive location problems where two competing providers place their facilities sequentially and users can decide between the competitors. We assume that both competitors act non-cooperatively and aim at maximizing their own benefits. We investigate the complexity and approximability of such problems on graphs, in particular on simple graph classes such as trees and paths. We also develop fast algorithms for single competitive location problems where each provider places a single facility. Voting location, in contrast, aims at identifying locations that meet social criteria. The provider wants to satisfy the users (customers) of the facility to be opened. In general, there is no location that is favored by all users. Therefore, a satisfactory compromise has to be found. To this end, criteria arising from voting theory are considered. The solution of the location problem is understood as the winner of a virtual election among the users of the facilities, in which the potential locations play the role of the candidates and the users represent the voters. Competitive and voting location problems turn out to be closely related.}, subject = {Standortproblem}, language = {en} } @phdthesis{Kosub2001, author = {Kosub, Sven}, title = {Complexity and Partitions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2808}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Computational complexity theory usually investigates the complexity of sets, i.e., the complexity of partitions into two parts. But often it is more appropriate to represent natural problems by partitions into more than two parts. A particularly interesting class of such problems consists of classification problems for relations.
For instance, a binary relation R typically defines a partitioning of the set of all pairs (x,y) into four parts, classifiable according to the cases where R(x,y) and R(y,x) hold, only R(x,y) or only R(y,x) holds, or even neither R(x,y) nor R(y,x) is true. By means of concrete classification problems such as Graph Embedding or Entailment (for propositional logic), this thesis systematically develops tools, in the shape of the Boolean hierarchy of NP-partitions and its refinements, for the qualitative analysis of the complexity of partitions generated by NP-relations. The Boolean hierarchy of NP-partitions is introduced as a generalization of the well-known and well-studied Boolean hierarchy (of sets) over NP. Whereas the latter hierarchy has a very simple structure, the situation is much more complicated for the case of partitions into at least three parts. To get an idea of this hierarchy, alternative descriptions of the partition classes are given in terms of finite, labeled lattices. Based on these characterizations, the Embedding Conjecture is established, providing complete information on the structure of the hierarchy. This conjecture is supported by several results. A natural extension of the Boolean hierarchy of NP-partitions emerges from the lattice-characterization of its classes by considering partition classes generated by finite, labeled posets. It turns out that all significant ideas translate from the case of lattices. The induced refined Boolean hierarchy of NP-partitions enables us to capture the complexity of certain relations (such as Graph Embedding) more accurately and to describe projectively closed partition classes.}, subject = {Partition }, language = {en} } @article{BoehlerCreignouGalotaetal.2012, author = {B{\"o}hler, Elmar and Creignou, Nadia and Galota, Matthias and Reith, Steffen and Schnoor, Henning and Vollmer, Heribert}, title = {Complexity Classifications for Different Equivalence and Audit Problems for Boolean Circuits}, series = {Logical Methods in Computer Science}, volume = {8}, journal = {Logical Methods in Computer Science}, number = {3:27}, doi = {10.2168/LMCS-8(3:27)2012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-131121}, pages = {1 -- 25}, year = {2012}, abstract = {We study Boolean circuits as a representation of Boolean functions and consider different equivalence, audit, and enumeration problems. For a number of restricted sets of gate types (bases) we obtain efficient algorithms, while for all other gate types we show these problems are at least NP-hard.}, language = {en} } @article{SchererFleishmanJonesetal.2021, author = {Scherer, Marc and Fleishman, Sarel J. and Jones, Patrik R. and Dandekar, Thomas and Bencurova, Elena}, title = {Computational Enzyme Engineering Pipelines for Optimized Production of Renewable Chemicals}, series = {Frontiers in Bioengineering and Biotechnology}, volume = {9}, journal = {Frontiers in Bioengineering and Biotechnology}, issn = {2296-4185}, doi = {10.3389/fbioe.2021.673005}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240598}, year = {2021}, abstract = {To enable a sustainable supply of chemicals, novel biotechnological solutions are required that replace the reliance on fossil resources. One potential solution is to utilize tailored biosynthetic modules for the metabolic conversion of CO2 or organic waste to chemicals and fuel by microorganisms.
Currently, it is challenging to commercialize biotechnological processes for renewable chemical biomanufacturing because of a lack of highly active and specific biocatalysts. As experimental methods to engineer biocatalysts are time- and cost-intensive, it is important to establish efficient and reliable computational tools that can speed up the identification or optimization of selective, highly active, and stable enzyme variants for utilization in the biotechnological industry. Here, we review and suggest combinations of effective state-of-the-art software and online tools available for computational enzyme engineering pipelines to optimize metabolic pathways for the biosynthesis of renewable chemicals. Using examples relevant for biotechnology, we explain the underlying principles of enzyme engineering and design and illuminate future directions for automated optimization of biocatalysts for the assembly of synthetic metabolic pathways.}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Computer-based Textual Documents Collation System for Reconstructing the Original Text from Automatically Identified Base Text and Ranked Witnesses}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65749}, year = {2011}, abstract = {Given a collection of diverging documents about some lost original text, any person interested in the text would try reconstructing it from the diverging documents. Whether it is eclecticism, stemmatics, or copy-text, one is expected to explicitly or indirectly select one of the documents as a starting point or as a base text, which could be emended through comparison with remaining documents, so that a text that could be designated as the original document is generated. Unfortunately the process of giving priority to one of the documents also known as witnesses is a subjective approach. In fact even Cladistics, which could be considered as a computer-based approach of implementing stemmatics, does not present or recommend users to select a certain witness as a starting point for the process of reconstructing the original document. In this study, a computational method using a rule-based Bayesian classifier is used, to assist text scholars in their attempts of reconstructing a non-existing document from some available witnesses. The method developed in this study consists of selecting a base text successively and collating it with remaining documents. Each completed collation cycle stores the selected base text and its closest witness, along with a weighted score of their similarities and differences. At the end of the collation process, a witness selected more often by majority of base texts is considered as the probable base text of the collection. Witnesses' scores are weighted using a weighting system, based on effects of types of textual modifications on the process of reconstructing original documents. Users have the possibility to select between baseless and base text collation. If a base text is selected, the task is reduced to ranking the witnesses with respect to the base text, otherwise a base text as well as ranking of the witnesses with respect to the base text are computed and displayed on a bar diagram. 
Additionally, this study includes a recursive algorithm for automatically reconstructing the original text from the identified base text and ranked witnesses.}, subject = {Textvergleich}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Computing Generic Causes of Revelation of the Quranic Verses Using Machine Learning Techniques}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66083}, year = {2011}, abstract = {Because many verses of the holy Quran are similar, there is a high probability that similar verses addressing the same issues share the same generic causes of revelation. In this study, machine learning techniques have been employed in order to automatically derive causes of revelation of Quranic verses. The derivation of the causes of revelation is viewed as a classification problem. Initially, the categories are based on the verses with known causes of revelation, and the testing set consists of the remaining verses. Based on a computed threshold value, a na{\"i}ve Bayesian classifier is used to categorize some verses. After that, using a decision tree classifier, the remaining uncategorized verses are separated into verses that contain indicators (resultative connectors, causative expressions…), and those that do not. As for those verses having indicators, each one is segmented into its constituent clauses by identification of the linking indicators. Then a dominant clause is extracted and considered either as the cause of revelation, or post-processed by adding or subtracting some terms to form a causal clause that constitutes the cause of revelation. Concerning the remaining unclassified verses without indicators, a naive Bayesian classifier is again used to assign each one of them to one of the existing classes based on feature and topic similarity. As for verses that could not be classified so far, manual classification was performed by considering each verse as a category on its own. The result obtained in this study is encouraging, and shows that automatic derivation of Quranic verses' generic causes of revelation is achievable, and reasonably reliable for understanding and implementing the teachings of the Quran.}, subject = {Text Mining}, language = {en} } @article{LatoschikWienrich2022, author = {Latoschik, Marc Erich and Wienrich, Carolin}, title = {Congruence and plausibility, not presence: pivotal conditions for XR experiences and effects, a novel approach}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.694433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284787}, year = {2022}, abstract = {Presence is often considered the most important quale describing the subjective feeling of being in a computer-generated and/or computer-mediated virtual environment. The identification and separation of orthogonal presence components, i.e., the place illusion and the plausibility illusion, has been an accepted theoretical model describing Virtual Reality (VR) experiences for some time. This perspective article challenges this presence-oriented VR theory. First, we argue that a place illusion cannot be the major construct to describe the much wider scope of virtual, augmented, and mixed reality (VR, AR, MR: or XR for short).
Second, we argue that there is no plausibility illusion but merely plausibility, and we derive the place illusion caused by the congruent and plausible generation of spatial cues, and similarly for all the current model's so-defined illusions. Finally, we propose congruence and plausibility to become the central essential conditions in a novel theoretical model describing XR experiences and effects.}, language = {en} } @phdthesis{Loeffler2021, author = {L{\"o}ffler, Andre}, title = {Constrained Graph Layouts: Vertices on the Outer Face and on the Integer Grid}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-146-4}, doi = {10.25972/WUP-978-3-95826-147-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215746}, school = {W{\"u}rzburg University Press}, pages = {viii, 161}, year = {2021}, abstract = {Constraining graph layouts - that is, restricting the placement of vertices and the routing of edges to obey certain constraints - is common practice in graph drawing. In this book, we discuss algorithmic results on two different restriction types: placing vertices on the outer face and on the integer grid. For the first type, we look into the outer k-planar and outer k-quasi-planar graphs, as well as giving a linear-time algorithm to recognize full and closed outer k-planar graphs via Monadic Second-order Logic. For the second type, we consider the problem of transferring a given planar drawing onto the integer grid while preserving the original drawing's topology; we also generalize a variant of Cauchy's rigidity theorem for orthogonal polyhedra of genus 0 to those of arbitrary genus.}, subject = {Graphenzeichnen}, language = {en} } @article{GlemarecLugrinBosseretal.2022, author = {Gl{\´e}marec, Yann and Lugrin, Jean-Luc and Bosser, Anne-Gwenn and Buche, C{\´e}dric and Latoschik, Marc Erich}, title = {Controlling the stage: a high-level control system for virtual audiences in Virtual Reality}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.876433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284601}, year = {2022}, abstract = {This article presents a novel method for controlling a virtual audience system (VAS) in a Virtual Reality (VR) application, called STAGE, which was originally designed for supervised public speaking training in university seminars dedicated to the preparation and delivery of scientific talks. We are interested in creating pedagogical narratives: narratives encompass affective phenomena, and rather than organizing events that change the course of a training scenario, pedagogical plans using our system focus on organizing the affects it arouses for the trainees. Efficiently controlling a virtual audience towards a specific training objective while evaluating the speaker's performance presents a challenge for a seminar instructor: controlling the virtual audience, evaluating the speaker's performance, and adjusting the audience so that it can quickly react to the user's behaviors and interactions place high cognitive and physical demands on the instructor. It is indeed a critical limitation of a number of existing systems that they rely on a Wizard of Oz approach, where the tutor drives the audience in reaction to the user's performance.
We address this problem by integrating a high-level control component for tutors into the VAS; this component allows tutors to use predefined audience behavior rules, to define custom ones, and to intervene at run-time for finer control of the unfolding of the pedagogical plan. At its core, this component offers a tool to program, select, modify and monitor interactive training narratives using a high-level representation. The STAGE offers the following features: i) a high-level API to program pedagogical narratives focusing on a specific public speaking situation and training objectives, ii) an interactive visualization interface, iii) computation and visualization of user metrics, iv) a semi-autonomous virtual audience composed of virtual spectators with automatic reactions to the speaker and surrounding spectators while following the pedagogical plan, and v) the possibility for the instructor to embody a virtual spectator to ask questions or guide the speaker from within the Virtual Environment. We present here the design and implementation of the tutoring system and its integration in STAGE, and discuss its reception by end-users.}, language = {en} } @article{SteiningerAbelZiegleretal.2023, author = {Steininger, Michael and Abel, Daniel and Ziegler, Katrin and Krause, Anna and Paeth, Heiko and Hotho, Andreas}, title = {ConvMOS: climate model output statistics with deep learning}, series = {Data Mining and Knowledge Discovery}, volume = {37}, journal = {Data Mining and Knowledge Discovery}, number = {1}, issn = {1384-5810}, doi = {10.1007/s10618-022-00877-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324213}, pages = {136-166}, year = {2023}, abstract = {Climate models are the tool of choice for scientists researching climate change. Like all models, they suffer from errors, particularly systematic and location-specific representation errors. One way to reduce these errors is model output statistics (MOS), where the model output is fitted to observational data with machine learning. In this work, we assess the use of convolutional Deep Learning climate MOS approaches and present the ConvMOS architecture, which is specifically designed based on the observation that there are systematic and location-specific errors in the precipitation estimates of climate models. We apply ConvMOS models to the simulated precipitation of the regional climate model REMO, showing that a combination of per-location model parameters for reducing location-specific errors and global model parameters for reducing systematic errors is indeed beneficial for MOS performance. We find that ConvMOS models can reduce errors considerably and perform significantly better than three commonly used MOS approaches and plain ResNet and U-Net models in most cases. Our results show that non-linear MOS models underestimate the number of extreme precipitation events, which we alleviate by training models specialized towards extreme precipitation events with the imbalanced regression method DenseLoss. While we consider climate MOS, we argue that aspects of ConvMOS may also be beneficial in other domains with geospatial data, such as air pollution modeling or weather forecasts.}, subject = {Klima}, language = {en} } @article{AtienzadeCastroCortesetal.2012, author = {Atienza, Nieves and de Castro, Natalia and Cort{\´e}s, Carmen and Garrido, M. {\´A}ngeles and Grima, Clara I.
and Hern{\´a}ndez, Gregorio and M{\´a}rquez, Alberto and Moreno-Gonz{\´a}lez, Auxiliadora and N{\"o}llenburg, Martin and Portillo, Jos{\´e} Ram{\´o}n and Reyes, Pedro and Valenzuela, Jes{\´u}s and Trinidad Villar, Maria and Wolff, Alexander}, title = {Cover contact graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78845}, year = {2012}, abstract = {We study problems that arise in the context of covering certain geometric objects called seeds (e.g., points or disks) by a set of other geometric objects called cover (e.g., a set of disks or homothetic triangles). We insist that the interiors of the seeds and the cover elements are pairwise disjoint, respectively, but they can touch. We call the contact graph of a cover a cover contact graph (CCG). We are interested in three types of tasks, both in the general case and in the special case of seeds on a line: (a) deciding whether a given seed set has a connected CCG, (b) deciding whether a given graph has a realization as a CCG on a given seed set, and (c) bounding the sizes of certain classes of CCG's. Concerning (a) we give efficient algorithms for the case that seeds are points and show that the problem becomes hard if seeds and covers are disks. Concerning (b) we show that this problem is hard even for point seeds and disk covers (given a fixed correspondence between graph vertices and seeds). Concerning (c) we obtain upper and lower bounds on the number of CCG's for point seeds.}, subject = {Informatik}, language = {de} } @phdthesis{Fink2014, author = {Fink, Martin}, title = {Crossings, Curves, and Constraints in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, isbn = {978-3-95826-002-3 (print)}, doi = {10.25972/WUP-978-3-95826-003-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-98235}, school = {W{\"u}rzburg University Press}, pages = {222}, year = {2014}, abstract = {In many cases, problems, data, or information can be modeled as graphs. Graphs can be used as a tool for modeling in any case where connections between distinguishable objects occur. Any graph consists of a set of objects, called vertices, and a set of connections, called edges, such that any edge connects a pair of vertices. For example, a social network can be modeled by a graph by transforming the users of the network into vertices and friendship relations between users into edges. Also physical networks like computer networks or transportation networks, for example, the metro network of a city, can be seen as graphs. For making graphs and, thereby, the data that is modeled, well-understandable for users, we need a visualization. Graph drawing deals with algorithms for visualizing graphs. In this thesis, especially the use of crossings and curves is investigated for graph drawing problems under additional constraints. The constraints that occur in the problems investigated in this thesis especially restrict the positions of (a part of) the vertices; this is done either as a hard constraint or as an optimization criterion.}, subject = {Graphenzeichnen}, language = {en} } @techreport{Metzger2020, type = {Working Paper}, author = {Metzger, Florian}, title = {Crowdsensed QoE for the community - a concept to make QoE assessment accessible}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-203748}, pages = {7}, year = {2020}, abstract = {In recent years several community testbeds as well as participatory sensing platforms have successfully established themselves to provide open data to everyone interested. 
Each of them with a specific goal in mind, ranging from collecting radio coverage data up to environmental and radiation data. Such data can be used by the community in their decision making, whether to subscribe to a specific mobile phone service that provides good coverage in an area or in finding a sunny and warm region for the summer holidays. However, the existing platforms are usually limiting themselves to directly measurable network QoS. If such a crowdsourced data set provides more in-depth derived measures, this would enable an even better decision making. A community-driven crowdsensing platform that derives spatial application-layer user experience from resource-friendly bandwidth estimates would be such a case, video streaming services come to mind as a prime example. In this paper we present a concept for such a system based on an initial prototype that eases the collection of data necessary to determine mobile-specific QoE at large scale. In addition we reason why the simple quality metric proposed here can hold its own.}, subject = {Quality of Experience}, language = {en} } @article{DuLauterbachLietal.2020, author = {Du, Shitong and Lauterbach, Helge A. and Li, Xuyou and Demisse, Girum G. and Borrmann, Dorit and N{\"u}chter, Andreas}, title = {Curvefusion — A Method for Combining Estimated Trajectories with Applications to SLAM and Time-Calibration}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {23}, issn = {1424-8220}, doi = {10.3390/s20236918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219988}, year = {2020}, abstract = {Mapping and localization of mobile robots in an unknown environment are essential for most high-level operations like autonomous navigation or exploration. This paper presents a novel approach for combining estimated trajectories, namely curvefusion. The robot used in the experiments is equipped with a horizontally mounted 2D profiler, a constantly spinning 3D laser scanner and a GPS module. The proposed algorithm first combines trajectories from different sensors to optimize poses of the planar three degrees of freedom (DoF) trajectory, which is then fed into continuous-time simultaneous localization and mapping (SLAM) to further improve the trajectory. While state-of-the-art multi-sensor fusion methods mainly focus on probabilistic methods, our approach instead adopts a deformation-based method to optimize poses. To this end, a similarity metric for curved shapes is introduced into the robotics community to fuse the estimated trajectories. Additionally, a shape-based point correspondence estimation method is applied to the multi-sensor time calibration. Experiments show that the proposed fusion method can achieve relatively better accuracy, even if the error of the trajectory before fusion is large, which demonstrates that our method can still maintain a certain degree of accuracy in an environment where typical pose estimation methods have poor performance. 
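A minimal sketch in Python of fusing two time-stamped planar trajectories by resampling onto a common time base and blending with a fixed weight; this is only a simplified stand-in for the deformation-based fusion and shape similarity metric described above, and all inputs are assumed toy data:

import numpy as np

def fuse_trajectories(t1, xy1, t2, xy2, weight=0.5):
    # Resample both (N, 2) trajectories onto a common time base and blend them.
    t = np.union1d(t1, t2)
    a = np.column_stack([np.interp(t, t1, xy1[:, d]) for d in range(2)])
    b = np.column_stack([np.interp(t, t2, xy2[:, d]) for d in range(2)])
    return t, weight * a + (1.0 - weight) * b

t1 = np.linspace(0.0, 1.0, 5)
t2 = np.linspace(0.0, 1.0, 7)
xy1 = np.column_stack([t1, np.zeros_like(t1)])              # straight estimate of a path
xy2 = np.column_stack([t2, 0.1 * np.sin(2 * np.pi * t2)])   # wavy estimate of the same path
t, fused = fuse_trajectories(t1, xy1, t2, xy2)
print(fused.shape)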
In addition, the proposed time-calibration method also achieves high accuracy in estimating point correspondences.}, language = {en} } @techreport{RossiMaurelliUnnithanetal.2021, author = {Rossi, Angelo Pio and Maurelli, Francesco and Unnithan, Vikram and Dreger, Hendrik and Mathewos, Kedus and Pradhan, Nayan and Corbeanu, Dan-Andrei and Pozzobon, Riccardo and Massironi, Matteo and Ferrari, Sabrina and Pernechele, Claudia and Paoletti, Lorenzo and Simioni, Emanuele and Maurizio, Pajola and Santagata, Tommaso and Borrmann, Dorit and N{\"u}chter, Andreas and Bredenbeck, Anton and Zevering, Jasper and Arzberger, Fabian and Reyes Mantilla, Camilo Andr{\´e}s}, title = {DAEDALUS - Descent And Exploration in Deep Autonomy of Lava Underground Structures}, isbn = {978-3-945459-33-1}, issn = {1868-7466}, doi = {10.25972/OPUS-22791}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-227911}, pages = {188}, year = {2021}, abstract = {The DAEDALUS mission concept aims at exploring and characterising the entrance and initial part of Lunar lava tubes within a compact, tightly integrated spherical robotic device, with a complementary payload set and autonomous capabilities. The mission concept addresses specifically the identification and characterisation of potential resources for future ESA exploration, the local environment of the subsurface and its geologic and compositional structure. A sphere is ideally suited to protect sensors and scientific equipment in rough, uneven environments. It will house laser scanners, cameras and ancillary payloads. The sphere will be lowered into the skylight and will explore the entrance shaft, associated caverns and conduits. Lidar (light detection and ranging) systems produce 3D models with high spatial accuracy independent of lighting conditions and visible features. Hence this will be the primary exploration toolset within the sphere. The additional payload that can be accommodated in the robotic sphere consists of camera systems with panoramic lenses and scanners such as multi-wavelength or single-photon scanners. A moving mass will trigger movements. The tether for lowering the sphere will be used for data communication and powering the equipment during the descending phase. Furthermore, the connector tether-sphere will host a WIFI access point, such that data of the conduit can be transferred to the surface relay station. During the exploration phase, the robot will be disconnected from the cable, and will use wireless communication. Emergency autonomy software will ensure that in case of loss of communication, the robot will continue the nominal mission.}, subject = {Mond}, language = {en} } @techreport{RaffeckGeisslerHossfeld2022, type = {Working Paper}, author = {Raffeck, Simon and Geißler, Stefan and Hoßfeld, Tobias}, title = {DBM: Decentralized Burst Mitigation for Self-Organizing LoRa Deployments}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28080}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280809}, pages = {4}, year = {2022}, abstract = {This work proposes a novel approach to disperse dense transmission intervals and reduce bursty traffic patterns without the need for centralized control. 
Furthermore, by keeping the mechanism as close to the Long Range Wide Area Network (LoRaWAN) standard as possible the suggested mechanism can be deployed within existing networks and can even be co-deployed with other devices.}, subject = {Datennetz}, language = {en} } @article{PetschkeStaab2019, author = {Petschke, Danny and Staab, Torsten E.M.}, title = {DDRS4PALS: a software for the acquisition and simulation of lifetime spectra using the DRS4 evaluation board}, series = {SoftwareX}, volume = {10}, journal = {SoftwareX}, doi = {10.1016/j.softx.2019.100261}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-202276}, pages = {100261}, year = {2019}, abstract = {Lifetime techniques are applied to diverse fields of study including materials sciences, semiconductor physics, biology, molecular biophysics and photochemistry. Here we present DDRS4PALS, a software for the acquisition and simulation of lifetime spectra using the DRS4 evaluation board (Paul Scherrer Institute, Switzerland) for time resolved measurements and digitization of detector output pulses. Artifact afflicted pulses can be corrected or rejected prior to the lifetime calculation to provide the generation of high-quality lifetime spectra, which are crucial for a profound analysis, i.e. the decomposition of the true information. Moreover, the pulses can be streamed on an (external) hard drive during the measurement and subsequently downloaded in the offline mode without being connected to the hardware. This allows the generation of various lifetime spectra at different configurations from one single measurement and, hence, a meaningful comparison in terms of analyzability and quality. Parallel processing and an integrated JavaScript based language provide convenient options to accelerate and automate time consuming processes such as lifetime spectra simulations.}, language = {en} } @article{AliMontenegro2016, author = {Ali, Qasim and Montenegro, Sergio}, title = {Decentralized control for scalable quadcopter formations}, series = {International Journal of Aerospace Engineering}, volume = {2016}, journal = {International Journal of Aerospace Engineering}, doi = {10.1155/2016/9108983}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146704}, pages = {9108983}, year = {2016}, abstract = {An innovative framework has been developed for teamwork of two quadcopter formations, each having its specified formation geometry, assigned task, and matching control scheme. Position control for quadcopters in one of the formations has been implemented through a Linear Quadratic Regulator Proportional Integral (LQR PI) control scheme based on explicit model following scheme. Quadcopters in the other formation are controlled through LQR PI servomechanism control scheme. These two control schemes are compared in terms of their performance and control effort. Both formations are commanded by respective ground stations through virtual leaders. Quadcopters in formations are able to track desired trajectories as well as hovering at desired points for selected time duration. In case of communication loss between ground station and any of the quadcopters, the neighboring quadcopter provides the command data, received from the ground station, to the affected unit. 
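A minimal sketch in Python of the standard LQR gain computation for a toy double-integrator axis model, with assumed weights Q and R; it illustrates only the generic LQR machinery, not the explicit-model-following LQR PI design mentioned above:

import numpy as np
from scipy.linalg import solve_continuous_are

# One translational axis: state x = [position, velocity], input u = acceleration.
A = np.array([[0.0, 1.0],
              [0.0, 0.0]])
B = np.array([[0.0],
              [1.0]])
Q = np.diag([10.0, 1.0])   # assumed state weights
R = np.array([[0.1]])      # assumed input weight

P = solve_continuous_are(A, B, Q, R)   # solve the algebraic Riccati equation
K = np.linalg.inv(R) @ B.T @ P         # state-feedback gain, control law u = -K x
print("LQR gain:", K)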
Proposed control schemes have been validated through extensive simulations using MATLAB®/Simulink® that provided favorable results.}, language = {en} } @article{MuellerLeppichGeissetal.2023, author = {M{\"u}ller, Konstantin and Leppich, Robert and Geiß, Christian and Borst, Vanessa and Pelizari, Patrick Aravena and Kounev, Samuel and Taubenb{\"o}ck, Hannes}, title = {Deep neural network regression for normalized digital surface model generation with Sentinel-2 imagery}, series = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, volume = {16}, journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, issn = {1939-1404}, doi = {10.1109/JSTARS.2023.3297710}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349424}, pages = {8508-8519}, year = {2023}, abstract = {In recent history, normalized digital surface models (nDSMs) have been constantly gaining importance as a means to solve large-scale geographic problems. High-resolution surface models are precious, as they can provide detailed information for a specific area. However, measurements with a high resolution are time consuming and costly. Only a few approaches exist to create high-resolution nDSMs for extensive areas. This article explores approaches to extract high-resolution nDSMs from low-resolution Sentinel-2 data, allowing us to derive large-scale models. We thereby utilize the advantages of Sentinel 2 being open access, having global coverage, and providing steady updates through a high repetition rate. Several deep learning models are trained to overcome the gap in producing high-resolution surface maps from low-resolution input data. With U-Net as a base architecture, we extend the capabilities of our model by integrating tailored multiscale encoders with differently sized kernels in the convolution as well as conformed self-attention inside the skip connection gates. Using pixelwise regression, our U-Net base models can achieve a mean height error of approximately 2 m. Moreover, through our enhancements to the model architecture, we reduce the model error by more than 7\%.}, language = {en} } @phdthesis{Nogatz2023, author = {Nogatz, Falco}, title = {Defining and Implementing Domain-Specific Languages with Prolog}, doi = {10.25972/OPUS-30187}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301872}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The landscape of today's programming languages is manifold. With the diversity of applications, the difficulty of adequately addressing and specifying the used programs increases. This often leads to newly designed and implemented domain-specific languages. They enable domain experts to express knowledge in their preferred format, resulting in more readable and concise programs. Due to its flexible and declarative syntax without reserved keywords, the logic programming language Prolog is particularly suitable for defining and embedding domain-specific languages. This thesis addresses the questions and challenges that arise when integrating domain-specific languages into Prolog. We compare the two approaches to define them either externally or internally, and provide assisting tools for each. The grammar of a formal language is usually defined in the extended Backus-Naur form. In this work, we handle this formalism as a domain-specific language in Prolog, and define term expansions that allow to translate it into equivalent definite clause grammars. 
We present the package library(dcg4pt) for SWI-Prolog, which enriches them by an additional argument to automatically process the term's corresponding parse tree. To simplify the work with definite clause grammars, we visualise their application by a web-based tracer. The external integration of domain-specific languages requires the programmer to keep the grammar, parser, and interpreter in sync. In many cases, domain-specific languages can instead be directly embedded into Prolog by providing appropriate operator definitions. In addition, we propose syntactic extensions for Prolog to expand its expressiveness, for instance to state logic formulas with their connectives verbatim. This allows to use all tools that were originally written for Prolog, for instance code linters and editors with syntax highlighting. We present the package library(plammar), a standard-compliant parser for Prolog source code, written in Prolog. It is able to automatically infer from example sentences the required operator definitions with their classes and precedences as well as the required Prolog language extensions. As a result, we can automatically answer the question: Is it possible to model these example sentences as valid Prolog clauses, and how? We discuss and apply the two approaches to internal and external integrations for several domain-specific languages, namely the extended Backus-Naur form, GraphQL, XPath, and a controlled natural language to represent expert rules in if-then form. The created toolchain with library(dcg4pt) and library(plammar) yields new application opportunities for static Prolog source code analysis, which we also present.}, subject = {PROLOG }, language = {en} } @article{SeufertSchroederSeufert2021, author = {Seufert, Anika and Schr{\"o}der, Svenja and Seufert, Michael}, title = {Delivering User Experience over Networks: Towards a Quality of Experience Centered Design Cycle for Improved Design of Networked Applications}, series = {SN Computer Science}, volume = {2}, journal = {SN Computer Science}, number = {6}, issn = {2661-8907}, doi = {10.1007/s42979-021-00851-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-271762}, year = {2021}, abstract = {To deliver the best user experience (UX), the human-centered design cycle (HCDC) serves as a well-established guideline to application developers. However, it does not yet cover network-specific requirements, which become increasingly crucial, as most applications deliver experience over the Internet. The missing network-centric view is provided by Quality of Experience (QoE), which could team up with UX towards an improved overall experience. By considering QoE aspects during the development process, it can be achieved that applications become network-aware by design. In this paper, the Quality of Experience Centered Design Cycle (QoE-CDC) is proposed, which provides guidelines on how to design applications with respect to network-specific requirements and QoE. Its practical value is showcased for popular application types and validated by outlining the design of a new smartphone application. 
We show that combining HCDC and QoE-CDC will result in an application design, which reaches a high UX and avoids QoE degradation.}, language = {en} } @article{SteiningerKobsDavidsonetal.2021, author = {Steininger, Michael and Kobs, Konstantin and Davidson, Padraig and Krause, Anna and Hotho, Andreas}, title = {Density-based weighting for imbalanced regression}, series = {Machine Learning}, volume = {110}, journal = {Machine Learning}, number = {8}, issn = {1573-0565}, doi = {10.1007/s10994-021-06023-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269177}, pages = {2187-2211}, year = {2021}, abstract = {In many real world settings, imbalanced data impedes model performance of learning algorithms, like neural networks, mostly for rare cases. This is especially problematic for tasks focusing on these rare occurrences. For example, when estimating precipitation, extreme rainfall events are scarce but important considering their potential consequences. While there are numerous well studied solutions for classification settings, most of them cannot be applied to regression easily. Of the few solutions for regression tasks, barely any have explored cost-sensitive learning which is known to have advantages compared to sampling-based methods in classification tasks. In this work, we propose a sample weighting approach for imbalanced regression datasets called DenseWeight and a cost-sensitive learning approach for neural network regression with imbalanced data called DenseLoss based on our weighting scheme. DenseWeight weights data points according to their target value rarities through kernel density estimation (KDE). DenseLoss adjusts each data point's influence on the loss according to DenseWeight, giving rare data points more influence on model training compared to common data points. We show on multiple differently distributed datasets that DenseLoss significantly improves model performance for rare data points through its density-based weighting scheme. Additionally, we compare DenseLoss to the state-of-the-art method SMOGN, finding that our method mostly yields better performance. Our approach provides more control over model training as it enables us to actively decide on the trade-off between focusing on common or rare cases through a single hyperparameter, allowing the training of better models for rare data points.}, language = {en} } @phdthesis{Klein2014, author = {Klein, Dominik Werner}, title = {Design and Evaluation of Components for Future Internet Architectures}, issn = {1432-8801}, doi = {10.25972/OPUS-9313}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-93134}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Die derzeitige Internetarchitektur wurde nicht in einem geplanten Prozess konzipiert und entwickelt, sondern hat vielmehr eine evolutionsartige Entwicklung hinter sich. Ausl{\"o}ser f{\"u}r die jeweiligen Evolutionsschritte waren dabei meist aufstrebende Anwendungen, welche neue Anforderungen an die zugrundeliegende Netzarchitektur gestellt haben. Um diese Anforderungen zu erf{\"u}llen, wurden h{\"a}ufig neuartige Dienste oder Protokolle spezifiziert und in die bestehende Architektur integriert. Dieser Prozess ist jedoch meist mit hohem Aufwand verbunden und daher sehr tr{\"a}ge, was die Entwicklung und Verbreitung innovativer Dienste beeintr{\"a}chtigt. 
Derzeitig diskutierte Konzepte wie Software-Defined Networking (SDN) oder Netzvirtualisierung (NV) werden als eine M{\"o}glichkeit angesehen, die Altlasten der bestehenden Internetarchitektur zu l{\"o}sen. Beiden Konzepten gemein ist die Idee, logische Netze {\"u}ber dem physikalischen Substrat zu betreiben. Diese logischen Netze sind hochdynamisch und k{\"o}nnen so flexibel an die Anforderungen der jeweiligen Anwendungen angepasst werden. Insbesondere erlaubt das Konzept der Virtualisierung intelligentere Netzknoten, was innovative neue Anwendungsf{\"a}lle erm{\"o}glicht. Ein h{\"a}ufig in diesem Zusammenhang diskutierter Anwendungsfall ist die Mobilit{\"a}t sowohl von Endger{\"a}ten als auch von Diensten an sich. Die Mobilit{\"a}t der Dienste wird hierbei ausgenutzt, um die Zugriffsverz{\"o}gerung oder die belegten Ressourcen im Netz zu reduzieren, indem die Dienste zum Beispiel in f{\"u}r den Nutzer geographisch nahe Datenzentren migriert werden. Neben den reinen Mechanismen bez{\"u}glich Dienst- und Endger{\"a}temobilit{\"a}t sind in diesem Zusammenhang auch geeignete {\"U}berwachungsl{\"o}sungen relevant, welche die vom Nutzer wahrgenommene Dienstg{\"u}te bewerten k{\"o}nnen. Diese L{\"o}sungen liefern wichtige Entscheidungshilfen f{\"u}r die Migration oder {\"u}berwachen m{\"o}gliche Effekte der Migration auf die erfahrene Dienstg{\"u}te beim Nutzer. Im Falle von Video Streaming erm{\"o}glicht ein solcher Anwendungsfall die flexible Anpassung der Streaming Topologie f{\"u}r mobile Nutzer, um so die Videoqualit{\"a}t unabh{\"a}ngig vom Zugangsnetz aufrechterhalten zu k{\"o}nnen. Im Rahmen dieser Doktorarbeit wird der beschriebene Anwendungsfall am Beispiel einer Video Streaming Anwendung n{\"a}her analysiert und auftretende Herausforderungen werden diskutiert. Des Weiteren werden L{\"o}sungsans{\"a}tze vorgestellt und bez{\"u}glich ihrer Effizienz ausgewertet. Im Detail besch{\"a}ftigt sich die Arbeit mit der Leistungsanalyse von Mechanismen f{\"u}r die Dienstmobilit{\"a}t und entwickelt eine Architektur zur Optimierung der Dienstmobilit{\"a}t. Im Bereich Endger{\"a}temobilit{\"a}t werden Verbesserungen entwickelt, welche die Latenz zwischen Endger{\"a}t und Dienst reduzieren oder die Konnektivit{\"a}t unabh{\"a}ngig vom Zugangsnetz gew{\"a}hrleisten. Im letzten Teilbereich wird eine L{\"o}sung zur {\"U}berwachung der Videoqualit{\"a}t im Netz entwickelt und bez{\"u}glich ihrer Genauigkeit analysiert.}, subject = {Leistungsbewertung}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Design and Implementation of a Model-driven XML-based Integrated System Architecture for Assisting Analysis, Understanding, and Retention of Religious Texts:The Case of The Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65737}, year = {2011}, abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. Once deeper understanding is achieved, one would like to organize and manage her/his knowledge in such a way that, it could be easily remembered and efficiently transmitted to others. This paper discusses about modeling religious texts using semantic XML markup based on frame-based knowledge representation, with the purpose of assisting understanding, retention, and sharing of knowledge they contain. In this study, books organized in terms of chapters made up of verses are considered as the source of knowledge to model. 
Some metadata representing the multiple perspectives of knowledge modeling are assigned to each chapter and verse. Chapters and verses with their metadata form a meta-model, which is represented using frames, and published on a web mashup. An XML-based annotation and visualization system equipped with user interfaces for creating static and dynamic metadata, annotating chapters' contents according to user selected semantics, and templates for publishing generated knowledge on the Internet, has been developed. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts, in order to support analysis, understanding, and retention of the texts.}, subject = {Wissensrepr{\"a}sentation}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Design and Implementation of Architectures for Interactive Textual Documents Collation Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56601}, year = {2011}, abstract = {One of the main purposes of textual documents collation is to identify a base text or closest witness to the base text, by analyzing and interpreting differences also known as types of changes that might exist between those documents. Based on this fact, it is reasonable to argue that, explicit identification of types of changes such as deletions, additions, transpositions, and mutations should be part of the collation process. The identification could be carried out by an interpretation module after alignment has taken place. Unfortunately existing collation software such as CollateX1 and Juxta2's collation engine do not have interpretation modules. In fact they implement the Gothenburg model [1] for collation process which does not include an interpretation unit. Currently both CollateX and Juxta's collation engine do not distinguish in their critical apparatus between the types of changes, and do not offer statistics about those changes. This paper presents a model for both integrated and distributed collation processes that improves the Gothenburg model. The model introduces an interpretation component for computing and distinguishing between the types of changes that documents could have undergone. Moreover two architectures implementing the model in order to solve the problem of interactive collation are discussed as well. Each architecture uses CollateX library, and provides on the one hand preprocessing functions for transforming input documents into CollateX input format, and on the other hand a post-processing module for enabling interactive collation. Finally simple algorithms for distinguishing between types of changes, and linking collated source documents with the collation results are also introduced.}, subject = {Softwarearchitektur}, language = {en} } @misc{Kaempgen2009, type = {Master Thesis}, author = {Kaempgen, Benedikt}, title = {Deskriptives Data-Mining f{\"u}r Entscheidungstr{\"a}ger: Eine Mehrfachfallstudie}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46343}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {Das Potenzial der Wissensentdeckung in Daten wird h{\"a}ufig nicht ausgenutzt, was haupts{\"a}chlich auf Barrieren zwischen dem Entwicklerteam und dem Endnutzer des Data-Mining zur{\"u}ckzuf{\"u}hren ist. In dieser Arbeit wird ein transparenter Ansatz zum Beschreiben und Erkl{\"a}ren von Daten f{\"u}r Entscheidungstr{\"a}ger vorgestellt. 
In Entscheidungstr{\"a}ger-zentrierten Aufgaben werden die Projektanforderungen definiert und die Ergebnisse zu einer Geschichte zusammengestellt. Eine Anforderung besteht dabei aus einem tabellarischen Bericht und ggf. Mustern in seinem Inhalt, jeweils verst{\"a}ndlich f{\"u}r einen Entscheidungstr{\"a}ger. Die technischen Aufgaben bestehen aus einer Datenpr{\"u}fung, der Integration der Daten in einem Data-Warehouse sowie dem Generieren von Berichten und dem Entdecken von Mustern wie in den Anforderungen beschrieben. Mehrere Data-Mining-Projekte k{\"o}nnen durch Wissensmanagement sowie eine geeignete Infrastruktur voneinander profitieren. Der Ansatz wurde in zwei Projekten unter Verwendung von ausschließlich Open-Source-Software angewendet.}, subject = {Data Mining}, language = {de} } @article{WienrichCarolus2021, author = {Wienrich, Carolin and Carolus, Astrid}, title = {Development of an Instrument to Measure Conceptualizations and Competencies About Conversational Agents on the Example of Smart Speakers}, series = {Frontiers in Computer Science}, volume = {3}, journal = {Frontiers in Computer Science}, doi = {10.3389/fcomp.2021.685277}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260198}, year = {2021}, abstract = {The concept of digital literacy has been introduced as a new cultural technique, which is regarded as essential for successful participation in a (future) digitized world. Regarding the increasing importance of AI, literacy concepts need to be extended to account for AI-related specifics. The easy handling of the systems results in increased usage, contrasting limited conceptualizations (e.g., imagination of future importance) and competencies (e.g., knowledge about functional principles). In reference to voice-based conversational agents as a concrete application of AI, the present paper aims for the development of a measurement to assess the conceptualizations and competencies about conversational agents. In a first step, a theoretical framework of "AI literacy" is transferred to the context of conversational agent literacy. Second, the "conversational agent literacy scale" (short CALS) is developed, constituting the first attempt to measure interindividual differences in the "(il) literate" usage of conversational agents. 29 items were derived, of which 170 participants answered. An explanatory factor analysis identified five factors leading to five subscales to assess CAL: storage and transfer of the smart speaker's data input; smart speaker's functional principles; smart speaker's intelligent functions, learning abilities; smart speaker's reach and potential; smart speaker's technological (surrounding) infrastructure. Preliminary insights into construct validity and reliability of CALS showed satisfying results. Third, using the newly developed instrument, a student sample's CAL was assessed, revealing intermediated values. Remarkably, owning a smart speaker did not lead to higher CAL scores, confirming our basic assumption that usage of systems does not guarantee enlightened conceptualizations and competencies. 
In sum, the paper contributes to the first insights into the operationalization and understanding of CAL as a specific subdomain of AI-related competencies.}, language = {en} } @phdthesis{Binder2006, author = {Binder, Andreas}, title = {Die stochastische Wissenschaft und zwei Teilsysteme eines Web-basierten Informations- und Anwendungssystems zu ihrer Etablierung}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26146}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Das stochastische Denken, die Bernoullische Stochastik und dessen informationstechnologische Umsetzung, namens Stochastikon stellen die Grundlage f{\"u}r das Verst{\"a}ndnis und die erfolgreiche Nutzung einer stochastischen Wissenschaft dar. Im Rahmen dieser Arbeit erfolgt eine Kl{\"a}rung des Begriffs des stochastischen Denkens, eine anschauliche Darstellung der von Elart von Collani entwickelten Bernoullischen Stochastik und eine Beschreibung von Stochastikon. Dabei werden sowohl das Gesamtkonzept von Stochastikon, sowie die Ziele, Aufgaben und die Realisierung der beiden Teilsysteme namens Mentor und Encyclopedia vorgestellt. Das stochastische Denken erlaubt eine realit{\"a}tsnahe Sichtweise der Dinge, d.h. eine Sichtweise, die mit den menschlichen Beobachtungen und Erfahrungen im Einklang steht und somit die Unsicherheit {\"u}ber zuk{\"u}nftige Entwicklungen ber{\"u}cksichtigt. Der in diesem Kontext verwendete Begriff der Unsicherheit bezieht sich ausschließlich auf zuk{\"u}nftige Entwicklungen und {\"a}ußert sich in Variabilit{\"a}t. Quellen der Unsicherheit sind einerseits die menschliche Ignoranz und andererseits der Zufall. Unter Ignoranz wird hierbei die Unwissenheit des Menschen {\"u}ber die unbekannten, aber feststehenden Fakten verstanden, die die Anfangsbedingungen der zuk{\"u}nftigen Entwicklung repr{\"a}sentieren. Die Bernoullische Stochastik liefert ein Regelwerk und erm{\"o}glicht die Entwicklung eines quantitativen Modells zur Beschreibung der Unsicherheit und expliziter Einbeziehung der beiden Quellen Ignoranz und Zufall. Das Modell tr{\"a}gt den Namen Bernoulli-Raum und bildet die Grundlage f{\"u}r die Herleitung quantitativer Verfahren, um zuverl{\"a}ssige und genaue Aussagen sowohl {\"u}ber die nicht-existente zuf{\"a}llige Zukunft (Vorhersageverfahren), als auch {\"u}ber die unbekannte feststehende Vergangenheit (Messverfahren). Das Softwaresystem Stochastikon implementiert die Bernoullische Stochastik in Form einer Reihe autarker, miteinander kommunizierender Teilsysteme. Ziel des Teilsystems Encyclopedia ist die Bereitstellung und Bewertung stochastischen Wissens. Das Teilsystem Mentor dient der Unterst{\"u}tzung des Anwenders bei der Probleml{\"o}sungsfindung durch Identifikation eines richtigen Modells bzw. eines korrekten Bernoulli-Raums. Der L{\"o}sungsfindungsprozess selber enth{\"a}lt keinerlei Unsicherheit. Die ganze Unsicherheit steckt in der L{\"o}sung, d.h. 
im Bernoulli-Raum, der explizit die vorhandene Unwissenheit (Ignoranz) und den vorliegenden Zufall abdeckend enth{\"a}lt.}, subject = {Stochastik}, language = {de} } @article{KruegerFriedrichFoersteretal.2012, author = {Krueger, Beate and Friedrich, Torben and F{\"o}rster, Frank and Bernhardt, J{\"o}rg and Gross, Roy and Dandekar, Thomas}, title = {Different evolutionary modifications as a guide to rewire two-component systems}, series = {Bioinformatics and Biology Insights}, volume = {6}, journal = {Bioinformatics and Biology Insights}, doi = {10.4137/BBI.S9356}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-123647}, pages = {97-128}, year = {2012}, abstract = {Two-component systems (TCS) are short signalling pathways generally occurring in prokaryotes. They frequently regulate prokaryotic stimulus responses and thus are also of interest for engineering in biotechnology and synthetic biology. The aim of this study is to better understand and describe rewiring of TCS while investigating different evolutionary scenarios. Based on large-scale screens of TCS in different organisms, this study gives detailed data, concrete alignments, and structure analysis on three general modification scenarios, where TCS were rewired for new responses and functions: (i) exchanges in the sequence within single TCS domains, (ii) exchange of whole TCS domains; (iii) addition of new components modulating TCS function. As a result, the replacement of stimulus and promotor cassettes to rewire TCS is well defined exploiting the alignments given here. The diverged TCS examples are non-trivial and the design is challenging. Designed connector proteins may also be useful to modify TCS in selected cases.}, language = {en} } @article{PetschkeStaab2018, author = {Petschke, Danny and Staab, Torsten E.M.}, title = {DLTPulseGenerator: a library for the simulation of lifetime spectra based on detector-output pulses}, series = {SoftwareX}, volume = {7}, journal = {SoftwareX}, doi = {10.1016/j.softx.2018.04.002}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176883}, pages = {122-128}, year = {2018}, abstract = {The quantitative analysis of lifetime spectra relevant in both life and materials sciences presents one of the ill-posed inverse problems and, hence, leads to most stringent requirements on the hardware specifications and the analysis algorithms. Here we present DLTPulseGenerator, a library written in native C++ 11, which provides a simulation of lifetime spectra according to the measurement setup. The simulation is based on pairs of non-TTL detector output-pulses. Those pulses require the Constant Fraction Principle (CFD) for the determination of the exact timing signal and, thus, the calculation of the time difference i.e. the lifetime. To verify the functionality, simulation results were compared to experimentally obtained data using Positron Annihilation Lifetime Spectroscopy (PALS) on pure tin.}, language = {en} } @phdthesis{Houshiar2017, author = {Houshiar, Hamidreza}, title = {Documentation and mapping with 3D point cloud processing}, isbn = {978-3-945459-14-0}, doi = {10.25972/OPUS-14449}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-144493}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {3D point clouds are a de facto standard for 3D documentation and modelling. The advances in laser scanning technology broadens the usability and access to 3D measurement systems. 3D point clouds are used in many disciplines such as robotics, 3D modelling, archeology and surveying. 
Scanners are able to acquire up to a million of points per second to represent the environment with a dense point cloud. This represents the captured environment with a very high degree of detail. The combination of laser scanning technology with photography adds color information to the point clouds. Thus the environment is represented more realistically. Full 3D models of environments, without any occlusion, require multiple scans. Merging point clouds is a challenging process. This thesis presents methods for point cloud registration based on the panorama images generated from the scans. Image representation of point clouds introduces 2D image processing methods to 3D point clouds. Several projection methods for the generation of panorama maps of point clouds are presented in this thesis. Additionally, methods for point cloud reduction and compression based on the panorama maps are proposed. Due to the large amounts of data generated from the 3D measurement systems these methods are necessary to improve the point cloud processing, transmission and archiving. This thesis introduces point cloud processing methods as a novel framework for the digitisation of archeological excavations. The framework replaces the conventional documentation methods for excavation sites. It employs point clouds for the generation of the digital documentation of an excavation with the help of an archeologist on-site. The 3D point cloud is used not only for data representation but also for analysis and knowledge generation. Finally, this thesis presents an autonomous indoor mobile mapping system. The mapping system focuses on the sensor placement planning method. Capturing a complete environment requires several scans. The sensor placement planning method solves for the minimum required scans to digitise large environments. Combining this method with a navigation system on a mobile robot platform enables it to acquire data fully autonomously. This thesis introduces a novel hole detection method for point clouds to detect obscured parts of a captured environment. The sensor placement planning method selects the next scan position with the most coverage of the obscured environment. This reduces the required number of scans. The navigation system on the robot platform consist of path planning, path following and obstacle avoidance. This guarantees the safe navigation of the mobile robot platform between the scan positions. The sensor placement planning method is designed as a stand alone process that could be used with a mobile robot platform for autonomous mapping of an environment or as an assistant tool for the surveyor on scanning projects.}, subject = {3D Punktwolke}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Doing Webservices Composition by Content-based Mashup: Example of a Web-based Simulator for Itinerary Planning}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50036}, year = {2010}, abstract = {Webservices composition is traditionally carried out using composition technologies such as Business Process Execution Language (BPEL) [1] and Web Service Choreography Interface (WSCI) [2]. The composition technology involves the process of web service discovery, invocation, and composition. However these technologies are not easy and flexible enough because they are mainly developer-centric. Moreover majority of websites have not yet embarked into the world of web service, although they have very important and useful information to offer. 
Is it because they have not understood the usefulness of web services or is it because of the costs? Whatever might be the answers to these questions, time and money are definitely required in order to create and offer web services. To avoid these expenditures, wrappers [7] to automatically generate webservices from websites would be a cheaper and easier solution. Mashups offer a different way of doing webservices composition. In web environment a Mashup is a web application that brings together data from several sources using webservices, APIs, wrappers and so on, in order to create entirely a new application that was not provided before. This paper presents first an overview of Mashups and the process of web service invocation and composition based on Mashup, then describes an example of a web-based simulator for navigation system in Germany.}, subject = {Mashup }, language = {en} } @article{BuchinBuchinByrkaetal.2012, author = {Buchin, Kevin and Buchin, Maike and Byrka, Jaroslaw and N{\"o}llenburg, Martin and Okamoto, Yoshio and Silveira, Rodrigo I. and Wolff, Alexander}, title = {Drawing (Complete) Binary Tanglegrams}, series = {Algorithmica}, volume = {62}, journal = {Algorithmica}, doi = {10.1007/s00453-010-9456-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-124622}, pages = {309-332}, year = {2012}, abstract = {A binary tanglegram is a drawing of a pair of rooted binary trees whose leaf sets are in one-to-one correspondence; matching leaves are connected by inter-tree edges. For applications, for example, in phylogenetics, it is essential that both trees are drawn without edge crossings and that the inter-tree edges have as few crossings as possible. It is known that finding a tanglegram with the minimum number of crossings is NP-hard and that the problem is fixed-parameter tractable with respect to that number. We prove that under the Unique Games Conjecture there is no constant-factor approximation for binary trees. We show that the problem is NP-hard even if both trees are complete binary trees. For this case we give an O(n 3)-time 2-approximation and a new, simple fixed-parameter algorithm. We show that the maximization version of the dual problem for binary trees can be reduced to a version of MaxCut for which the algorithm of Goemans and Williamson yields a 0.878-approximation.}, language = {en} } @article{DumicBjeloperaNuechter2021, author = {Dumic, Emil and Bjelopera, Anamaria and N{\"u}chter, Andreas}, title = {Dynamic point cloud compression based on projections, surface reconstruction and video compression}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {1}, issn = {1424-8220}, doi = {10.3390/s22010197}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-252231}, year = {2021}, abstract = {In this paper we will present a new dynamic point cloud compression based on different projection types and bit depth, combined with the surface reconstruction algorithm and video compression for obtained geometry and texture maps. Texture maps have been compressed after creating Voronoi diagrams. Used video compression is specific for geometry (FFV1) and texture (H.265/HEVC). Decompressed point clouds are reconstructed using a Poisson surface reconstruction algorithm. Comparison with the original point clouds was performed using point-to-point and point-to-plane measures. 
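A minimal sketch in Python of a symmetric point-to-point measure (nearest-neighbour RMS error) of the kind referred to above; the exact formulation used in the paper may differ, and the point clouds are assumed toy data:

import numpy as np
from scipy.spatial import cKDTree

def point_to_point_rms(a, b):
    # Symmetric point-to-point RMS error between two (N, 3) point clouds.
    d_ab, _ = cKDTree(b).query(a)   # nearest neighbour in b for every point of a
    d_ba, _ = cKDTree(a).query(b)   # nearest neighbour in a for every point of b
    return max(float(np.sqrt(np.mean(d_ab ** 2))), float(np.sqrt(np.mean(d_ba ** 2))))

rng = np.random.default_rng(0)
original = rng.random((1000, 3))
decoded = original + rng.normal(scale=0.01, size=original.shape)
print(point_to_point_rms(original, decoded))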
Comprehensive experiments show better performance for some projection maps: cylindrical, Miller and Mercator projections.}, language = {en} } @phdthesis{Kaussner2003, author = {Kaußner, Armin}, title = {Dynamische Szenerien in der Fahrsimulation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8286}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {In der Arbeit wird ein neues Konzept f{\"u}r Fahrsimulator-Datenbasen vorgestellt. Der Anwender entwirft eine auf seine Fragestellung zugeschnittene Datenbasis mithilfe einer einfachen Skriptsprache. Das Straßennetzwerk wird auf einer topologischen Ebene rep{\"a}sentiert. In jedem Simulationsschritt wird hieraus im Sichtbarkeitsbereich des Fahrers die geometrische Rep{\"a}sentation berechnet. Die f{\"u}r den Fahrer unsichtbaren Teile des Straßenetzwerks k{\"o}nnen w{\"a}hrend der Simulation ver{\"a}ndert werden. Diese Ver{\"a}nderungen k{\"o}nnen von der Route des Fahrers oder von den in der Simulation erhobenen Messerten abh{\"a}ngen. Zudem kann der Anwender das Straßennetzwerk interaktiv ver{\"a}ndern. Das vorgestellte Konzept bietet zahlreiche M{\"o}glichkeiten zur Erzeugung reproduzierbarer Szenarien f{\"u}r Experimente in Fahrsimulatoren.}, subject = {Straßenverkehr}, language = {de} } @article{NaglerNaegeleGillietal.2018, author = {Nagler, Matthias and N{\"a}gele, Thomas and Gilli, Christian and Fragner, Lena and Korte, Arthur and Platzer, Alexander and Farlow, Ashley and Nordborg, Magnus and Weckwerth, Wolfram}, title = {Eco-Metabolomics and Metabolic Modeling: Making the Leap From Model Systems in the Lab to Native Populations in the Field}, series = {Frontiers in Plant Science}, volume = {9}, journal = {Frontiers in Plant Science}, number = {1556}, issn = {1664-462X}, doi = {10.3389/fpls.2018.01556}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-189560}, year = {2018}, abstract = {Experimental high-throughput analysis of molecular networks is a central approach to characterize the adaptation of plant metabolism to the environment. However, recent studies have demonstrated that it is hardly possible to predict in situ metabolic phenotypes from experiments under controlled conditions, such as growth chambers or greenhouses. This is particularly due to the high molecular variance of in situ samples induced by environmental fluctuations. An approach of functional metabolome interpretation of field samples would be desirable in order to be able to identify and trace back the impact of environmental changes on plant metabolism. To test the applicability of metabolomics studies for a characterization of plant populations in the field, we have identified and analyzed in situ samples of nearby grown natural populations of Arabidopsis thaliana in Austria. A. thaliana is the primary molecular biological model system in plant biology with one of the best functionally annotated genomes representing a reference system for all other plant genome projects. The genomes of these novel natural populations were sequenced and phylogenetically compared to a comprehensive genome database of A. thaliana ecotypes. Experimental results on primary and secondary metabolite profiling and genotypic variation were functionally integrated by a data mining strategy, which combines statistical output of metabolomics data with genome-derived biochemical pathway reconstruction and metabolic modeling. 
Correlations of biochemical model predictions and population-specific genetic variation indicated varying strategies of metabolic regulation at the population level, which enabled the direct comparison, differentiation, and prediction of metabolic adaptation of the same species to different habitats. These differences were most pronounced in organic acid and amino acid metabolism as well as at the interface of primary and secondary metabolism and allowed for the direct classification of population-specific metabolic phenotypes within geographically contiguous sampling sites.}, language = {en} } @techreport{OdhahGrassKraemer2022, type = {Working Paper}, author = {Odhah, Najib and Grass, Eckhard and Kraemer, Rolf}, title = {Effective Rate of URLLC with Short Block-Length Information Theory}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28085}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280859}, pages = {4}, year = {2022}, abstract = {Shannon channel capacity estimation, based on large packet lengths, is used in traditional Radio Resource Management (RRM) optimization. This is adequate for the normal transmission of data in a wired or wireless system. For industrial automation and control, however, rather short packets are used because of the low-latency requirements. In this case, using Shannon's formula leads to inaccurate RRM solutions; thus, another formula should be used to optimize radio resources for short block-length packet transmission, which is the basis of Ultra-Reliable Low-Latency Communication (URLLC). The stringent delay Quality of Service (QoS) requirement of URLLC calls for a link-level rather than a physical-level channel model. Once an accurate formula for the achievable rate of short block-length packet transmission is available, the RRM optimization problem can be formulated and solved under the new URLLC constraints. This short paper briefly explains the mathematical models currently used to formulate the effective transmission rate of URLLC and then discusses how to use this rate in RRM for URLLC.}, subject = {Datennetz}, language = {en} } @article{MadeiraGromerLatoschiketal.2021, author = {Madeira, Octavia and Gromer, Daniel and Latoschik, Marc Erich and Pauli, Paul}, title = {Effects of Acrophobic Fear and Trait Anxiety on Human Behavior in a Virtual Elevated Plus-Maze}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.635048}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258709}, year = {2021}, abstract = {The Elevated Plus-Maze (EPM) is a well-established apparatus to measure anxiety in rodents, i.e., animals spending relatively more time in the closed than in the open arms are considered anxious. To examine whether such anxiety-modulated behaviors are conserved in humans, we re-translated this paradigm to a human setting using virtual reality in a Cave Automatic Virtual Environment (CAVE) system. In two studies, we examined whether the EPM exploration behavior of humans is modulated by their trait anxiety and also assessed the individuals' levels of acrophobia (fear of heights), claustrophobia (fear of confined spaces), sensation seeking, and their reported anxiety while on the maze.
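The Odhah, Grass and Kraemer entry above contrasts Shannon's asymptotic capacity with an achievable-rate formula for short block lengths. The expression commonly used for this purpose, and presumably the formula the working paper refers to, is the normal approximation of Polyanskiy, Poor and Verd{\'u} (2010), reproduced here for reference with block length n, target error probability \varepsilon and SNR \gamma:

% Normal approximation of the maximal achievable rate at finite block length n:
\[
R(n,\varepsilon) \;\approx\; C(\gamma) \;-\; \sqrt{\tfrac{V(\gamma)}{n}}\, Q^{-1}(\varepsilon) \;+\; \tfrac{\log_2 n}{2n},
\qquad
C(\gamma) = \log_2(1+\gamma),
\qquad
V(\gamma) = \Bigl(1 - \tfrac{1}{(1+\gamma)^{2}}\Bigr)\log_2^{2} e .
\]

Here Q^{-1} is the inverse Gaussian Q-function; as n grows, the dispersion and correction terms vanish and the expression reduces to the Shannon capacity used in conventional RRM.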
First, we constructed an exact virtual copy of the animal EPM adjusted to human proportions. In analogy to animal EPM studies, participants (N = 30) freely explored the EPM for 5 min. In the second study (N = 61), we redesigned the EPM to make it more human-adapted and to differentiate the influences of trait anxiety and acrophobia by introducing various floor textures and by lowering the walls of the closed arms to the height of standard handrails. In the first experiment, hierarchical regression analyses of exploration behavior revealed the expected association between open-arm avoidance and trait anxiety, and an even stronger association with acrophobic fear. In the second study, results revealed that acrophobia was associated with avoidance of open arms with mesh-floor texture, whereas for trait anxiety, claustrophobia, and sensation seeking no effect was detected. Also, subjects' fear ratings were moderated by all psychometric measures except trait anxiety. In sum, both studies consistently indicate that humans show no general open-arm avoidance analogous to rodents and that human EPM behavior is modulated most strongly by acrophobic fear, whereas trait anxiety plays a subordinate role. Thus, we conclude that the criteria for cross-species validity are met insufficiently in this case. Despite their exploratory nature, our studies provide in-depth insights into human exploration behavior on the virtual EPM.}, language = {en} } @phdthesis{Menth2004, author = {Menth, Michael}, title = {Efficient admission control and routing for resilient communication networks}, doi = {10.25972/OPUS-846}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9949}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {This work is subdivided into two main areas: resilient admission control and resilient routing. The work gives an overview of the state of the art of quality of service mechanisms in communication networks and proposes a categorization of admission control (AC) methods. These approaches are investigated regarding performance, more precisely, regarding the potential resource utilization, by dimensioning the capacity for a network with a given topology, traffic matrix, and required flow blocking probability. In case of a failure, the affected traffic is rerouted over backup paths, which increases the traffic rate on the respective links. To guarantee the effectiveness of admission control also in failure scenarios, the increased traffic rate must be taken into account for capacity dimensioning; this leads to resilient AC. Capacity dimensioning is not feasible for existing networks with already given link capacities. For the application of resilient AC in this case, the size of distributed AC budgets must be adapted according to the traffic matrix in such a way that the maximum blocking probability over all flows is minimized and that the capacity of no link is exceeded by the admissible traffic rate in any failure scenario. Several algorithms for the solution of that problem are presented and compared regarding their efficiency and fairness. A prototype for resilient AC was implemented in the laboratories of Siemens AG in Munich within the scope of the project KING. Resilience requires additional capacity on the backup paths for failure scenarios. The amount of this backup capacity depends on the routing and can be minimized by routing optimization. New protection switching mechanisms are presented that quickly divert the traffic around outage locations.
They are simple and can be implemented, e.g., by MPLS technology. The Self-Protecting Multi-Path (SPM) is a multi-path consisting of disjoint partial paths. The traffic is distributed over all fault-free partial paths according to an optimized load balancing function, both in the working case and in failure scenarios. Performance studies show that the network topology and the traffic matrix also influence the amount of required backup capacity significantly. The example of the COST-239 network illustrates that conventional shortest path routing may need 50\% more capacity than the optimized SPM if all single link and node failures are to be protected against.}, subject = {Kommunikation}, language = {en} } @article{KraftBirkReichertetal.2020, author = {Kraft, Robin and Birk, Ferdinand and Reichert, Manfred and Deshpande, Aniruddha and Schlee, Winfried and Langguth, Berthold and Baumeister, Harald and Probst, Thomas and Spiliopoulou, Myra and Pryss, R{\"u}diger}, title = {Efficient processing of geospatial mHealth data using a scalable crowdsensing platform}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {12}, issn = {1424-8220}, doi = {10.3390/s20123456}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-207826}, year = {2020}, abstract = {Smart sensors and smartphones are becoming increasingly prevalent. Both can be used to gather environmental data (e.g., noise). Importantly, these devices can be connected to each other as well as to the Internet to collect large amounts of sensor data, which leads to many new opportunities. In particular, mobile crowdsensing techniques can be used to capture phenomena of common interest. Especially valuable insights can be gained if the collected data are additionally related to the time and place of the measurements. However, many technical solutions still use monolithic backends that are not capable of processing crowdsensing data in a flexible, efficient, and scalable manner. In this work, an architectural design was conceived with the goal of managing geospatial data in challenging crowdsensing healthcare scenarios. It will be shown how the proposed approach can be used to provide users with an interactive map of environmental noise, allowing tinnitus patients and other health-conscious people to avoid locations with harmful sound levels. Technically, the shown approach combines cloud-native applications with Big Data and stream processing concepts. In general, the presented architectural design shall serve as a foundation for implementing practical and scalable crowdsensing platforms for various healthcare scenarios beyond the addressed use case.}, language = {en} } @phdthesis{Wolz2003, author = {Wolz, Frank}, title = {Ein generisches Konzept zur Modellierung und Bewertung feldprogrammierbarer Architekturen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-7944}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {This thesis presents a first cross-architecture study of field-programmable logic devices for the implementation of synchronous circuits. First, a model for general field-programmable architectures based on periodic graphs is defined. Then, evaluation measures for architectures and circuit layouts are given to characterize structural properties with respect to chip area consumption and signal delay.
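The Menth entry above distributes the traffic of a Self-Protecting Multi-Path (SPM) over its disjoint partial paths according to a load balancing function that also covers failure scenarios. The hedged sketch below only illustrates the mechanism: per-path traffic fractions are renormalized over the surviving partial paths when a failure occurs. The thesis optimizes these fractions per failure scenario; the proportional fallback used here is an assumption made purely for illustration.

# Illustrative SPM-style load balancing sketch (not the optimized functions
# from the thesis): redistribute traffic fractions over surviving partial paths.
def spm_distribution(base_fractions, failed_paths):
    surviving = {path: share for path, share in base_fractions.items() if path not in failed_paths}
    total = sum(surviving.values())
    if total == 0:
        raise RuntimeError("all partial paths have failed")
    return {path: share / total for path, share in surviving.items()}

if __name__ == "__main__":
    working_case = {"path_a": 0.5, "path_b": 0.3, "path_c": 0.2}
    print(spm_distribution(working_case, failed_paths={"path_b"}))  # path_a ~0.71, path_c ~0.29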
Furthermore, a generic layout tool is developed that can compute and evaluate implementations for arbitrary architectures and circuits. Finally, nine resource-minimal architectures with mesh and island structures are compared with one another.}, subject = {Gate-Array-Bauelement}, language = {de} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Empirical Study on Screen Scraping Web Service Creation: Case of Rhein-Main-Verkehrsverbund (RMV)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49396}, year = {2010}, abstract = {The Internet is the biggest database that science and technology have ever produced. The World Wide Web is a large repository of information that cannot be used for automation by many applications because its content is aimed at human readers rather than at programs. One of the solutions to this automation problem is to develop wrappers. Wrapping is a process whereby unstructured extracted information is transformed into a more structured format, such as XML, which can be provided as a web service to other applications. A web service is a web page whose content is well structured so that a computer program can consume it automatically. This paper describes the steps involved in constructing wrappers manually in order to automatically generate web services.}, subject = {HTML}, language = {en} } @techreport{GrossmannHomeyer2023, type = {Working Paper}, author = {Großmann, Marcel and Homeyer, Tobias}, title = {Emulation of Multipath Transmissions in P4 Networks with Kathar{\´a}}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32209}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322095}, pages = {4}, year = {2023}, abstract = {Packets sent over a network can either get lost or reach their destination. Protocols like TCP try to solve this problem by resending the lost packets. However, retransmissions consume a lot of time and are cumbersome for the transmission of critical data. Multipath solutions are quite common to address this reliability issue and are available on almost every layer of the ISO/OSI model. We propose a solution based on a P4 network that duplicates packets in order to send them to their destination via multiple routes. The last network hop ensures that only a single copy of the traffic is forwarded to the destination by adopting a concept similar to Bloom filters. In addition, if fast delivery is requested, we provide a P4 prototype that randomly forwards the packets over different transmission paths. For reproducibility, we implement our approach in a container-based network emulation system called Kathar{\´a}.}, language = {en} } @article{OberdoerferHeidrichBirnstieletal.2021, author = {Oberd{\"o}rfer, Sebastian and Heidrich, David and Birnstiel, Sandra and Latoschik, Marc Erich}, title = {Enchanted by Your Surrounding?
Measuring the Effects of Immersion and Design of Virtual Environments on Decision-Making}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.679277}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260101}, pages = {679277}, year = {2021}, abstract = {Impaired decision-making leads to the inability to distinguish between advantageous and disadvantageous choices. The impairment of a person's decision-making is a common goal of gambling games. Given the recent trend towards gambling in immersive Virtual Reality, it is crucial to investigate the effects of both immersion and the virtual environment (VE) on decision-making. In a novel user study, we measured decision-making using three virtual versions of the Iowa Gambling Task (IGT). The versions differed with regard to the degree of immersion and the design of the virtual environment. Since emotions affect decision-making, we further measured the positive and negative affect of participants. A higher visual angle on a stimulus leads to an increased emotional response, so we kept the visual angle on the Iowa Gambling Task the same across conditions. Our results revealed no significant impact of immersion or the VE on the IGT. We further found no significant difference between the conditions with regard to positive and negative affect. This suggests that neither the medium used nor the design of the VE causes an impairment of decision-making. However, in combination with a recent study, we provide first evidence that a higher visual angle on the IGT leads to impaired decision-making.}, language = {en} } @article{RodriguesWeissHewigetal.2021, author = {Rodrigues, Johannes and Weiß, Martin and Hewig, Johannes and Allen, John J. B.}, title = {EPOS: EEG Processing Open-Source Scripts}, series = {Frontiers in Neuroscience}, volume = {15}, journal = {Frontiers in Neuroscience}, issn = {1662-453X}, doi = {10.3389/fnins.2021.660449}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240221}, year = {2021}, abstract = {Background: Since the replication crisis, standardization has become even more important in psychological science and neuroscience. As a result, many methods are being reconsidered, and researchers' degrees of freedom in these methods are being discussed as a potential source of inconsistencies across studies. New Method: With the aim of addressing these subjectivity issues, we have been working on a tutorial-like EEG (pre-)processing pipeline to achieve an automated method based on the semi-automated analysis proposed by Delorme and Makeig. Results: Two scripts are presented and explained step-by-step to perform basic, informed ERP and frequency-domain analyses, including data export to statistical programs and visual representations of the data. The open-source software EEGlab in MATLAB is used as the data handling platform, but scripts based on code provided by Mike Cohen (2014) are also included. Comparison with existing methods: This accompanying tutorial-like article explains and shows how the processing steps of our automated pipeline affect the data and is addressed especially to beginners in EEG analysis, as other (pre-)processing chains mostly target rather informed users in specialized areas or cover only parts of a complete procedure. In this context, we compared our pipeline with a selection of existing approaches.
Conclusion: The need for standardization and replication is evident, yet it is equally important to check the plausibility of the suggested solution by data exploration. Here, we provide the community with a tool to enhance the understanding and capability of EEG analysis. We aim to contribute to comprehensive and reliable analyses for neuroscientific research.}, language = {en} } @article{GehrkeBalbachRauchetal.2019, author = {Gehrke, Alexander and Balbach, Nico and Rauch, Yong-Mi and Degkwitz, Andreas and Puppe, Frank}, title = {Erkennung von handschriftlichen Unterstreichungen in Alten Drucken}, series = {Bibliothek Forschung und Praxis}, volume = {43}, journal = {Bibliothek Forschung und Praxis}, number = {3}, issn = {1865-7648}, doi = {10.1515/bfp-2019-2083}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193377}, pages = {447 -- 452}, year = {2019}, abstract = {Detecting handwritten artifacts such as underlines in printed books allows conclusions about reception behavior and provenance history and is also needed for OCR. Handwritten underlines have to be distinguished from horizontal lines that are part of the print itself (e.g., separator rules), since the latter should not be annotated. This article presents an approach based on a neural network with a U-Net architecture trained on underlines, whose results are post-processed in a second step with heuristic rules. The evaluations show that underlines are recognized very well as long as not too many pixels of an underline are lost during binarization of the scans due to low contrast. In future work, the words above an underline are to be transcribed with OCR, and other artifacts such as handwritten notes in old prints are to be detected as well.}, language = {de} } @techreport{SertbasBuelbuelErgencFischer2022, type = {Working Paper}, author = {Sertbas B{\"u}lb{\"u}l, Nurefsan and Ergenc, Doganalp and Fischer, Mathias}, title = {Evaluating Dynamic Path Reconfiguration for Time Sensitive Networks}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28074}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280743}, pages = {5}, year = {2022}, abstract = {In time-sensitive networks (TSN) based on 802.1Qbv, i.e., the Time-Aware Shaper (TAS) protocol, precise transmission schedules and paths are used to ensure end-to-end deterministic communication. Such resource reservations for data flows are usually established at the startup time of an application and remain untouched until the flow ends. There is no way to easily migrate existing flows to alternative paths without inducing additional delay or wasting resources. Therefore, some new flows cannot be embedded due to capacity limitations on certain links, which leads to sub-optimal flow assignment. As future networks will need to support a large number of low-latency flows, accommodating new flows at runtime and adapting existing flows accordingly becomes a challenging problem. In this extended abstract, we summarize a previously published paper of ours [1]. We combine software-defined networking (SDN), which provides better control of network flows, with TSN to be able to seamlessly migrate time-sensitive flows.
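The Gehrke et al. entry above post-processes the U-Net output with heuristic rules. A hedged sketch of one such rule is shown below: connected components of the thresholded prediction mask are kept only if their bounding box is wide and flat, i.e. underline-shaped. The threshold and the width/height limits are illustrative assumptions, not the rules from the paper.

# Illustrative post-processing sketch (not the rules from Gehrke et al.):
# keep only wide, flat connected components of a predicted underline mask.
import numpy as np
from scipy import ndimage

def filter_underline_candidates(prob_mask, threshold=0.5, min_width=40, max_height=12):
    binary = prob_mask > threshold
    labels, _ = ndimage.label(binary)                    # connected components
    keep = np.zeros_like(binary)
    for lab, sl in enumerate(ndimage.find_objects(labels), start=1):
        if sl is None:
            continue
        h = sl[0].stop - sl[0].start
        w = sl[1].stop - sl[1].start
        if w >= min_width and h <= max_height:           # underline-shaped component
            keep[labels == lab] = True
    return keep

if __name__ == "__main__":
    mask = np.zeros((100, 200))
    mask[50:53, 20:150] = 1.0                            # long, thin stroke: kept
    mask[10:30, 10:25] = 1.0                             # compact blob: discarded
    print(int(filter_underline_candidates(mask).sum())) # only the stroke's pixels remain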
For that, we formulate an optimization problem and propose different dynamic path configuration strategies under deterministic communication requirements. Our simulation results indicate that regularly reconfiguring the flow assignments can improve the latency of time-sensitive flows and can increase the number of flows embedded in the network by around 4\% in worst-case scenarios while still satisfying individual flow deadlines.}, subject = {Datennetz}, language = {en} } @article{AliMontenegro2016, author = {Ali, Qasim and Montenegro, Sergio}, title = {Explicit Model Following Distributed Control Scheme for Formation Flying of Mini UAVs}, series = {IEEE Access}, volume = {4}, journal = {IEEE Access}, number = {397-406}, doi = {10.1109/ACCESS.2016.2517203}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146061}, year = {2016}, abstract = {A centralized heterogeneous formation flight position control scheme has been formulated using an explicit model following design based on a Linear Quadratic Regulator Proportional Integral (LQR PI) controller. The leader quadcopter is a stable reference model with desired dynamics whose output is perfectly tracked by the two wingmen quadcopters. The leader itself is controlled through the pole placement control method with desired stability characteristics, while the two followers are controlled through a robust and adaptive LQR PI control method. The selected 3-D formation geometry and static stability are maintained under a number of possible perturbations. With this control scheme, the formation geometry may also be switched to any arbitrary shape during flight, provided a suitable collision avoidance mechanism is incorporated. In case of communication loss between the leader and one of the followers, the other follower relays the data received from the leader to the affected follower. The stability of the closed-loop system has been analyzed using singular values. The proposed approach for the tightly coupled formation flight of mini unmanned aerial vehicles has been validated with the help of extensive simulations in MATLAB/Simulink, which provided promising results.}, language = {en} } @article{LodaKrebsDanhofetal.2019, author = {Loda, Sophia and Krebs, Jonathan and Danhof, Sophia and Schreder, Martin and Solimando, Antonio G. and Strifler, Susanne and Rasche, Leo and Kort{\"u}m, Martin and Kerscher, Alexander and Knop, Stefan and Puppe, Frank and Einsele, Hermann and Bittrich, Max}, title = {Exploration of artificial intelligence use with ARIES in multiple myeloma research}, series = {Journal of Clinical Medicine}, volume = {8}, journal = {Journal of Clinical Medicine}, number = {7}, issn = {2077-0383}, doi = {10.3390/jcm8070999}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197231}, pages = {999}, year = {2019}, abstract = {Background: Natural language processing (NLP) is a powerful tool supporting the generation of Real-World Evidence (RWE). There is no NLP system that enables the extensive querying of parameters specific to multiple myeloma (MM) from unstructured medical reports. We therefore created an MM-specific ontology to accelerate information extraction (IE) from unstructured text. Methods: Our MM ontology consists of extensive MM-specific and hierarchically structured attributes and values. We implemented "A Rule-based Information Extraction System" (ARIES) that uses this ontology. We evaluated ARIES on 200 randomly selected medical reports of patients diagnosed with MM.
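The Ali and Montenegro entry above builds its explicit model-following scheme on an LQR PI position controller. As a hedged, minimal illustration of the LQR ingredient only (not the full distributed scheme, and with assumed model matrices and weights), the sketch below computes the state-feedback gain for a single-axis double-integrator position model.

# Minimal LQR illustration (assumed double-integrator model, not the full
# explicit model-following LQR PI design from Ali and Montenegro):
# state x = [position, velocity], input u = acceleration command.
import numpy as np
from scipy.linalg import solve_continuous_are

A = np.array([[0.0, 1.0],
              [0.0, 0.0]])
B = np.array([[0.0],
              [1.0]])
Q = np.diag([10.0, 1.0])     # state weights (assumed)
R = np.array([[1.0]])        # input weight (assumed)

P = solve_continuous_are(A, B, Q, R)    # solve the algebraic Riccati equation
K = np.linalg.solve(R, B.T @ P)         # optimal state-feedback gain: u = -K x

print("LQR gain K =", K)
print("closed-loop eigenvalues:", np.linalg.eigvals(A - B @ K))  # both in the left half-plane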
Results: Our system achieved a high F1 score of 0.92 on the evaluation dataset, with a precision of 0.87 and a recall of 0.98. Conclusions: Our rule-based IE system enables the comprehensive querying of medical reports. The IE accelerates the extraction of data and enables clinicians to generate RWE on hematological issues faster. RWE helps clinicians to make decisions in an evidence-based manner. Our tool thus accelerates the integration of research evidence into everyday clinical practice.}, language = {en} }
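As a quick consistency check of the figures reported in the Loda et al. entry above: the F1 score is the harmonic mean of precision and recall, and with a precision of 0.87 and a recall of 0.98 it indeed rounds to the reported 0.92.

# Consistency check for the ARIES evaluation figures reported above.
precision, recall = 0.87, 0.98
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 2))  # 0.92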