@article{ZirkelCecilSchaeferetal.2012, author = {Zirkel, J. and Cecil, A. and Sch{\"a}fer, F. and Rahlfs, S. and Ouedraogo, A. and Xiao, K. and Sawadogo, S. and Coulibaly, B. and Becker, K. and Dandekar, T.}, title = {Analyzing Thiol-Dependent Redox Networks in the Presence of Methylene Blue and Other Antimalarial Agents with RT-PCR-Supported in silico Modeling}, series = {Bioinformatics and Biology Insights}, volume = {6}, journal = {Bioinformatics and Biology Insights}, doi = {10.4137/BBI.S10193}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-123751}, pages = {287-302}, year = {2012}, abstract = {BACKGROUND: In the face of growing resistance in malaria parasites to drugs, pharmacological combination therapies are important. There is accumulating evidence that methylene blue (MB) is an effective drug against malaria. Here we explore the biological effects of both MB alone and in combination therapy using modeling and experimental data. RESULTS: We built a model of the central metabolic pathways in P. falciparum. Metabolic flux modes and their changes under MB were calculated by integrating experimental data (RT-PCR data on mRNAs for redox enzymes) as constraints and results from the YANA software package for metabolic pathway calculations. Several different lines of MB attack on Plasmodium redox defense were identified by analysis of the network effects. Next, chloroquine resistance based on pfmdr/and pfcrt transporters, as well as pyrimethamine/sulfadoxine resistance (by mutations in DHF/DHPS), were modeled in silico. Further modeling shows that MB has a favorable synergism on antimalarial network effects with these commonly used antimalarial drugs. CONCLUSIONS: Theoretical and experimental results support that methylene blue should, because of its resistance-breaking potential, be further tested as a key component in drug combination therapy efforts in holoendemic areas.}, language = {en} } @phdthesis{Kindermann2016, author = {Kindermann, Philipp}, title = {Angular Schematization in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-020-7 (print)}, doi = {10.25972/WUP-978-3-95826-021-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112549}, school = {W{\"u}rzburg University Press}, pages = {184}, year = {2016}, abstract = {Graphs are a frequently used tool to model relationships among entities. A graph is a binary relation between objects, that is, it consists of a set of objects (vertices) and a set of pairs of objects (edges). Networks are common examples of modeling data as a graph. For example, relationships between persons in a social network, or network links between computers in a telecommunication network can be represented by a graph. The clearest way to illustrate the modeled data is to visualize the graphs. The field of Graph Drawing deals with the problem of finding algorithms to automatically generate graph visualizations. The task is to find a "good" drawing, which can be measured by different criteria such as number of crossings between edges or the used area. In this thesis, we study Angular Schematization in Graph Drawing. By this, we mean drawings with large angles (for example, between the edges at common vertices or at crossing points). The thesis consists of three parts. First, we deal with the placement of boxes. Boxes are axis-parallel rectangles that can, for example, contain text. 
They can be placed on a map to label important sites, or can be used to describe semantic relationships between words in a word network. In the second part of the thesis, we consider graph drawings that visually guide the viewer. These drawings generally induce large angles between edges that meet at a vertex. Furthermore, the edges are drawn crossing-free and in a way that makes them easy to follow for the human eye. The third and final part is devoted to crossings with large angles. In drawings with crossings, it is important to have large angles between edges at their crossing point, preferably right angles.}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Assisting Analysis and Understanding of Quran Search Results with Interactive Scatter Plots and Tables}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55840}, year = {2011}, abstract = {The Quran is the holy book of Islam consisting of 6236 verses divided into 114 chapters called suras. Many verses are similar and even identical. Searching for similar texts (e.g., verses) could return thousands of verses that, when displayed completely or partly as a textual list, would make analysis and understanding difficult and confusing. Moreover, it would be visually impossible to instantly figure out the overall distribution of the retrieved verses in the Quran. As a consequence, reading and analyzing the verses would be tedious and unintuitive. In this study, a combination of interactive scatter plots and tables has been developed to assist analysis and understanding of the search result. Retrieved verses are clustered by chapters, and a weight is assigned to each cluster according to the number of verses it contains, so that users can visually identify the most relevant areas and figure out the places of revelation of the verses. Users visualize the complete result and can select a region of the plot to zoom in, or click on a marker to display a table containing the verses with their English translation side by side.}, subject = {Text Mining}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Assisting Understanding, Retention, and Dissemination of Religious Texts Knowledge with Modeling, and Visualization Techniques: The Case of The Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55927}, year = {2011}, abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. Once deeper understanding is achieved, one would like to organize and manage her/his knowledge in such a way that it could be easily remembered and efficiently transmitted to others. In this paper, books organized in terms of chapters consisting of verses are considered as the source of knowledge to be modeled. The knowledge model consists of verses with their metadata and semantic annotations. The metadata represent the multiple perspectives of knowledge modeling. Verses with their metadata and annotations form a meta-model, which will be published on a web Mashup. The meta-model with linking between its elements constitutes a knowledge base. An XML-based annotation system breaking down the learning process into specific tasks helps construct the desired meta-model. The system is made up of user interfaces for creating metadata, annotating chapters' contents according to user-selected semantics, and templates for publishing the generated knowledge on the Internet.
The proposed software system improves comprehension and retention of knowledge contained in religious texts through modeling and visualization. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts. It is expected that this short ongoing study will motivate others to engage in devising and offering software systems for cross-religion learning.}, subject = {Wissensmanagement}, language = {en} } @article{MandelHoernleinIflandetal.2011, author = {Mandel, Alexander and H{\"o}rnlein, Alexander and Ifland, Marianus and L{\"u}neburg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Aufwandsanalyse f{\"u}r computerunterst{\"u}tzte Multiple-Choice Papierklausuren}, series = {GMS Journal for Medical Education}, volume = {28}, journal = {GMS Journal for Medical Education}, number = {4}, doi = {10.3205/zma000767}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134386}, pages = {1-15, Doc55}, year = {2011}, abstract = {Introduction: Multiple-choice examinations are still fundamental for assessment in medical degree programs. In addition to content-related research, the optimization of the technical procedure is an important question. Medical examiners face three options: paper-based examinations with or without computer support, or completely electronic examinations. Critical aspects are the effort for formatting, the logistic effort during the actual examination, the quality, promptness and effort of the correction, the time for making the documents available for inspection by the students, and the statistical analysis of the examination results. Methods: For the past three semesters, a computer program for the input and formatting of MC questions in medical and other paper-based examinations has been used and continuously improved at Wuerzburg University. In the winter semester (WS) 2009/10 eleven, in the summer semester (SS) 2010 twelve and in WS 2010/11 thirteen medical examinations were conducted with the program and automatically evaluated. For the last two semesters, the remaining manual workload was recorded. Results: The time required for formatting and subsequent analysis, including adjustments of the analysis, of an average examination with about 140 participants and about 35 questions was 5-7 hours for exams without complications in WS 2009/10, about 2 hours in SS 2010, and about 1.5 hours in WS 2010/11. Including exams with complications, the average time was about 3 hours per exam in SS 2010 and 2.67 hours in WS 2010/11. Discussion: For conventional multiple-choice exams, the computer-based formatting and evaluation of paper-based exams offers a significant time reduction for lecturers in comparison with the manual correction of paper-based exams; compared to purely electronically conducted exams, it requires a much simpler technological infrastructure and fewer staff during the exam.}, language = {de} } @article{WolffRutter2012, author = {Wolff, Alexander and Rutter, Ignaz}, title = {Augmenting the Connectivity of Planar and Geometric Graphs}, series = {Journal of Graph Algorithms and Applications}, journal = {Journal of Graph Algorithms and Applications}, doi = {10.7155/jgaa.00275}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97587}, year = {2012}, abstract = {In this paper we study connectivity augmentation problems.
Given a connected graph G with some desirable property, we want to make G 2-vertex connected (or 2-edge connected) by adding edges such that the resulting graph keeps the property. The aim is to add as few edges as possible. The property that we consider is planarity, both in an abstract graph-theoretic and in a geometric setting, where vertices correspond to points in the plane and edges to straight-line segments. We show that it is NP-hard to find a minimum-cardinality augmentation that makes a planar graph 2-edge connected. For making a planar graph 2-vertex connected this was known. We further show that both problems are hard in the geometric setting, even when restricted to trees. The problems remain hard for higher degrees of connectivity. On the other hand, we give polynomial-time algorithms for the special case of convex geometric graphs. We also study the following related problem. Given a planar (plane geometric) graph G, two vertices s and t of G, and an integer c, how many edges have to be added to G such that G is still planar (plane geometric) and contains c edge- (or vertex-) disjoint s-t paths? For the planar case we give a linear-time algorithm for c = 2. For the plane geometric case we give optimal worst-case bounds for c = 2; for c = 3 we characterize the cases that have a solution.}, language = {en} } @article{KrenzerHeilFittingetal., author = {Krenzer, Adrian and Heil, Stefan and Fitting, Daniel and Matti, Safa and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Automated classification of polyps using deep learning architectures and few-shot learning}, series = {BMC Medical Imaging}, volume = {23}, journal = {BMC Medical Imaging}, doi = {10.1186/s12880-023-01007-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357465}, abstract = {Background Colorectal cancer (CRC) is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is a colonoscopy. However, not all colon polyps have the risk of becoming cancerous. Therefore, polyps are classified using different classification systems. After the classification, further treatment and procedures are based on the classification of the polyp. Nevertheless, classification is not easy. Therefore, we suggest two novel automated classification systems assisting gastroenterologists in classifying polyps based on the NICE and Paris classifications. Methods We build two classification systems. One classifies polyps based on their shape (Paris). The other classifies polyps based on their texture and surface patterns (NICE). A two-step process for the Paris classification is introduced: first, detecting and cropping the polyp on the image, and second, classifying the polyp based on the cropped area with a transformer network. For the NICE classification, we design a few-shot learning algorithm based on the Deep Metric Learning approach. The algorithm creates an embedding space for polyps, which allows classification from a few examples to account for the data scarcity of NICE-annotated images in our database. Results For the Paris classification, we achieve an accuracy of 89.35 \%, surpassing all papers in the literature and establishing a new state-of-the-art and baseline accuracy for other publications on a public data set. For the NICE classification, we achieve a competitive accuracy of 81.13 \% and thereby demonstrate the viability of the few-shot learning paradigm in polyp classification in data-scarce environments. Additionally, we show different ablations of the algorithms.
Finally, we further elaborate on the explainability of the system by showing heat maps of the neural network that explain the neural activations. Conclusion Overall, we introduce two polyp classification systems to assist gastroenterologists. We achieve state-of-the-art performance in the Paris classification and demonstrate the viability of the few-shot learning paradigm in the NICE classification, addressing the prevalent data scarcity issues faced in medical machine learning.}, language = {en} } @article{BeckerCaminitiFiorellaetal.2013, author = {Becker, Martin and Caminiti, Saverio and Fiorella, Donato and Francis, Louise and Gravino, Pietro and Haklay, Mordechai (Muki) and Hotho, Andreas and Loreto, Vittorio and Mueller, Juergen and Ricchiuti, Ferdinando and Servedio, Vito D. P. and Sirbu, Alina and Tria, Francesca}, title = {Awareness and Learning in Participatory Noise Sensing}, series = {PLOS ONE}, volume = {8}, journal = {PLOS ONE}, number = {12}, issn = {1932-6203}, doi = {10.1371/journal.pone.0081638}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-127675}, pages = {e81638}, year = {2013}, abstract = {The development of ICT infrastructures has facilitated the emergence of new paradigms for looking at society and the environment over the last few years. Participatory environmental sensing, i.e. directly involving citizens in environmental monitoring, is one example, which is hoped to encourage learning and enhance awareness of environmental issues. In this paper, an analysis of the behaviour of individuals involved in noise sensing is presented. Citizens have been involved in noise measuring activities through the WideNoise smartphone application. This application has been designed to record both objective (noise samples) and subjective (opinions, feelings) data. The application has been open to be used freely by anyone and has been widely employed worldwide. In addition, several test cases have been organised in European countries. Based on the information submitted by users, an analysis of emerging awareness and learning is performed. The data show that changes in the way the environment is perceived after repeated usage of the application do appear. Specifically, users learn how to recognise different noise levels they are exposed to. Additionally, the subjective data collected indicate an increased user involvement over time and a categorisation effect between pleasant and less pleasant environments.}, language = {en} } @article{WienrichDoellingerHein2021, author = {Wienrich, Carolin and D{\"o}llinger, Nina and Hein, Rebecca}, title = {Behavioral Framework of Immersive Technologies (BehaveFIT): How and why virtual reality can support behavioral change processes}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.627194}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258796}, year = {2021}, abstract = {The design and evaluation of assisting technologies to support behavior change processes have become an essential topic within the field of human-computer interaction research in general and the field of immersive intervention technologies in particular. The mechanisms and success of behavior change techniques and interventions are broadly investigated in the field of psychology. However, it is not always easy to adapt these psychological findings to the context of immersive technologies.
The lack of theoretical foundation also leads to a lack of explanation as to why and how immersive interventions support behavior change processes. The Behavioral Framework for immersive Technologies (BehaveFIT) addresses this lack by 1) presenting an intelligible categorization and condensation of psychological barriers and immersive features, by 2) suggesting a mapping that shows why and how immersive technologies can help to overcome barriers, and finally by 3) proposing a generic prediction path that enables a structured, theory-based approach to the development and evaluation of immersive interventions. These three steps explain how BehaveFIT can be used, and include guiding questions for each step. Further, two use cases illustrate the usage of BehaveFIT. Thus, the present paper contributes to guidance for immersive intervention design and evaluation, showing that immersive interventions can support behavior change processes, and explaining and predicting 'why' and 'how' immersive interventions can bridge the intention-behavior gap.}, language = {en} } @unpublished{Dandekar2019, author = {Dandekar, Thomas}, title = {Biological heuristics applied to cosmology suggests a condensation nucleus as start of our universe and inflation cosmology replaced by a period of rapid Weiss domain-like crystal growth}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-183945}, pages = {24}, year = {2019}, abstract = {Cosmology often uses intricate formulas and mathematics to derive new theories and concepts. We do something different in this paper: We look at biological processes and derive heuristics from these, so that the revised cosmology agrees with astronomical observations and also with standard biological observations. We show that we then have to replace any type of singularity at the start of the universe by a condensation nucleus and that the very early period of the universe, usually assumed to be inflation, has to be replaced by a period of rapid crystal growth as in Weiss magnetization domains. Impressively, these minor modifications agree well with astronomical observations, including removing the strong inflation perturbations which were never observed in the recent BICEP2 experiments. Furthermore, looking at biological principles suggests that such a new theory, with a condensation nucleus at the start and a first rapid phase of magnetization-like growth of the ordered, physical-law-obeying lattice we live in, is in fact the only convincing theory of the early phases of our universe that is also compatible with current observations. We show in detail in the following that such a process of crystal creation, breaking of new crystal seeds and ultimate evaporation of the present crystal readily leads over several generations to an evolution and selection of better, more stable and more self-organizing crystals. Moreover, this explains the "fine-tuning" question of why our universe is fine-tuned to favor life: our Universe is self-organizing so as to have enough offspring, and the detailed physics involved is at the same time highly favorable for all self-organizing processes, including life. This biological theory contrasts with current standard inflation cosmologies. The latter do not perform well in explaining any phenomena of sophisticated structure creation or self-organization.
As proteins can only thermodynamically fold by increasing the entropy in the solution around them, we suggest for cosmology that a condensation nucleus for a universe can form only in a "chaotic ocean" of string-soup or quantum foam if the entropy outside of the nucleus rapidly increases. We derive an interaction potential for 1- to n-dimensional strings or quantum-foams and show that they allow only 1D, 2D, 4D or octonion interactions. The latter is the richest structure and agrees with the E8 symmetry fundamental to particle physics, and is also compatible with the ten-dimensional string theory E8, which is part of the M-theory. Interestingly, interactions of any other dimensionality can be ruled out using Hurwitz's composition theorem. Crystallization also explains extremely well why we have only one macroscopic reality and where the worldlines of alternative trajectories exist: they are in other planes of the crystal and, for energy reasons, they crystallize mostly at the same time, yielding a beautiful and stable crystal. This explains decoherence and allows determining the size of Planck's quantum h (very small, as the separation of crystal layers by energy is extremely strong). Ultimate dissolution of real crystals suggests an explanation for dark energy agreeing with estimates for the "big rip". The halo distribution of dark matter favoring galaxy formation is readily explained by a crystal seed starting with unit cells made of normal and dark matter. That we have only matter and not antimatter can be explained as there may be right-handed matter crystals and left-handed antimatter crystals. Similarly, real crystals are never perfect, and we argue that exactly such irregularities allow the formation of galaxies, clusters and superclusters. Finally, heuristics from genetics suggest taking a systems perspective to derive correct vacuum and Higgs boson energies.}, language = {en} } @article{PfitznerMayNuechter2018, author = {Pfitzner, Christian and May, Stefan and N{\"u}chter, Andreas}, title = {Body weight estimation for dose-finding and health monitoring of lying, standing and walking patients based on RGB-D data}, series = {Sensors}, volume = {18}, journal = {Sensors}, number = {5}, doi = {10.3390/s18051311}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176642}, pages = {1311}, year = {2018}, abstract = {This paper describes the estimation of the body weight of a person in front of an RGB-D camera. A survey of different methods for body weight estimation based on depth sensors is given. First, an estimation of people standing in front of a camera is presented. Second, an approach based on a stream of depth images is used to obtain the body weight of a person walking towards a sensor. The algorithm first extracts features from a point cloud and forwards them to an artificial neural network (ANN) to obtain an estimation of body weight. Besides the algorithm for the estimation, this paper further presents an open-access dataset based on measurements from a trauma room in a hospital as well as data from visitors of a public event. In total, the dataset contains 439 measurements. The article illustrates the efficiency of the approach with experiments with persons lying down in a hospital, standing persons, and walking persons.
Applicable scenarios for the presented algorithm include body weight-related dosing of emergency patients.}, language = {en} } @article{KirikkayisGallikWinteretal.2023, author = {Kirikkayis, Yusuf and Gallik, Florian and Winter, Michael and Reichert, Manfred}, title = {BPMNE4IoT: a framework for modeling, executing and monitoring IoT-driven processes}, series = {Future Internet}, volume = {15}, journal = {Future Internet}, number = {3}, issn = {1999-5903}, doi = {10.3390/fi15030090}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-304097}, year = {2023}, abstract = {The Internet of Things (IoT) enables a variety of smart applications, including smart home, smart manufacturing, and smart city. By enhancing Business Process Management Systems with IoT capabilities, the execution and monitoring of business processes can be significantly improved. Providing holistic support for modeling, executing and monitoring IoT-driven processes, however, constitutes a challenge. Existing process modeling and process execution languages, such as BPMN 2.0, are unable to fully meet the IoT characteristics (e.g., asynchronicity and parallelism) of IoT-driven processes. In this article, we present BPMNE4IoT—a holistic framework for modeling, executing and monitoring IoT-driven processes. We introduce various artifacts and events based on the BPMN 2.0 metamodel that allow realizing the desired IoT awareness of business processes. The framework is evaluated along two real-world scenarios from two different domains. Moreover, we present a user study comparing BPMNE4IoT and BPMN 2.0. In particular, this study has confirmed that the BPMNE4IoT framework facilitates the support of IoT-driven processes.}, language = {en} } @article{LugrinLatoschikHabeletal.2016, author = {Lugrin, Jean-Luc and Latoschik, Marc Erich and Habel, Michael and Roth, Daniel and Seufert, Christian and Grafe, Silke}, title = {Breaking Bad Behaviors: A New Tool for Learning Classroom Management Using Virtual Reality}, series = {Frontiers in ICT}, volume = {3}, journal = {Frontiers in ICT}, number = {26}, doi = {10.3389/fict.2016.00026}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147945}, year = {2016}, abstract = {This article presents an immersive virtual reality (VR) system for training classroom management skills, with a specific focus on learning to manage disruptive student behavior in face-to-face, one-to-many teaching scenarios. The core of the system is a real-time 3D virtual simulation of a classroom populated by twenty-four semi-autonomous virtual students. The system has been designed as a companion tool for classroom management seminars in a syllabus for primary and secondary school teachers. This will allow lecturers to link theory with practice using the medium of VR. The system is therefore designed for two users: a trainee teacher and an instructor supervising the training session. The teacher is immersed in a real-time 3D simulation of a classroom by means of a head-mounted display and headphones. The instructor operates a graphical desktop console, which renders a view of the class and of the teacher, whose avatar movements are captured by a markerless tracking system. This console includes a 2D graphics menu with convenient behavior and feedback control mechanisms to provide human-guided training sessions. The system is built using low-cost consumer hardware and software. Its architecture and technical design are described in detail.
A first evaluation confirms its conformance to critical usability requirements (i.e., safety and comfort, believability, simplicity, acceptability, extensibility, affordability, and mobility). Our initial results are promising and constitute the necessary first step toward a possible investigation of the efficiency and effectiveness of such a system in terms of learning outcomes and experience.}, language = {en} } @article{DoellingerWienrichLatoschik2021, author = {D{\"o}llinger, Nina and Wienrich, Carolin and Latoschik, Marc Erich}, title = {Challenges and opportunities of immersive technologies for mindfulness meditation: a systematic review}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.644683}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259047}, pages = {644683}, year = {2021}, abstract = {Mindfulness is considered an important factor of an individual's subjective well-being. Consequently, Human-Computer Interaction (HCI) has investigated approaches that strengthen mindfulness, i.e., by inventing multimedia technologies to support mindfulness meditation. These approaches often use smartphones, tablets, or consumer-grade desktop systems to allow everyday usage in users' private lives or in the scope of organized therapies. Virtual, Augmented, and Mixed Reality (VR, AR, MR; in short: XR) significantly extend the design space for such approaches. XR covers a wide range of potential sensory stimulation, perceptive and cognitive manipulations, content presentation, interaction, and agency. These facilities are linked to typical XR-specific perceptions that are conceptually closely related to mindfulness research, such as (virtual) presence and (virtual) embodiment. However, a successful exploitation of XR that strengthens mindfulness requires a systematic analysis of the potential interrelation and influencing mechanisms between XR technology, its properties, factors, and phenomena and existing models and theories of the construct of mindfulness. This article reports such a systematic analysis of XR-related research from HCI and life sciences to determine the extent to which existing research frameworks on HCI and mindfulness can be applied to XR technologies, the potential of XR technologies to support mindfulness, and open research gaps. Fifty papers of ACM Digital Library and National Institutes of Health's National Library of Medicine (PubMed) with and without empirical efficacy evaluation were included in our analysis. The results reveal that at the current time, empirical research on XR-based mindfulness support mainly focuses on therapy and therapeutic outcomes. Furthermore, most of the currently investigated XR-supported mindfulness interactions are limited to vocally guided meditations within nature-inspired virtual environments. While an analysis of empirical research on those systems did not reveal differences in mindfulness compared to non-mediated mindfulness practices, various design proposals illustrate that XR has the potential to provide interactive and body-based innovations for mindfulness practice. We propose a structured approach for future work to specify and further explore the potential of XR as mindfulness-support. 
The resulting framework provides design guidelines for XR-based mindfulness support based on the elements and psychological mechanisms of XR interactions.}, language = {en} } @techreport{NguyenLohHossfeld2023, type = {Working Paper}, author = {Nguyen, Kien and Loh, Frank and Hoßfeld, Tobias}, title = {Challenges of Serverless Deployment in Edge-MEC-Cloud}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32202}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322025}, pages = {4}, year = {2023}, abstract = {Emerging serverless computing may meet the Edge Cloud in a beneficial manner, as the two offer flexibility and dynamicity in optimizing finite hardware resources. However, the lack of a proper study of a joint platform leaves a gap in the literature about the consumption and performance of such an integration. To this end, this paper identifies the key questions and proposes a methodology to answer them.}, language = {en} } @phdthesis{Ullmann2015, author = {Ullmann, Tobias}, title = {Characterization of Arctic Environment by Means of Polarimetric Synthetic Aperture Radar (PolSAR) Data and Digital Elevation Models (DEM)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115719}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The ecosystem of the high northern latitudes is affected by the recently changing environmental conditions. The Arctic has undergone a significant climatic change over the last decades. The land coverage is changing and a phenological response to the warming is apparent. Remotely sensed data can assist the monitoring and quantification of these changes. The remote sensing of the Arctic was predominantly carried out using optical sensors, but these encounter problems in the Arctic environment, e.g. the frequent cloud cover or the solar geometry. In contrast, the imaging of Synthetic Aperture Radar is not affected by the cloud cover, and the acquisition of radar imagery is independent of the solar illumination. The objective of this work was to explore how polarimetric Synthetic Aperture Radar (PolSAR) data of TerraSAR-X, TanDEM-X, Radarsat-2 and ALOS PALSAR and interferometrically derived digital elevation model data of the TanDEM-X Mission can contribute to collecting meaningful information on the actual state of the Arctic environment. The study was conducted for Canadian sites of the Mackenzie Delta Region and Banks Island, and in situ reference data were available for the assessment. The up-to-date analysis of the PolSAR data made the application of the Non-Local Means filtering and of the decomposition of co-polarized data necessary. The Non-Local Means filter showed a high capability to preserve the image values, to keep the edges and to reduce the speckle. This supported the suitability not only for interpretation but also for classification. The classification accuracies of Non-Local Means filtered data were on average +10\% higher compared to unfiltered images. The correlation of the co- and quad-polarized decomposition features was high for classes with distinct surface or double bounce scattering, and the use of co-polarized data is beneficial for regions of natural land coverage and for low vegetation formations with little volume scattering.
The evaluation further revealed that the X- and C-Band were most sensitive to the generalized land cover classes. It was found that the X-Band data were sensitive to low vegetation formations with low shrub density, while the C-Band data were sensitive to the shrub density and the shrub-dominated tundra. In contrast, the L-Band data were less sensitive to the land cover. Among the different dual-polarized data, the HH/VV-polarized data were identified as the most meaningful for the characterization and classification, followed by the HH/HV-polarized and the VV/VH-polarized data. The quad-polarized data showed the highest sensitivity to the land cover, but differences from the co-polarized data were small. The accuracy assessment showed that spectral information was required for accurate land cover classification. The best results were obtained when spectral and radar information was combined. The benefit of including radar data in the classification was up to +15\% accuracy and most significant for the classes wetland and sparsely vegetated tundra. The best classifications were realized with quad-polarized C-Band and multispectral data and with co-polarized X-Band and multispectral data. The overall accuracy was up to 80\% for unsupervised and up to 90\% for supervised classifications. The results indicated that the shortwave co-polarized data show promise for the classification of tundra land cover, since the polarimetric information is sensitive to low vegetation and the wetlands. Furthermore, co-polarized data provide a higher spatial resolution than the quad-polarized data. The analysis of the intermediate digital elevation model data of the TanDEM-X showed a high potential for the characterization of the surface morphology. The basic and relative topographic features were shown to be of high relevance for the quantification of the surface morphology, and an area-wide application is feasible. In addition, these data were of value for the classification and delineation of landforms. Such classifications will assist the delineation of geomorphological units and have the potential to identify locations of actual and future morphologic activity.}, subject = {Mackenzie-River-Delta}, language = {en} } @article{PawellekKrmarLeistneretal.2021, author = {Pawellek, Ruben and Krmar, Jovana and Leistner, Adrian and Djajić, Nevena and Otašević, Biljana and Protić, Ana and Holzgrabe, Ulrike}, title = {Charged aerosol detector response modeling for fatty acids based on experimental settings and molecular features: a machine learning approach}, series = {Journal of Cheminformatics}, volume = {13}, journal = {Journal of Cheminformatics}, number = {1}, doi = {10.1186/s13321-021-00532-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-261618}, year = {2021}, abstract = {The charged aerosol detector (CAD) is the latest representative of aerosol-based detectors that generate a response independent of the analytes' chemical structure. This study was aimed at accurately predicting the CAD response of homologous fatty acids under varying experimental conditions. Fatty acids from C12 to C18 were used as model substances due to semivolatile characteristics that caused non-uniform CAD behaviour. Considering both experimental conditions and molecular descriptors, a mixed quantitative structure-property relationship (QSPR) modeling was performed using Gradient Boosted Trees (GBT).
The ensemble of 10 decision trees (learning rate set at 0.55, maximal depth set at 5, and sample rate set at 1.0) was able to explain approximately 99\% (Q\(^2\): 0.987, RMSE: 0.051) of the observed variance in CAD responses. Validation using an external test compound confirmed the high predictive ability of the established model (R\(^2\): 0.990, RMSEP: 0.050). With respect to the intrinsic attribute selection strategy, GBT used almost all independent variables during model building. Finally, it attributed the highest importance to the power function value, the flow rate of the mobile phase, the evaporation temperature, the content of the organic solvent in the mobile phase and molecular descriptors such as molecular weight (MW), Radial Distribution Function-080/weighted by mass (RDF080m) and average coefficient of the last eigenvector from distance/detour matrix (Ve2_D/Dt). The identification of the factors most relevant to CAD responsiveness has contributed to a better understanding of the underlying mechanisms of signal generation. The increased CAD response obtained for acetone as the organic modifier demonstrated its potential to replace the more expensive and environmentally harmful acetonitrile.}, language = {en} } @article{HentschelKobsHotho2022, author = {Hentschel, Simon and Kobs, Konstantin and Hotho, Andreas}, title = {CLIP knows image aesthetics}, series = {Frontiers in Artificial Intelligence}, volume = {5}, journal = {Frontiers in Artificial Intelligence}, issn = {2624-8212}, doi = {10.3389/frai.2022.976235}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-297150}, year = {2022}, abstract = {Most Image Aesthetic Assessment (IAA) methods use a pretrained ImageNet classification model as a base to fine-tune. We hypothesize that content classification is not an optimal pretraining task for IAA, since the task discourages the extraction of features that are useful for IAA, e.g., composition, lighting, or style. On the other hand, we argue that the Contrastive Language-Image Pretraining (CLIP) model is a better base for IAA models, since it has been trained using natural language supervision. Due to the rich nature of language, CLIP needs to learn a broad range of image features that correlate with sentences describing the image content, composition, environments, and even subjective feelings about the image. While it has been shown that CLIP extracts features useful for content classification tasks, its suitability for tasks that require the extraction of style-based features like IAA has not yet been shown. We test our hypothesis by conducting a three-step study, investigating the usefulness of features extracted by CLIP compared to features obtained from the last layer of a comparable ImageNet classification model. With each step, the evaluation becomes more computationally expensive. First, we engineer natural language prompts that let CLIP assess an image's aesthetics without adjusting any weights in the model. To overcome the challenge that CLIP's prompting is only applicable to classification tasks, we propose a simple but effective strategy to convert multiple prompts to a continuous scalar, as required when predicting an image's mean aesthetic score. Second, we train a linear regression on the AVA dataset using image features obtained by CLIP's image encoder. The resulting model outperforms a linear regression trained on features from an ImageNet classification model.
It also shows competitive performance with fully fine-tuned networks based on ImageNet, while only training a single layer. Finally, by fine-tuning CLIP's image encoder on the AVA dataset, we show that CLIP only needs a fraction of training epochs to converge, while also performing better than a fine-tuned ImageNet model. Overall, our experiments suggest that CLIP is better suited as a base model for IAA methods than ImageNet pretrained networks.}, language = {en} } @techreport{LeGrossmannKrieger2022, type = {Working Paper}, author = {Le, Duy Thanh and Großmann, Marcel and Krieger, Udo R.}, title = {Cloudless Resource Monitoring in a Fog Computing System Enabled by an SDN/NFV Infrastructure}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28072}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280723}, pages = {4}, year = {2022}, abstract = {Today's advanced Internet-of-Things applications raise technical challenges on cloud, edge, and fog computing. The design of an efficient, virtualized, context-aware, self-configuring orchestration system of a fog computing system constitutes a major development effort within this very innovative area of research. In this paper we describe the architecture and relevant implementation aspects of a cloudless resource monitoring system interworking with an SDN/NFV infrastructure. It realizes the basic monitoring component of the fundamental MAPE-K principles employed in autonomic computing. Here we present the hierarchical layering and functionality within the underlying fog nodes to generate a working prototype of an intelligent, self-managed orchestrator for advanced IoT applications and services. The latter system has the capability to monitor automatically various performance aspects of the resource allocation among multiple hosts of a fog computing system interconnected by SDN.}, subject = {Datennetz}, language = {en} } @article{SchokraieWarnkenHotzWagenblattetal.2012, author = {Schokraie, Elham and Warnken, Uwe and Hotz-Wagenblatt, Agnes and Grohme, Markus A. and Hengherr, Steffen and F{\"o}rster, Frank and Schill, Ralph O. and Frohme, Marcus and Dandekar, Thomas and Schn{\"o}lzer, Martina}, title = {Comparative proteome analysis of Milnesium tardigradum in early embryonic state versus adults in active and anhydrobiotic state}, series = {PLoS One}, volume = {7}, journal = {PLoS One}, number = {9}, doi = {10.1371/journal.pone.0045682}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134447}, pages = {e45682}, year = {2012}, abstract = {Tardigrades have fascinated researchers for more than 300 years because of their extraordinary capability to undergo cryptobiosis and survive extreme environmental conditions. However, the survival mechanisms of tardigrades are still poorly understood mainly due to the absence of detailed knowledge about the proteome and genome of these organisms. Our study was intended to provide a basis for the functional characterization of expressed proteins in different states of tardigrades. High-throughput, high-accuracy proteomics in combination with a newly developed tardigrade specific protein database resulted in the identification of more than 3000 proteins in three different states: early embryonic state and adult animals in active and anhydrobiotic state. 
This comprehensive proteome resource includes protein families such as chaperones, antioxidants, ribosomal proteins, cytoskeletal proteins, transporters, protein channels, nutrient reservoirs, and developmental proteins. A comparative analysis of protein families in the different states was performed by calculating the exponentially modified protein abundance index, which classifies proteins into major and minor components. This is the first step toward analyzing the proteins involved in early embryonic development and, furthermore, proteins which might play an important role in the transition into the anhydrobiotic state.}, language = {en} } @article{HossfeldHeegaardKellerer2023, author = {Hossfeld, Tobias and Heegaard, Poul E. and Kellerer, Wolfgang}, title = {Comparing the scalability of communication networks and systems}, series = {IEEE Access}, volume = {11}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2023.3314201}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349403}, pages = {101474-101497}, year = {2023}, abstract = {Scalability is often mentioned in the literature, but a stringent definition is missing. In particular, there is no general scalability assessment which clearly indicates whether a system scales or not, or whether a system scales better than another. The key contribution of this article is the definition of a scalability index (SI) which quantifies whether a system scales in comparison to another system, a hypothetical system, e.g., a linear system, or the theoretically optimal system. The suggested SI generalizes different metrics from the literature, which are specialized cases of our SI. The primary target of our scalability framework is, however, the benchmarking of two systems, which does not require any reference system. The SI is demonstrated and evaluated for different use cases: (1) the performance of an IoT load balancer depending on the system load, (2) the availability of a communication system depending on the size and structure of the network, (3) the scalability comparison of different location selection mechanisms in fog computing with respect to delays and energy consumption, and (4) the comparison of time-sensitive networking (TSN) mechanisms in terms of efficiency and utilization. Finally, we discuss how to use and how not to use the SI and give recommendations and guidelines in practice. To the best of our knowledge, this is the first work which provides a general SI for the comparison and benchmarking of systems, which is the primary target of our scalability analysis.}, language = {en} } @phdthesis{Spoerhase2009, author = {Spoerhase, Joachim}, title = {Competitive and Voting Location}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-52978}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {We consider competitive location problems where two competing providers place their facilities sequentially and users can decide between the competitors. We assume that both competitors act non-cooperatively and aim at maximizing their own benefits. We investigate the complexity and approximability of such problems on graphs, in particular on simple graph classes such as trees and paths. We also develop fast algorithms for single competitive location problems where each provider places a single facility. Voting location, in contrast, aims at identifying locations that meet social criteria. The provider wants to satisfy the users (customers) of the facility to be opened. In general, there is no location that is favored by all users.
Therefore, a satisfactory compromise has to be found. To this end, criteria arising from voting theory are considered. The solution of the location problem is understood as the winner of a virtual election among the users of the facilities, in which the potential locations play the role of the candidates and the users represent the voters. Competitive and voting location problems turn out to be closely related.}, subject = {Standortproblem}, language = {en} } @phdthesis{Kosub2001, author = {Kosub, Sven}, title = {Complexity and Partitions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2808}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Computational complexity theory usually investigates the complexity of sets, i.e., the complexity of partitions into two parts. But often it is more appropriate to represent natural problems by partitions into more than two parts. A particularly interesting class of such problems consists of classification problems for relations. For instance, a binary relation R typically defines a partitioning of the set of all pairs (x,y) into four parts, classifiable according to the cases where R(x,y) and R(y,x) hold, only R(x,y) or only R(y,x) holds, or even neither R(x,y) nor R(y,x) is true. By means of concrete classification problems such as Graph Embedding or Entailment (for propositional logic), this thesis systematically develops tools, in the shape of the Boolean hierarchy of NP-partitions and its refinements, for the qualitative analysis of the complexity of partitions generated by NP-relations. The Boolean hierarchy of NP-partitions is introduced as a generalization of the well-known and well-studied Boolean hierarchy (of sets) over NP. Whereas the latter hierarchy has a very simple structure, the situation is much more complicated for the case of partitions into at least three parts. To get an idea of this hierarchy, alternative descriptions of the partition classes are given in terms of finite, labeled lattices. Based on these characterizations, the Embedding Conjecture is established, providing complete information on the structure of the hierarchy. This conjecture is supported by several results. A natural extension of the Boolean hierarchy of NP-partitions emerges from the lattice characterization of its classes by considering partition classes generated by finite, labeled posets. It turns out that all significant ideas translate from the case of lattices. The induced refined Boolean hierarchy of NP-partitions enables us to capture the complexity of certain relations (such as Graph Embedding) more accurately and to describe projectively closed partition classes.}, subject = {Partition}, language = {en} } @article{BoehlerCreignouGalotaetal.2012, author = {B{\"o}hler, Elmar and Creignou, Nadia and Galota, Matthias and Reith, Steffen and Schnoor, Henning and Vollmer, Heribert}, title = {Complexity Classifications for Different Equivalence and Audit Problems for Boolean Circuits}, series = {Logical Methods in Computer Science}, volume = {8}, journal = {Logical Methods in Computer Science}, number = {3:27}, doi = {10.2168/LMCS-8(3:27)2012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-131121}, pages = {1 -- 25}, year = {2012}, abstract = {We study Boolean circuits as a representation of Boolean functions and consider different equivalence, audit, and enumeration problems.
For a number of restricted sets of gate types (bases), we obtain efficient algorithms, while for all other gate types we show that these problems are at least NP-hard.}, language = {en} } @article{SchererFleishmanJonesetal.2021, author = {Scherer, Marc and Fleishman, Sarel J. and Jones, Patrik R. and Dandekar, Thomas and Bencurova, Elena}, title = {Computational Enzyme Engineering Pipelines for Optimized Production of Renewable Chemicals}, series = {Frontiers in Bioengineering and Biotechnology}, volume = {9}, journal = {Frontiers in Bioengineering and Biotechnology}, issn = {2296-4185}, doi = {10.3389/fbioe.2021.673005}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240598}, year = {2021}, abstract = {To enable a sustainable supply of chemicals, novel biotechnological solutions are required that replace the reliance on fossil resources. One potential solution is to utilize tailored biosynthetic modules for the metabolic conversion of CO2 or organic waste to chemicals and fuel by microorganisms. Currently, it is challenging to commercialize biotechnological processes for renewable chemical biomanufacturing because of a lack of highly active and specific biocatalysts. As experimental methods to engineer biocatalysts are time- and cost-intensive, it is important to establish efficient and reliable computational tools that can speed up the identification or optimization of selective, highly active, and stable enzyme variants for utilization in the biotechnological industry. Here, we review and suggest combinations of effective state-of-the-art software and online tools available for computational enzyme engineering pipelines to optimize metabolic pathways for the biosynthesis of renewable chemicals. Using examples relevant for biotechnology, we explain the underlying principles of enzyme engineering and design and illuminate future directions for automated optimization of biocatalysts for the assembly of synthetic metabolic pathways.}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Computer-based Textual Documents Collation System for Reconstructing the Original Text from Automatically Identified Base Text and Ranked Witnesses}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65749}, year = {2011}, abstract = {Given a collection of diverging documents about some lost original text, any person interested in the text would try reconstructing it from the diverging documents. Whether it is eclecticism, stemmatics, or copy-text, one is expected to explicitly or indirectly select one of the documents as a starting point or as a base text, which could be emended through comparison with the remaining documents, so that a text that could be designated as the original document is generated. Unfortunately, the process of giving priority to one of the documents, also known as witnesses, is a subjective approach. In fact, even Cladistics, which could be considered a computer-based approach to implementing stemmatics, does not propose or recommend that users select a certain witness as a starting point for the process of reconstructing the original document. In this study, a computational method using a rule-based Bayesian classifier is used to assist text scholars in their attempts to reconstruct a non-existing document from some available witnesses. The method developed in this study consists of successively selecting a base text and collating it with the remaining documents.
Each completed collation cycle stores the selected base text and its closest witness, along with a weighted score of their similarities and differences. At the end of the collation process, the witness selected most often by the majority of base texts is considered the probable base text of the collection. Witnesses' scores are weighted using a weighting system based on the effects of the types of textual modifications on the process of reconstructing original documents. Users have the possibility to select between baseless and base text collation. If a base text is selected, the task is reduced to ranking the witnesses with respect to the base text; otherwise, a base text as well as a ranking of the witnesses with respect to it are computed and displayed on a bar diagram. Additionally, this study includes a recursive algorithm for automatically reconstructing the original text from the identified base text and ranked witnesses.}, subject = {Textvergleich}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Computing Generic Causes of Revelation of the Quranic Verses Using Machine Learning Techniques}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66083}, year = {2011}, abstract = {Because many verses of the holy Quran are similar, there is a high probability that similar verses addressing the same issues share the same generic causes of revelation. In this study, machine learning techniques have been employed in order to automatically derive the causes of revelation of Quranic verses. The derivation of the causes of revelation is viewed as a classification problem. Initially, the categories are based on the verses with known causes of revelation, and the testing set consists of the remaining verses. Based on a computed threshold value, a na{\"i}ve Bayesian classifier is used to categorize some verses. After that, using a decision tree classifier, the remaining uncategorized verses are separated into verses that contain indicators (resultative connectors, causative expressions…), and those that do not. As for those verses having indicators, each one is segmented into its constituent clauses by identification of the linking indicators. Then a dominant clause is extracted and considered either as the cause of revelation, or post-processed by adding or subtracting some terms to form a causal clause that constitutes the cause of revelation. Concerning the remaining unclassified verses without indicators, a naive Bayesian classifier is again used to assign each one of them to one of the existing classes based on feature and topic similarity. As for verses that could not be classified so far, manual classification was made by considering each verse as a category on its own.
The result obtained in this study is encouraging, and shows that automatic derivation of Quranic verses' generic causes of revelation is achievable, and reasonably reliable for understanding and implementing the teachings of the Quran.}, subject = {Text Mining}, language = {en} } @article{LatoschikWienrich2022, author = {Latoschik, Marc Erich and Wienrich, Carolin}, title = {Congruence and plausibility, not presence: pivotal conditions for XR experiences and effects, a novel approach}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.694433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284787}, year = {2022}, abstract = {Presence is often considered the most important quale describing the subjective feeling of being in a computer-generated and/or computer-mediated virtual environment. The identification and separation of orthogonal presence components, i.e., the place illusion and the plausibility illusion, has been an accepted theoretical model describing Virtual Reality (VR) experiences for some time. This perspective article challenges this presence-oriented VR theory. First, we argue that a place illusion cannot be the major construct to describe the much wider scope of virtual, augmented, and mixed reality (VR, AR, MR: or XR for short). Second, we argue that there is no plausibility illusion but merely plausibility, and we derive the place illusion caused by the congruent and plausible generation of spatial cues and similarly for all the current model's so-defined illusions. Finally, we propose congruence and plausibility to become the central essential conditions in a novel theoretical model describing XR experiences and effects.}, language = {en} } @phdthesis{Loeffler2021, author = {L{\"o}ffler, Andre}, title = {Constrained Graph Layouts: Vertices on the Outer Face and on the Integer Grid}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-146-4}, doi = {10.25972/WUP-978-3-95826-147-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215746}, school = {W{\"u}rzburg University Press}, pages = {viii, 161}, year = {2021}, abstract = {Constraining graph layouts - that is, restricting the placement of vertices and the routing of edges to obey certain constraints - is common practice in graph drawing. In this book, we discuss algorithmic results on two different restriction types: placing vertices on the outer face and on the integer grid. For the first type, we look into the outer k-planar and outer k-quasi-planar graphs, as well as giving a linear-time algorithm to recognize full and closed outer k-planar graphs using Monadic Second-order Logic.
For the second type, we consider the problem of transferring a given planar drawing onto the integer grid while preserving the original drawing's topology; we also generalize a variant of Cauchy's rigidity theorem for orthogonal polyhedra of genus 0 to those of arbitrary genus.}, subject = {Graphenzeichnen}, language = {en} } @article{GlemarecLugrinBosseretal.2022, author = {Gl{\´e}marec, Yann and Lugrin, Jean-Luc and Bosser, Anne-Gwenn and Buche, C{\´e}dric and Latoschik, Marc Erich}, title = {Controlling the stage: a high-level control system for virtual audiences in Virtual Reality}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.876433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284601}, year = {2022}, abstract = {This article presents a novel method for controlling a virtual audience system (VAS) in a Virtual Reality (VR) application, called STAGE, which was originally designed for supervised public speaking training in university seminars dedicated to the preparation and delivery of scientific talks. We are interested in creating pedagogical narratives: narratives encompass affective phenomena and rather than organizing events changing the course of a training scenario, pedagogical plans using our system focus on organizing the affects it arouses for the trainees. Efficiently controlling a virtual audience towards a specific training objective while evaluating the speaker's performance presents a challenge for a seminar instructor: the high level of cognitive and physical demands required to be able to control the virtual audience, whilst evaluating the speaker's performance, adjusting and allowing it to quickly react to the user's behaviors and interactions. It is indeed a critical limitation of a number of existing systems that they rely on a Wizard of Oz approach, where the tutor drives the audience in reaction to the user's performance. We address this problem by integrating with a VAS a high-level control component for tutors, which allows using predefined audience behavior rules, defining custom ones, as well as intervening during run-time for finer control of the unfolding of the pedagogical plan. At its core, this component offers a tool to program, select, modify and monitor interactive training narratives using a high-level representation. The STAGE offers the following features: i) a high-level API to program pedagogical narratives focusing on a specific public speaking situation and training objectives, ii) an interactive visualization interface, iii) computation and visualization of user metrics, iv) a semi-autonomous virtual audience composed of virtual spectators with automatic reactions to the speaker and surrounding spectators while following the pedagogical plan, v) and the possibility for the instructor to embody a virtual spectator to ask questions or guide the speaker from within the Virtual Environment.
We present here the design, and implementation of the tutoring system and its integration in STAGE, and discuss its reception by end-users.}, language = {en} } @article{SteiningerAbelZiegleretal.2023, author = {Steininger, Michael and Abel, Daniel and Ziegler, Katrin and Krause, Anna and Paeth, Heiko and Hotho, Andreas}, title = {ConvMOS: climate model output statistics with deep learning}, series = {Data Mining and Knowledge Discovery}, volume = {37}, journal = {Data Mining and Knowledge Discovery}, number = {1}, issn = {1384-5810}, doi = {10.1007/s10618-022-00877-6}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324213}, pages = {136-166}, year = {2023}, abstract = {Climate models are the tool of choice for scientists researching climate change. Like all models they suffer from errors, particularly systematic and location-specific representation errors. One way to reduce these errors is model output statistics (MOS) where the model output is fitted to observational data with machine learning. In this work, we assess the use of convolutional Deep Learning climate MOS approaches and present the ConvMOS architecture which is specifically designed based on the observation that there are systematic and location-specific errors in the precipitation estimates of climate models. We apply ConvMOS models to the simulated precipitation of the regional climate model REMO, showing that a combination of per-location model parameters for reducing location-specific errors and global model parameters for reducing systematic errors is indeed beneficial for MOS performance. We find that ConvMOS models can reduce errors considerably and perform significantly better than three commonly used MOS approaches and plain ResNet and U-Net models in most cases. Our results show that non-linear MOS models underestimate the number of extreme precipitation events, which we alleviate by training models specialized towards extreme precipitation events with the imbalanced regression method DenseLoss. While we consider climate MOS, we argue that aspects of ConvMOS may also be beneficial in other domains with geospatial data, such as air pollution modeling or weather forecasts.}, subject = {Klima}, language = {en} } @article{AtienzadeCastroCortesetal.2012, author = {Atienza, Nieves and de Castro, Natalia and Cort{\´e}s, Carmen and Garrido, M. {\´A}ngeles and Grima, Clara I. and Hern{\´a}ndez, Gregorio and M{\´a}rquez, Alberto and Moreno-Gonz{\´a}lez, Auxiliadora and N{\"o}llenburg, Martin and Portillo, Jos{\´e} Ram{\´o}n and Reyes, Pedro and Valenzuela, Jes{\´u}s and Trinidad Villar, Maria and Wolff, Alexander}, title = {Cover contact graphs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78845}, year = {2012}, abstract = {We study problems that arise in the context of covering certain geometric objects called seeds (e.g., points or disks) by a set of other geometric objects called cover (e.g., a set of disks or homothetic triangles). We insist that the interiors of the seeds and the cover elements are pairwise disjoint, respectively, but they can touch. We call the contact graph of a cover a cover contact graph (CCG). We are interested in three types of tasks, both in the general case and in the special case of seeds on a line: (a) deciding whether a given seed set has a connected CCG, (b) deciding whether a given graph has a realization as a CCG on a given seed set, and (c) bounding the sizes of certain classes of CCG's. 
Concerning (a) we give efficient algorithms for the case that seeds are points and show that the problem becomes hard if seeds and covers are disks. Concerning (b) we show that this problem is hard even for point seeds and disk covers (given a fixed correspondence between graph vertices and seeds). Concerning (c) we obtain upper and lower bounds on the number of CCG's for point seeds.}, subject = {Informatik}, language = {de} } @phdthesis{Fink2014, author = {Fink, Martin}, title = {Crossings, Curves, and Constraints in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, isbn = {978-3-95826-002-3 (print)}, doi = {10.25972/WUP-978-3-95826-003-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-98235}, school = {W{\"u}rzburg University Press}, pages = {222}, year = {2014}, abstract = {In many cases, problems, data, or information can be modeled as graphs. Graphs can be used as a tool for modeling in any case where connections between distinguishable objects occur. Any graph consists of a set of objects, called vertices, and a set of connections, called edges, such that any edge connects a pair of vertices. For example, a social network can be modeled by a graph by transforming the users of the network into vertices and friendship relations between users into edges. Also physical networks like computer networks or transportation networks, for example, the metro network of a city, can be seen as graphs. For making graphs and, thereby, the data that is modeled, well-understandable for users, we need a visualization. Graph drawing deals with algorithms for visualizing graphs. In this thesis, especially the use of crossings and curves is investigated for graph drawing problems under additional constraints. The constraints that occur in the problems investigated in this thesis especially restrict the positions of (a part of) the vertices; this is done either as a hard constraint or as an optimization criterion.}, subject = {Graphenzeichnen}, language = {en} } @techreport{Metzger2020, type = {Working Paper}, author = {Metzger, Florian}, title = {Crowdsensed QoE for the community - a concept to make QoE assessment accessible}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-203748}, pages = {7}, year = {2020}, abstract = {In recent years, several community testbeds as well as participatory sensing platforms have successfully established themselves to provide open data to everyone interested. Each of them has a specific goal in mind, ranging from collecting radio coverage data to environmental and radiation data. Such data can be used by the community in their decision making, whether to subscribe to a specific mobile phone service that provides good coverage in an area or to find a sunny and warm region for the summer holidays. However, the existing platforms usually limit themselves to directly measurable network QoS. If such a crowdsourced data set provides more in-depth derived measures, this would enable even better decision making. A community-driven crowdsensing platform that derives spatial application-layer user experience from resource-friendly bandwidth estimates would be such a case; video streaming services come to mind as a prime example. In this paper, we present a concept for such a system based on an initial prototype that eases the collection of data necessary to determine mobile-specific QoE at large scale.
In addition we reason why the simple quality metric proposed here can hold its own.}, subject = {Quality of Experience}, language = {en} } @article{DuLauterbachLietal.2020, author = {Du, Shitong and Lauterbach, Helge A. and Li, Xuyou and Demisse, Girum G. and Borrmann, Dorit and N{\"u}chter, Andreas}, title = {Curvefusion — A Method for Combining Estimated Trajectories with Applications to SLAM and Time-Calibration}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {23}, issn = {1424-8220}, doi = {10.3390/s20236918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219988}, year = {2020}, abstract = {Mapping and localization of mobile robots in an unknown environment are essential for most high-level operations like autonomous navigation or exploration. This paper presents a novel approach for combining estimated trajectories, namely curvefusion. The robot used in the experiments is equipped with a horizontally mounted 2D profiler, a constantly spinning 3D laser scanner and a GPS module. The proposed algorithm first combines trajectories from different sensors to optimize poses of the planar three degrees of freedom (DoF) trajectory, which is then fed into continuous-time simultaneous localization and mapping (SLAM) to further improve the trajectory. While state-of-the-art multi-sensor fusion methods mainly focus on probabilistic methods, our approach instead adopts a deformation-based method to optimize poses. To this end, a similarity metric for curved shapes is introduced into the robotics community to fuse the estimated trajectories. Additionally, a shape-based point correspondence estimation method is applied to the multi-sensor time calibration. Experiments show that the proposed fusion method can achieve relatively better accuracy, even if the error of the trajectory before fusion is large, which demonstrates that our method can still maintain a certain degree of accuracy in an environment where typical pose estimation methods have poor performance. In addition, the proposed time-calibration method also achieves high accuracy in estimating point correspondences.}, language = {en} } @techreport{RossiMaurelliUnnithanetal.2021, author = {Rossi, Angelo Pio and Maurelli, Francesco and Unnithan, Vikram and Dreger, Hendrik and Mathewos, Kedus and Pradhan, Nayan and Corbeanu, Dan-Andrei and Pozzobon, Riccardo and Massironi, Matteo and Ferrari, Sabrina and Pernechele, Claudia and Paoletti, Lorenzo and Simioni, Emanuele and Maurizio, Pajola and Santagata, Tommaso and Borrmann, Dorit and N{\"u}chter, Andreas and Bredenbeck, Anton and Zevering, Jasper and Arzberger, Fabian and Reyes Mantilla, Camilo Andr{\´e}s}, title = {DAEDALUS - Descent And Exploration in Deep Autonomy of Lava Underground Structures}, isbn = {978-3-945459-33-1}, issn = {1868-7466}, doi = {10.25972/OPUS-22791}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-227911}, pages = {188}, year = {2021}, abstract = {The DAEDALUS mission concept aims at exploring and characterising the entrance and initial part of Lunar lava tubes within a compact, tightly integrated spherical robotic device, with a complementary payload set and autonomous capabilities. The mission concept addresses specifically the identification and characterisation of potential resources for future ESA exploration, the local environment of the subsurface and its geologic and compositional structure. A sphere is ideally suited to protect sensors and scientific equipment in rough, uneven environments. 
It will house laser scanners, cameras and ancillary payloads. The sphere will be lowered into the skylight and will explore the entrance shaft, associated caverns and conduits. Lidar (light detection and ranging) systems produce 3D models with high spatial accuracy independent of lighting conditions and visible features. Hence this will be the primary exploration toolset within the sphere. The additional payload that can be accommodated in the robotic sphere consists of camera systems with panoramic lenses and scanners such as multi-wavelength or single-photon scanners. A moving mass will trigger movements. The tether for lowering the sphere will be used for data communication and powering the equipment during the descending phase. Furthermore, the connector tether-sphere will host a WIFI access point, such that data of the conduit can be transferred to the surface relay station. During the exploration phase, the robot will be disconnected from the cable, and will use wireless communication. Emergency autonomy software will ensure that in case of loss of communication, the robot will continue the nominal mission.}, subject = {Mond}, language = {en} } @techreport{RaffeckGeisslerHossfeld2022, type = {Working Paper}, author = {Raffeck, Simon and Geißler, Stefan and Hoßfeld, Tobias}, title = {DBM: Decentralized Burst Mitigation for Self-Organizing LoRa Deployments}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28080}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280809}, pages = {4}, year = {2022}, abstract = {This work proposes a novel approach to disperse dense transmission intervals and reduce bursty traffic patterns without the need for centralized control. Furthermore, by keeping the mechanism as close to the Long Range Wide Area Network (LoRaWAN) standard as possible the suggested mechanism can be deployed within existing networks and can even be co-deployed with other devices.}, subject = {Datennetz}, language = {en} } @article{PetschkeStaab2019, author = {Petschke, Danny and Staab, Torsten E.M.}, title = {DDRS4PALS: a software for the acquisition and simulation of lifetime spectra using the DRS4 evaluation board}, series = {SoftwareX}, volume = {10}, journal = {SoftwareX}, doi = {10.1016/j.softx.2019.100261}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-202276}, pages = {100261}, year = {2019}, abstract = {Lifetime techniques are applied to diverse fields of study including materials sciences, semiconductor physics, biology, molecular biophysics and photochemistry. Here we present DDRS4PALS, a software for the acquisition and simulation of lifetime spectra using the DRS4 evaluation board (Paul Scherrer Institute, Switzerland) for time resolved measurements and digitization of detector output pulses. Artifact afflicted pulses can be corrected or rejected prior to the lifetime calculation to provide the generation of high-quality lifetime spectra, which are crucial for a profound analysis, i.e. the decomposition of the true information. Moreover, the pulses can be streamed on an (external) hard drive during the measurement and subsequently downloaded in the offline mode without being connected to the hardware. This allows the generation of various lifetime spectra at different configurations from one single measurement and, hence, a meaningful comparison in terms of analyzability and quality. 
Parallel processing and an integrated JavaScript-based language provide convenient options to accelerate and automate time-consuming processes such as lifetime spectra simulations.}, language = {en} } @article{AliMontenegro2016, author = {Ali, Qasim and Montenegro, Sergio}, title = {Decentralized control for scalable quadcopter formations}, series = {International Journal of Aerospace Engineering}, volume = {2016}, journal = {International Journal of Aerospace Engineering}, doi = {10.1155/2016/9108983}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146704}, pages = {9108983}, year = {2016}, abstract = {An innovative framework has been developed for the teamwork of two quadcopter formations, each having its specified formation geometry, assigned task, and matching control scheme. Position control for quadcopters in one of the formations has been implemented through a Linear Quadratic Regulator Proportional Integral (LQR PI) control scheme based on an explicit model-following scheme. Quadcopters in the other formation are controlled through an LQR PI servomechanism control scheme. These two control schemes are compared in terms of their performance and control effort. Both formations are commanded by respective ground stations through virtual leaders. Quadcopters in formations are able to track desired trajectories as well as to hover at desired points for a selected time duration. In case of communication loss between the ground station and any of the quadcopters, the neighboring quadcopter provides the command data, received from the ground station, to the affected unit. The proposed control schemes have been validated through extensive simulations using MATLAB®/Simulink®, which provided favorable results.}, language = {en} } @article{MuellerLeppichGeissetal.2023, author = {M{\"u}ller, Konstantin and Leppich, Robert and Geiß, Christian and Borst, Vanessa and Pelizari, Patrick Aravena and Kounev, Samuel and Taubenb{\"o}ck, Hannes}, title = {Deep neural network regression for normalized digital surface model generation with Sentinel-2 imagery}, series = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, volume = {16}, journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, issn = {1939-1404}, doi = {10.1109/JSTARS.2023.3297710}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349424}, pages = {8508-8519}, year = {2023}, abstract = {In recent history, normalized digital surface models (nDSMs) have been constantly gaining importance as a means to solve large-scale geographic problems. High-resolution surface models are precious, as they can provide detailed information for a specific area. However, measurements with a high resolution are time-consuming and costly. Only a few approaches exist to create high-resolution nDSMs for extensive areas. This article explores approaches to extract high-resolution nDSMs from low-resolution Sentinel-2 data, allowing us to derive large-scale models. We thereby utilize the advantages of Sentinel 2 being open access, having global coverage, and providing steady updates through a high repetition rate. Several deep learning models are trained to overcome the gap in producing high-resolution surface maps from low-resolution input data. With U-Net as a base architecture, we extend the capabilities of our model by integrating tailored multiscale encoders with differently sized kernels in the convolution as well as conformed self-attention inside the skip connection gates.
Using pixelwise regression, our U-Net base models can achieve a mean height error of approximately 2 m. Moreover, through our enhancements to the model architecture, we reduce the model error by more than 7\%.}, language = {en} } @phdthesis{Nogatz2023, author = {Nogatz, Falco}, title = {Defining and Implementing Domain-Specific Languages with Prolog}, doi = {10.25972/OPUS-30187}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301872}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The landscape of today's programming languages is manifold. With the diversity of applications, the difficulty of adequately addressing and specifying the used programs increases. This often leads to newly designed and implemented domain-specific languages. They enable domain experts to express knowledge in their preferred format, resulting in more readable and concise programs. Due to its flexible and declarative syntax without reserved keywords, the logic programming language Prolog is particularly suitable for defining and embedding domain-specific languages. This thesis addresses the questions and challenges that arise when integrating domain-specific languages into Prolog. We compare the two approaches to define them either externally or internally, and provide assisting tools for each. The grammar of a formal language is usually defined in the extended Backus-Naur form. In this work, we handle this formalism as a domain-specific language in Prolog, and define term expansions that allow it to be translated into equivalent definite clause grammars. We present the package library(dcg4pt) for SWI-Prolog, which enriches them with an additional argument to automatically process the term's corresponding parse tree. To simplify the work with definite clause grammars, we visualise their application by a web-based tracer. The external integration of domain-specific languages requires the programmer to keep the grammar, parser, and interpreter in sync. In many cases, domain-specific languages can instead be directly embedded into Prolog by providing appropriate operator definitions. In addition, we propose syntactic extensions for Prolog to expand its expressiveness, for instance to state logic formulas with their connectives verbatim. This makes it possible to use all tools that were originally written for Prolog, for instance code linters and editors with syntax highlighting. We present the package library(plammar), a standard-compliant parser for Prolog source code, written in Prolog. It is able to automatically infer from example sentences the required operator definitions with their classes and precedences as well as the required Prolog language extensions. As a result, we can automatically answer the question: Is it possible to model these example sentences as valid Prolog clauses, and how? We discuss and apply the two approaches to internal and external integrations for several domain-specific languages, namely the extended Backus-Naur form, GraphQL, XPath, and a controlled natural language to represent expert rules in if-then form.
The created toolchain with library(dcg4pt) and library(plammar) yields new application opportunities for static Prolog source code analysis, which we also present.}, subject = {PROLOG }, language = {en} } @article{SeufertSchroederSeufert2021, author = {Seufert, Anika and Schr{\"o}der, Svenja and Seufert, Michael}, title = {Delivering User Experience over Networks: Towards a Quality of Experience Centered Design Cycle for Improved Design of Networked Applications}, series = {SN Computer Science}, volume = {2}, journal = {SN Computer Science}, number = {6}, issn = {2661-8907}, doi = {10.1007/s42979-021-00851-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-271762}, year = {2021}, abstract = {To deliver the best user experience (UX), the human-centered design cycle (HCDC) serves as a well-established guideline to application developers. However, it does not yet cover network-specific requirements, which become increasingly crucial, as most applications deliver experience over the Internet. The missing network-centric view is provided by Quality of Experience (QoE), which could team up with UX towards an improved overall experience. By considering QoE aspects during the development process, it can be achieved that applications become network-aware by design. In this paper, the Quality of Experience Centered Design Cycle (QoE-CDC) is proposed, which provides guidelines on how to design applications with respect to network-specific requirements and QoE. Its practical value is showcased for popular application types and validated by outlining the design of a new smartphone application. We show that combining HCDC and QoE-CDC will result in an application design, which reaches a high UX and avoids QoE degradation.}, language = {en} } @article{SteiningerKobsDavidsonetal.2021, author = {Steininger, Michael and Kobs, Konstantin and Davidson, Padraig and Krause, Anna and Hotho, Andreas}, title = {Density-based weighting for imbalanced regression}, series = {Machine Learning}, volume = {110}, journal = {Machine Learning}, number = {8}, issn = {1573-0565}, doi = {10.1007/s10994-021-06023-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269177}, pages = {2187-2211}, year = {2021}, abstract = {In many real world settings, imbalanced data impedes model performance of learning algorithms, like neural networks, mostly for rare cases. This is especially problematic for tasks focusing on these rare occurrences. For example, when estimating precipitation, extreme rainfall events are scarce but important considering their potential consequences. While there are numerous well studied solutions for classification settings, most of them cannot be applied to regression easily. Of the few solutions for regression tasks, barely any have explored cost-sensitive learning which is known to have advantages compared to sampling-based methods in classification tasks. In this work, we propose a sample weighting approach for imbalanced regression datasets called DenseWeight and a cost-sensitive learning approach for neural network regression with imbalanced data called DenseLoss based on our weighting scheme. DenseWeight weights data points according to their target value rarities through kernel density estimation (KDE). DenseLoss adjusts each data point's influence on the loss according to DenseWeight, giving rare data points more influence on model training compared to common data points. 
We show on multiple differently distributed datasets that DenseLoss significantly improves model performance for rare data points through its density-based weighting scheme. Additionally, we compare DenseLoss to the state-of-the-art method SMOGN, finding that our method mostly yields better performance. Our approach provides more control over model training as it enables us to actively decide on the trade-off between focusing on common or rare cases through a single hyperparameter, allowing the training of better models for rare data points.}, language = {en} } @phdthesis{Klein2014, author = {Klein, Dominik Werner}, title = {Design and Evaluation of Components for Future Internet Architectures}, issn = {1432-8801}, doi = {10.25972/OPUS-9313}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-93134}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Die derzeitige Internetarchitektur wurde nicht in einem geplanten Prozess konzipiert und entwickelt, sondern hat vielmehr eine evolutionsartige Entwicklung hinter sich. Ausl{\"o}ser f{\"u}r die jeweiligen Evolutionsschritte waren dabei meist aufstrebende Anwendungen, welche neue Anforderungen an die zugrundeliegende Netzarchitektur gestellt haben. Um diese Anforderungen zu erf{\"u}llen, wurden h{\"a}ufig neuartige Dienste oder Protokolle spezifiziert und in die bestehende Architektur integriert. Dieser Prozess ist jedoch meist mit hohem Aufwand verbunden und daher sehr tr{\"a}ge, was die Entwicklung und Verbreitung innovativer Dienste beeintr{\"a}chtigt. Derzeitig diskutierte Konzepte wie Software-Defined Networking (SDN) oder Netzvirtualisierung (NV) werden als eine M{\"o}glichkeit angesehen, die Altlasten der bestehenden Internetarchitektur zu l{\"o}sen. Beiden Konzepten gemein ist die Idee, logische Netze {\"u}ber dem physikalischen Substrat zu betreiben. Diese logischen Netze sind hochdynamisch und k{\"o}nnen so flexibel an die Anforderungen der jeweiligen Anwendungen angepasst werden. Insbesondere erlaubt das Konzept der Virtualisierung intelligentere Netzknoten, was innovative neue Anwendungsf{\"a}lle erm{\"o}glicht. Ein h{\"a}ufig in diesem Zusammenhang diskutierter Anwendungsfall ist die Mobilit{\"a}t sowohl von Endger{\"a}ten als auch von Diensten an sich. Die Mobilit{\"a}t der Dienste wird hierbei ausgenutzt, um die Zugriffsverz{\"o}gerung oder die belegten Ressourcen im Netz zu reduzieren, indem die Dienste zum Beispiel in f{\"u}r den Nutzer geographisch nahe Datenzentren migriert werden. Neben den reinen Mechanismen bez{\"u}glich Dienst- und Endger{\"a}temobilit{\"a}t sind in diesem Zusammenhang auch geeignete {\"U}berwachungsl{\"o}sungen relevant, welche die vom Nutzer wahrgenommene Dienstg{\"u}te bewerten k{\"o}nnen. Diese L{\"o}sungen liefern wichtige Entscheidungshilfen f{\"u}r die Migration oder {\"u}berwachen m{\"o}gliche Effekte der Migration auf die erfahrene Dienstg{\"u}te beim Nutzer. Im Falle von Video Streaming erm{\"o}glicht ein solcher Anwendungsfall die flexible Anpassung der Streaming Topologie f{\"u}r mobile Nutzer, um so die Videoqualit{\"a}t unabh{\"a}ngig vom Zugangsnetz aufrechterhalten zu k{\"o}nnen. Im Rahmen dieser Doktorarbeit wird der beschriebene Anwendungsfall am Beispiel einer Video Streaming Anwendung n{\"a}her analysiert und auftretende Herausforderungen werden diskutiert. Des Weiteren werden L{\"o}sungsans{\"a}tze vorgestellt und bez{\"u}glich ihrer Effizienz ausgewertet. 
Im Detail besch{\"a}ftigt sich die Arbeit mit der Leistungsanalyse von Mechanismen f{\"u}r die Dienstmobilit{\"a}t und entwickelt eine Architektur zur Optimierung der Dienstmobilit{\"a}t. Im Bereich Endger{\"a}temobilit{\"a}t werden Verbesserungen entwickelt, welche die Latenz zwischen Endger{\"a}t und Dienst reduzieren oder die Konnektivit{\"a}t unabh{\"a}ngig vom Zugangsnetz gew{\"a}hrleisten. Im letzten Teilbereich wird eine L{\"o}sung zur {\"U}berwachung der Videoqualit{\"a}t im Netz entwickelt und bez{\"u}glich ihrer Genauigkeit analysiert.}, subject = {Leistungsbewertung}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Design and Implementation of a Model-driven XML-based Integrated System Architecture for Assisting Analysis, Understanding, and Retention of Religious Texts:The Case of The Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65737}, year = {2011}, abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. Once deeper understanding is achieved, one would like to organize and manage her/his knowledge in such a way that, it could be easily remembered and efficiently transmitted to others. This paper discusses about modeling religious texts using semantic XML markup based on frame-based knowledge representation, with the purpose of assisting understanding, retention, and sharing of knowledge they contain. In this study, books organized in terms of chapters made up of verses are considered as the source of knowledge to model. Some metadata representing the multiple perspectives of knowledge modeling are assigned to each chapter and verse. Chapters and verses with their metadata form a meta-model, which is represented using frames, and published on a web mashup. An XML-based annotation and visualization system equipped with user interfaces for creating static and dynamic metadata, annotating chapters' contents according to user selected semantics, and templates for publishing generated knowledge on the Internet, has been developed. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts, in order to support analysis, understanding, and retention of the texts.}, subject = {Wissensrepr{\"a}sentation}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Design and Implementation of Architectures for Interactive Textual Documents Collation Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56601}, year = {2011}, abstract = {One of the main purposes of textual documents collation is to identify a base text or closest witness to the base text, by analyzing and interpreting differences also known as types of changes that might exist between those documents. Based on this fact, it is reasonable to argue that, explicit identification of types of changes such as deletions, additions, transpositions, and mutations should be part of the collation process. The identification could be carried out by an interpretation module after alignment has taken place. Unfortunately existing collation software such as CollateX1 and Juxta2's collation engine do not have interpretation modules. In fact they implement the Gothenburg model [1] for collation process which does not include an interpretation unit. 
Currently both CollateX and Juxta's collation engine do not distinguish in their critical apparatus between the types of changes, and do not offer statistics about those changes. This paper presents a model for both integrated and distributed collation processes that improves the Gothenburg model. The model introduces an interpretation component for computing and distinguishing between the types of changes that documents could have undergone. Moreover two architectures implementing the model in order to solve the problem of interactive collation are discussed as well. Each architecture uses CollateX library, and provides on the one hand preprocessing functions for transforming input documents into CollateX input format, and on the other hand a post-processing module for enabling interactive collation. Finally simple algorithms for distinguishing between types of changes, and linking collated source documents with the collation results are also introduced.}, subject = {Softwarearchitektur}, language = {en} } @misc{Kaempgen2009, type = {Master Thesis}, author = {Kaempgen, Benedikt}, title = {Deskriptives Data-Mining f{\"u}r Entscheidungstr{\"a}ger: Eine Mehrfachfallstudie}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46343}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {Das Potenzial der Wissensentdeckung in Daten wird h{\"a}ufig nicht ausgenutzt, was haupts{\"a}chlich auf Barrieren zwischen dem Entwicklerteam und dem Endnutzer des Data-Mining zur{\"u}ckzuf{\"u}hren ist. In dieser Arbeit wird ein transparenter Ansatz zum Beschreiben und Erkl{\"a}ren von Daten f{\"u}r Entscheidungstr{\"a}ger vorgestellt. In Entscheidungstr{\"a}ger-zentrierten Aufgaben werden die Projektanforderungen definiert und die Ergebnisse zu einer Geschichte zusammengestellt. Eine Anforderung besteht dabei aus einem tabellarischen Bericht und ggf. Mustern in seinem Inhalt, jeweils verst{\"a}ndlich f{\"u}r einen Entscheidungstr{\"a}ger. Die technischen Aufgaben bestehen aus einer Datenpr{\"u}fung, der Integration der Daten in einem Data-Warehouse sowie dem Generieren von Berichten und dem Entdecken von Mustern wie in den Anforderungen beschrieben. Mehrere Data-Mining-Projekte k{\"o}nnen durch Wissensmanagement sowie eine geeignete Infrastruktur voneinander profitieren. Der Ansatz wurde in zwei Projekten unter Verwendung von ausschließlich Open-Source-Software angewendet.}, subject = {Data Mining}, language = {de} } @article{WienrichCarolus2021, author = {Wienrich, Carolin and Carolus, Astrid}, title = {Development of an Instrument to Measure Conceptualizations and Competencies About Conversational Agents on the Example of Smart Speakers}, series = {Frontiers in Computer Science}, volume = {3}, journal = {Frontiers in Computer Science}, doi = {10.3389/fcomp.2021.685277}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260198}, year = {2021}, abstract = {The concept of digital literacy has been introduced as a new cultural technique, which is regarded as essential for successful participation in a (future) digitized world. Regarding the increasing importance of AI, literacy concepts need to be extended to account for AI-related specifics. The easy handling of the systems results in increased usage, contrasting limited conceptualizations (e.g., imagination of future importance) and competencies (e.g., knowledge about functional principles). 
In reference to voice-based conversational agents as a concrete application of AI, the present paper aims for the development of a measurement to assess the conceptualizations and competencies about conversational agents. In a first step, a theoretical framework of "AI literacy" is transferred to the context of conversational agent literacy. Second, the "conversational agent literacy scale" (short CALS) is developed, constituting the first attempt to measure interindividual differences in the "(il) literate" usage of conversational agents. 29 items were derived, of which 170 participants answered. An explanatory factor analysis identified five factors leading to five subscales to assess CAL: storage and transfer of the smart speaker's data input; smart speaker's functional principles; smart speaker's intelligent functions, learning abilities; smart speaker's reach and potential; smart speaker's technological (surrounding) infrastructure. Preliminary insights into construct validity and reliability of CALS showed satisfying results. Third, using the newly developed instrument, a student sample's CAL was assessed, revealing intermediated values. Remarkably, owning a smart speaker did not lead to higher CAL scores, confirming our basic assumption that usage of systems does not guarantee enlightened conceptualizations and competencies. In sum, the paper contributes to the first insights into the operationalization and understanding of CAL as a specific subdomain of AI-related competencies.}, language = {en} } @phdthesis{Binder2006, author = {Binder, Andreas}, title = {Die stochastische Wissenschaft und zwei Teilsysteme eines Web-basierten Informations- und Anwendungssystems zu ihrer Etablierung}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26146}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Das stochastische Denken, die Bernoullische Stochastik und dessen informationstechnologische Umsetzung, namens Stochastikon stellen die Grundlage f{\"u}r das Verst{\"a}ndnis und die erfolgreiche Nutzung einer stochastischen Wissenschaft dar. Im Rahmen dieser Arbeit erfolgt eine Kl{\"a}rung des Begriffs des stochastischen Denkens, eine anschauliche Darstellung der von Elart von Collani entwickelten Bernoullischen Stochastik und eine Beschreibung von Stochastikon. Dabei werden sowohl das Gesamtkonzept von Stochastikon, sowie die Ziele, Aufgaben und die Realisierung der beiden Teilsysteme namens Mentor und Encyclopedia vorgestellt. Das stochastische Denken erlaubt eine realit{\"a}tsnahe Sichtweise der Dinge, d.h. eine Sichtweise, die mit den menschlichen Beobachtungen und Erfahrungen im Einklang steht und somit die Unsicherheit {\"u}ber zuk{\"u}nftige Entwicklungen ber{\"u}cksichtigt. Der in diesem Kontext verwendete Begriff der Unsicherheit bezieht sich ausschließlich auf zuk{\"u}nftige Entwicklungen und {\"a}ußert sich in Variabilit{\"a}t. Quellen der Unsicherheit sind einerseits die menschliche Ignoranz und andererseits der Zufall. Unter Ignoranz wird hierbei die Unwissenheit des Menschen {\"u}ber die unbekannten, aber feststehenden Fakten verstanden, die die Anfangsbedingungen der zuk{\"u}nftigen Entwicklung repr{\"a}sentieren. Die Bernoullische Stochastik liefert ein Regelwerk und erm{\"o}glicht die Entwicklung eines quantitativen Modells zur Beschreibung der Unsicherheit und expliziter Einbeziehung der beiden Quellen Ignoranz und Zufall. 
Das Modell tr{\"a}gt den Namen Bernoulli-Raum und bildet die Grundlage f{\"u}r die Herleitung quantitativer Verfahren, um zuverl{\"a}ssige und genaue Aussagen sowohl {\"u}ber die nicht-existente zuf{\"a}llige Zukunft (Vorhersageverfahren), als auch {\"u}ber die unbekannte feststehende Vergangenheit (Messverfahren). Das Softwaresystem Stochastikon implementiert die Bernoullische Stochastik in Form einer Reihe autarker, miteinander kommunizierender Teilsysteme. Ziel des Teilsystems Encyclopedia ist die Bereitstellung und Bewertung stochastischen Wissens. Das Teilsystem Mentor dient der Unterst{\"u}tzung des Anwenders bei der Probleml{\"o}sungsfindung durch Identifikation eines richtigen Modells bzw. eines korrekten Bernoulli-Raums. Der L{\"o}sungsfindungsprozess selber enth{\"a}lt keinerlei Unsicherheit. Die ganze Unsicherheit steckt in der L{\"o}sung, d.h. im Bernoulli-Raum, der explizit die vorhandene Unwissenheit (Ignoranz) und den vorliegenden Zufall abdeckend enth{\"a}lt.}, subject = {Stochastik}, language = {de} } @article{KruegerFriedrichFoersteretal.2012, author = {Krueger, Beate and Friedrich, Torben and F{\"o}rster, Frank and Bernhardt, J{\"o}rg and Gross, Roy and Dandekar, Thomas}, title = {Different evolutionary modifications as a guide to rewire two-component systems}, series = {Bioinformatics and Biology Insights}, volume = {6}, journal = {Bioinformatics and Biology Insights}, doi = {10.4137/BBI.S9356}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-123647}, pages = {97-128}, year = {2012}, abstract = {Two-component systems (TCS) are short signalling pathways generally occurring in prokaryotes. They frequently regulate prokaryotic stimulus responses and thus are also of interest for engineering in biotechnology and synthetic biology. The aim of this study is to better understand and describe rewiring of TCS while investigating different evolutionary scenarios. Based on large-scale screens of TCS in different organisms, this study gives detailed data, concrete alignments, and structure analysis on three general modification scenarios, where TCS were rewired for new responses and functions: (i) exchanges in the sequence within single TCS domains, (ii) exchange of whole TCS domains; (iii) addition of new components modulating TCS function. As a result, the replacement of stimulus and promotor cassettes to rewire TCS is well defined exploiting the alignments given here. The diverged TCS examples are non-trivial and the design is challenging. Designed connector proteins may also be useful to modify TCS in selected cases.}, language = {en} }