@article{ZirkelCecilSchaeferetal.2012, author = {Zirkel, J. and Cecil, A. and Sch{\"a}fer, F. and Rahlfs, S. and Ouedraogo, A. and Xiao, K. and Sawadogo, S. and Coulibaly, B. and Becker, K. and Dandekar, T.}, title = {Analyzing Thiol-Dependent Redox Networks in the Presence of Methylene Blue and Other Antimalarial Agents with RT-PCR-Supported in silico Modeling}, series = {Bioinformatics and Biology Insights}, volume = {6}, journal = {Bioinformatics and Biology Insights}, doi = {10.4137/BBI.S10193}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-123751}, pages = {287-302}, year = {2012}, abstract = {BACKGROUND: In the face of growing resistance in malaria parasites to drugs, pharmacological combination therapies are important. There is accumulating evidence that methylene blue (MB) is an effective drug against malaria. Here we explore the biological effects of both MB alone and in combination therapy using modeling and experimental data. RESULTS: We built a model of the central metabolic pathways in P. falciparum. Metabolic flux modes and their changes under MB were calculated by integrating experimental data (RT-PCR data on mRNAs for redox enzymes) as constraints and results from the YANA software package for metabolic pathway calculations. Several different lines of MB attack on Plasmodium redox defense were identified by analysis of the network effects. Next, chloroquine resistance based on pfmdr and pfcrt transporters, as well as pyrimethamine/sulfadoxine resistance (by mutations in DHF/DHPS), were modeled in silico. Further modeling shows that MB has a favorable synergism on antimalarial network effects with these commonly used antimalarial drugs. CONCLUSIONS: Theoretical and experimental results support that methylene blue should, because of its resistance-breaking potential, be further tested as a key component in drug combination therapy efforts in holoendemic areas.}, language = {en} } @phdthesis{Kindermann2016, author = {Kindermann, Philipp}, title = {Angular Schematization in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-020-7 (print)}, doi = {10.25972/WUP-978-3-95826-021-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112549}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {184}, year = {2016}, abstract = {Graphs are a frequently used tool to model relationships among entities. A graph is a binary relation between objects, that is, it consists of a set of objects (vertices) and a set of pairs of objects (edges). Networks are common examples of data modeled as a graph. For example, relationships between persons in a social network, or network links between computers in a telecommunication network, can be represented by a graph. The clearest way to illustrate the modeled data is to visualize the graphs. The field of Graph Drawing deals with the problem of finding algorithms to automatically generate graph visualizations. The task is to find a "good" drawing, whose quality can be measured by different criteria such as the number of crossings between edges or the area used. In this thesis, we study Angular Schematization in Graph Drawing. By this, we mean drawings with large angles (for example, between the edges at common vertices or at crossing points). The thesis consists of three parts. First, we deal with the placement of boxes. Boxes are axis-parallel rectangles that can, for example, contain text.
They can be placed on a map to label important sites, or can be used to describe semantic relationships between words in a word network. In the second part of the thesis, we consider graph drawings that visually guide the viewer. These drawings generally induce large angles between edges that meet at a vertex. Furthermore, the edges are drawn crossing-free and in a way that makes them easy to follow for the human eye. The third and final part is devoted to crossings with large angles. In drawings with crossings, it is important to have large angles between edges at their crossing point, preferably right angles.}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Assisting Analysis and Understanding of Quran Search Results with Interactive Scatter Plots and Tables}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55840}, year = {2011}, abstract = {The Quran is the holy book of Islam consisting of 6236 verses divided into 114 chapters called suras. Many verses are similar and even identical. Searching for similar texts (e.g. verses) can return thousands of verses which, when displayed completely or partly as a textual list, would make analysis and understanding difficult and confusing. Moreover, it would be visually impossible to instantly figure out the overall distribution of the retrieved verses in the Quran. As a consequence, reading and analyzing the verses would be tedious and unintuitive. In this study, a combination of interactive scatter plots and tables has been developed to assist analysis and understanding of the search result. Retrieved verses are clustered by chapters, and a weight is assigned to each cluster according to the number of verses it contains, so that users can visually identify the most relevant areas and figure out the places of revelation of the verses. Users visualize the complete result and can select a region of the plot to zoom in, or click on a marker to display a table containing the verses with their English translation side by side.}, subject = {Text Mining}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Assisting Understanding, Retention, and Dissemination of Religious Texts Knowledge with Modeling, and Visualization Techniques: The Case of The Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55927}, year = {2011}, abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. Once deeper understanding is achieved, one would like to organize and manage her/his knowledge in such a way that it could be easily remembered and efficiently transmitted to others. In this paper, books organized in terms of chapters consisting of verses are considered as the source of knowledge to be modeled. The knowledge model consists of verses with their metadata and semantic annotations. The metadata represent the multiple perspectives of knowledge modeling. Verses with their metadata and annotations form a meta-model, which will be published on a web Mashup. The meta-model, with links between its elements, constitutes a knowledge base. An XML-based annotation system that breaks down the learning process into specific tasks helps construct the desired meta-model. The system is made up of user interfaces for creating metadata, annotating chapters' contents according to user-selected semantics, and templates for publishing the generated knowledge on the Internet.
The proposed software system improves comprehension and retention of knowledge contained in religious texts through modeling and visualization. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts. It is expected that this short ongoing study will motivate others to engage in devising and offering software systems for cross-religion learning.}, subject = {Wissensmanagement}, language = {en} } @article{MandelHoernleinIflandetal.2011, author = {Mandel, Alexander and H{\"o}rnlein, Alexander and Ifland, Marianus and L{\"u}neburg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Aufwandsanalyse f{\"u}r computerunterst{\"u}tzte Multiple-Choice Papierklausuren}, series = {GMS Journal for Medical Education}, volume = {28}, journal = {GMS Journal for Medical Education}, number = {4}, doi = {10.3205/zma000767}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134386}, pages = {1-15, Doc55}, year = {2011}, abstract = {Introduction: Multiple-choice examinations are still fundamental for assessment in medical degree programs. In addition to content-related research, the optimization of the technical procedure is an important question. Medical examiners face three options: paper-based examinations with or without computer support, or completely electronic examinations. Critical aspects are the effort for formatting, the logistic effort during the actual examination, the quality, promptness, and effort of the correction, the time for making the documents available for inspection by the students, and the statistical analysis of the examination results. Methods: For the past three semesters, a computer program for the input and formatting of MC questions in medical and other paper-based examinations has been used and continuously improved at Wuerzburg University. In the winter semester (WS) 2009/10 eleven, in the summer semester (SS) 2010 twelve, and in WS 2010/11 thirteen medical examinations were conducted with the program and automatically evaluated. For the last two semesters the remaining manual workload was recorded. Results: The effort for formatting and subsequent analysis (including adjustments of the analysis) of an average examination with about 140 participants and about 35 questions was 5-7 hours for exams without complications in WS 2009/10, about 2 hours in SS 2010, and about 1.5 hours in WS 2010/11. Including exams with complications, the average time was about 3 hours per exam in SS 2010 and 2.67 hours in WS 10/11. Discussion: For conventional multiple-choice exams, the computer-based formatting and evaluation of paper-based exams offers a significant time reduction for lecturers in comparison with manual correction; compared to purely electronically conducted exams, it needs a much simpler technological infrastructure and fewer staff during the exam.}, language = {de} } @article{WolffRutter2012, author = {Wolff, Alexander and Rutter, Ignaz}, title = {Augmenting the Connectivity of Planar and Geometric Graphs}, series = {Journal of Graph Algorithms and Applications}, journal = {Journal of Graph Algorithms and Applications}, doi = {10.7155/jgaa.00275}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97587}, year = {2012}, abstract = {In this paper we study connectivity augmentation problems.
Given a connected graph G with some desirable property, we want to make G 2-vertex connected (or 2-edge connected) by adding edges such that the resulting graph keeps the property. The aim is to add as few edges as possible. The property that we consider is planarity, both in an abstract graph-theoretic and in a geometric setting, where vertices correspond to points in the plane and edges to straight-line segments. We show that it is NP-hard to find a minimum-cardinality augmentation that makes a planar graph 2-edge connected. For making a planar graph 2-vertex connected this was known. We further show that both problems are hard in the geometric setting, even when restricted to trees. The problems remain hard for higher degrees of connectivity. On the other hand, we give polynomial-time algorithms for the special case of convex geometric graphs. We also study the following related problem. Given a planar (plane geometric) graph G, two vertices s and t of G, and an integer c, how many edges have to be added to G such that G is still planar (plane geometric) and contains c edge- (or vertex-)disjoint s-t paths? For the planar case we give a linear-time algorithm for c = 2. For the plane geometric case we give optimal worst-case bounds for c = 2; for c = 3 we characterize the cases that have a solution.}, language = {en} } @article{KrenzerHeilFittingetal., author = {Krenzer, Adrian and Heil, Stefan and Fitting, Daniel and Matti, Safa and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Automated classification of polyps using deep learning architectures and few-shot learning}, series = {BMC Medical Imaging}, volume = {23}, journal = {BMC Medical Imaging}, doi = {10.1186/s12880-023-01007-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357465}, year = {2023}, abstract = {Background Colorectal cancer (CRC) is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is a colonoscopy. However, not all colon polyps have the risk of becoming cancerous. Therefore, polyps are classified using different classification systems. After the classification, further treatment and procedures are based on the classification of the polyp. Nevertheless, classification is not easy. Therefore, we suggest two novel automated classification systems assisting gastroenterologists in classifying polyps based on the NICE and Paris classifications. Methods We build two classification systems. One classifies polyps based on their shape (Paris). The other classifies polyps based on their texture and surface patterns (NICE). A two-step process for the Paris classification is introduced: first, detecting and cropping the polyp in the image, and second, classifying the polyp based on the cropped area with a transformer network. For the NICE classification, we design a few-shot learning algorithm based on the Deep Metric Learning approach. The algorithm creates an embedding space for polyps, which allows classification from a few examples to account for the data scarcity of NICE-annotated images in our database. Results For the Paris classification, we achieve an accuracy of 89.35 \%, surpassing all papers in the literature and establishing a new state-of-the-art and baseline accuracy for other publications on a public data set. For the NICE classification, we achieve a competitive accuracy of 81.13 \% and thereby demonstrate the viability of the few-shot learning paradigm in polyp classification in data-scarce environments. Additionally, we show different ablations of the algorithms.
Finally, we further elaborate on the explainability of the system by showing heat maps of the neural network that explain the neural activations. Conclusion Overall, we introduce two polyp classification systems to assist gastroenterologists. We achieve state-of-the-art performance in the Paris classification and demonstrate the viability of the few-shot learning paradigm in the NICE classification, addressing the prevalent data scarcity issues faced in medical machine learning.}, language = {en} } @article{BeckerCaminitiFiorellaetal.2013, author = {Becker, Martin and Caminiti, Saverio and Fiorella, Donato and Francis, Louise and Gravino, Pietro and Haklay, Mordechai (Muki) and Hotho, Andreas and Loreto, Vittorio and Mueller, Juergen and Ricchiuti, Ferdinando and Servedio, Vito D. P. and Sirbu, Alina and Tria, Francesca}, title = {Awareness and Learning in Participatory Noise Sensing}, series = {PLOS ONE}, volume = {8}, journal = {PLOS ONE}, number = {12}, issn = {1932-6203}, doi = {10.1371/journal.pone.0081638}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-127675}, pages = {e81638}, year = {2013}, abstract = {The development of ICT infrastructures has facilitated the emergence of new paradigms for looking at society and the environment over the last few years. Participatory environmental sensing, i.e. directly involving citizens in environmental monitoring, is one example, which is hoped to encourage learning and enhance awareness of environmental issues. In this paper, an analysis of the behaviour of individuals involved in noise sensing is presented. Citizens have been involved in noise measuring activities through the WideNoise smartphone application. This application has been designed to record both objective (noise samples) and subjective (opinions, feelings) data. The application has been open for free use by anyone and has been widely employed worldwide. In addition, several test cases have been organised in European countries. Based on the information submitted by users, an analysis of emerging awareness and learning is performed. The data show that changes in the way the environment is perceived do appear after repeated usage of the application. Specifically, users learn how to recognise the different noise levels they are exposed to. Additionally, the subjective data collected indicate increased user involvement over time and a categorisation effect between pleasant and less pleasant environments.}, language = {en} } @article{WienrichDoellingerHein2021, author = {Wienrich, Carolin and D{\"o}llinger, Nina and Hein, Rebecca}, title = {Behavioral Framework of Immersive Technologies (BehaveFIT): How and why virtual reality can support behavioral change processes}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.627194}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258796}, year = {2021}, abstract = {The design and evaluation of assisting technologies to support behavior change processes have become an essential topic within the field of human-computer interaction research in general and the field of immersive intervention technologies in particular. The mechanisms and success of behavior change techniques and interventions are broadly investigated in the field of psychology. However, it is not always easy to adapt these psychological findings to the context of immersive technologies.
The lack of theoretical foundation also leads to a lack of explanation as to why and how immersive interventions support behavior change processes. The Behavioral Framework for immersive Technologies (BehaveFIT) addresses this lack by 1) presenting an intelligible categorization and condensation of psychological barriers and immersive features, by 2) suggesting a mapping that shows why and how immersive technologies can help to overcome barriers, and finally by 3) proposing a generic prediction path that enables a structured, theory-based approach to the development and evaluation of immersive interventions. These three steps explain how BehaveFIT can be used, and include guiding questions for each step. Further, two use cases illustrate the usage of BehaveFIT. Thus, the present paper contributes to guidance for immersive intervention design and evaluation, showing that immersive interventions support behavior change processes and explaining and predicting 'why' and 'how' immersive interventions can bridge the intention-behavior gap.}, language = {en} } @unpublished{Dandekar2019, author = {Dandekar, Thomas}, title = {Biological heuristics applied to cosmology suggests a condensation nucleus as start of our universe and inflation cosmology replaced by a period of rapid Weiss domain-like crystal growth}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-183945}, pages = {24}, year = {2019}, abstract = {Cosmology often uses intricate formulas and mathematics to derive new theories and concepts. We do something different in this paper: we look at biological processes and derive heuristics from them, so that the revised cosmology agrees with astronomical observations and also with standard biological observations. We show that we then have to replace any type of singularity at the start of the universe by a condensation nucleus, and that the very early period of the universe, usually assumed to be inflation, has to be replaced by a period of rapid crystal growth as in Weiss magnetization domains. Impressively, these minor modifications agree well with astronomical observations, including the removal of the strong inflation perturbations that were never observed in the recent BICEP2 experiments. Furthermore, looking at biological principles suggests that such a new theory, with a condensation nucleus at the start and a first rapid phase of magnetization-like growth of the ordered, physical-law-obeying lattice we live in, is in fact the only convincing theory of the early phases of our universe that is also compatible with current observations. We show in detail in the following that such a process of crystal creation, breaking of new crystal seeds, and ultimate evaporation of the present crystal readily leads over several generations to an evolution and selection of better, more stable, and more self-organizing crystals. Moreover, this explains the "fine-tuning" question of why our universe is fine-tuned to favor life: our Universe is self-organizing so as to have enough offspring, and the detailed physics involved is at the same time highly favorable for all self-organizing processes, including life. This biological theory contrasts with current standard inflation cosmologies. The latter do not perform well in explaining any phenomena of sophisticated structure creation or self-organization.
As proteins can only thermodynamically fold by increasing the entropy in the solution around them, we suggest for cosmology that a condensation nucleus for a universe can form only in a "chaotic ocean" of string-soup or quantum foam if the entropy outside of the nucleus rapidly increases. We derive an interaction potential for 1- to n-dimensional strings or quantum foams and show that they allow only 1D, 2D, 4D, or octonion (8D) interactions. The latter is the richest structure, agrees with the E8 symmetry fundamental to particle physics, and is also compatible with the ten-dimensional E8 string theory, which is part of M-theory. Interestingly, interactions of any other dimensionality can be ruled out using Hurwitz's composition theorem. Crystallization also explains extremely well why we have only one macroscopic reality and where the worldlines of alternative trajectories exist: they are in other planes of the crystal, and for energy reasons they crystallize mostly at the same time, yielding a beautiful and stable crystal. This explains decoherence and allows determining the size of Planck's quantum h (very small, as the separation of crystal layers by energy is extremely strong). The ultimate dissolution of real crystals suggests an explanation for dark energy that agrees with estimates for the "big rip". The halo distribution of dark matter favoring galaxy formation is readily explained by a crystal seed starting with unit cells made of normal and dark matter. That we have only matter and not antimatter can be explained as there may be right-handed matter crystals and left-handed antimatter crystals. Similarly, real crystals are never perfect, and we argue that exactly such irregularities allow the formation of galaxies, clusters, and superclusters. Finally, heuristics from genetics suggest taking a systems perspective to derive correct vacuum and Higgs boson energies.}, language = {en} } @article{PfitznerMayNuechter2018, author = {Pfitzner, Christian and May, Stefan and N{\"u}chter, Andreas}, title = {Body weight estimation for dose-finding and health monitoring of lying, standing and walking patients based on RGB-D data}, series = {Sensors}, volume = {18}, journal = {Sensors}, number = {5}, doi = {10.3390/s18051311}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176642}, pages = {1311}, year = {2018}, abstract = {This paper describes the estimation of the body weight of a person in front of an RGB-D camera. A survey of different methods for body weight estimation based on depth sensors is given. First, an estimation of people standing in front of a camera is presented. Second, an approach based on a stream of depth images is used to obtain the body weight of a person walking towards a sensor. The algorithm first extracts features from a point cloud and forwards them to an artificial neural network (ANN) to obtain an estimation of body weight. Besides the algorithm for the estimation, this paper further presents an open-access dataset based on measurements from a trauma room in a hospital as well as data from visitors of a public event. In total, the dataset contains 439 measurements. The article illustrates the efficiency of the approach with experiments with persons lying down in a hospital, standing persons, and walking persons.
Applicable scenarios for the presented algorithm include body weight-related dosing of emergency patients.}, language = {en} } @article{KirikkayisGallikWinteretal.2023, author = {Kirikkayis, Yusuf and Gallik, Florian and Winter, Michael and Reichert, Manfred}, title = {BPMNE4IoT: a framework for modeling, executing and monitoring IoT-driven processes}, series = {Future Internet}, volume = {15}, journal = {Future Internet}, number = {3}, issn = {1999-5903}, doi = {10.3390/fi15030090}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-304097}, year = {2023}, abstract = {The Internet of Things (IoT) enables a variety of smart applications, including smart home, smart manufacturing, and smart city. By enhancing Business Process Management Systems with IoT capabilities, the execution and monitoring of business processes can be significantly improved. Providing holistic support for modeling, executing and monitoring IoT-driven processes, however, constitutes a challenge. Existing process modeling and process execution languages, such as BPMN 2.0, are unable to fully meet the IoT characteristics (e.g., asynchronicity and parallelism) of IoT-driven processes. In this article, we present BPMNE4IoT, a holistic framework for modeling, executing and monitoring IoT-driven processes. We introduce various artifacts and events based on the BPMN 2.0 metamodel that allow realizing the desired IoT awareness of business processes. The framework is evaluated using two real-world scenarios from two different domains. Moreover, we present a user study comparing BPMNE4IoT and BPMN 2.0. In particular, this study has confirmed that the BPMNE4IoT framework facilitates the support of IoT-driven processes.}, language = {en} } @article{LugrinLatoschikHabeletal.2016, author = {Lugrin, Jean-Luc and Latoschik, Marc Erich and Habel, Michael and Roth, Daniel and Seufert, Christian and Grafe, Silke}, title = {Breaking Bad Behaviors: A New Tool for Learning Classroom Management Using Virtual Reality}, series = {Frontiers in ICT}, volume = {3}, journal = {Frontiers in ICT}, number = {26}, doi = {10.3389/fict.2016.00026}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147945}, year = {2016}, abstract = {This article presents an immersive virtual reality (VR) system for training classroom management skills, with a specific focus on learning to manage disruptive student behavior in face-to-face, one-to-many teaching scenarios. The core of the system is a real-time 3D virtual simulation of a classroom populated by twenty-four semi-autonomous virtual students. The system has been designed as a companion tool for classroom management seminars in a syllabus for primary and secondary school teachers. This will allow lecturers to link theory with practice using the medium of VR. The system is therefore designed for two users: a trainee teacher and an instructor supervising the training session. The teacher is immersed in a real-time 3D simulation of a classroom by means of a head-mounted display and headphones. The instructor operates a graphical desktop console, which renders a view of the class and the teacher, whose avatar movements are captured by a markerless tracking system. This console includes a 2D graphics menu with convenient behavior and feedback control mechanisms to provide human-guided training sessions. The system is built using low-cost consumer hardware and software. Its architecture and technical design are described in detail.
A first evaluation confirms its conformance to critical usability requirements (i.e., safety and comfort, believability, simplicity, acceptability, extensibility, affordability, and mobility). Our initial results are promising and constitute the necessary first step toward a possible investigation of the efficiency and effectiveness of such a system in terms of learning outcomes and experience.}, language = {en} } @article{DoellingerWienrichLatoschik2021, author = {D{\"o}llinger, Nina and Wienrich, Carolin and Latoschik, Marc Erich}, title = {Challenges and opportunities of immersive technologies for mindfulness meditation: a systematic review}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.644683}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259047}, pages = {644683}, year = {2021}, abstract = {Mindfulness is considered an important factor of an individual's subjective well-being. Consequently, Human-Computer Interaction (HCI) has investigated approaches that strengthen mindfulness, i.e., by inventing multimedia technologies to support mindfulness meditation. These approaches often use smartphones, tablets, or consumer-grade desktop systems to allow everyday usage in users' private lives or in the scope of organized therapies. Virtual, Augmented, and Mixed Reality (VR, AR, MR; in short: XR) significantly extend the design space for such approaches. XR covers a wide range of potential sensory stimulation, perceptive and cognitive manipulations, content presentation, interaction, and agency. These facilities are linked to typical XR-specific perceptions that are conceptually closely related to mindfulness research, such as (virtual) presence and (virtual) embodiment. However, a successful exploitation of XR that strengthens mindfulness requires a systematic analysis of the potential interrelation and influencing mechanisms between XR technology, its properties, factors, and phenomena and existing models and theories of the construct of mindfulness. This article reports such a systematic analysis of XR-related research from HCI and life sciences to determine the extent to which existing research frameworks on HCI and mindfulness can be applied to XR technologies, the potential of XR technologies to support mindfulness, and open research gaps. Fifty papers from the ACM Digital Library and the National Institutes of Health's National Library of Medicine (PubMed) with and without empirical efficacy evaluation were included in our analysis. The results reveal that at the current time, empirical research on XR-based mindfulness support mainly focuses on therapy and therapeutic outcomes. Furthermore, most of the currently investigated XR-supported mindfulness interactions are limited to vocally guided meditations within nature-inspired virtual environments. While an analysis of empirical research on those systems did not reveal differences in mindfulness compared to non-mediated mindfulness practices, various design proposals illustrate that XR has the potential to provide interactive and body-based innovations for mindfulness practice. We propose a structured approach for future work to specify and further explore the potential of XR as mindfulness support.
The resulting framework provides design guidelines for XR-based mindfulness support based on the elements and psychological mechanisms of XR interactions.}, language = {en} } @techreport{NguyenLohHossfeld2023, type = {Working Paper}, author = {Nguyen, Kien and Loh, Frank and Hoßfeld, Tobias}, title = {Challenges of Serverless Deployment in Edge-MEC-Cloud}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32202}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322025}, pages = {4}, year = {2023}, abstract = {Emerging serverless computing may meet the Edge Cloud in a beneficial manner, as the two offer flexibility and dynamicity in optimizing finite hardware resources. However, the lack of a proper study of a joint platform leaves a gap in the literature about the consumption and performance of such an integration. To this end, this paper identifies the key questions and proposes a methodology to answer them.}, language = {en} } @phdthesis{Ullmann2015, author = {Ullmann, Tobias}, title = {Characterization of Arctic Environment by Means of Polarimetric Synthetic Aperture Radar (PolSAR) Data and Digital Elevation Models (DEM)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115719}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The ecosystem of the high northern latitudes is affected by the recently changing environmental conditions. The Arctic has undergone significant climatic change over the last decades. The land coverage is changing and a phenological response to the warming is apparent. Remotely sensed data can assist the monitoring and quantification of these changes. Remote sensing of the Arctic has predominantly been carried out using optical sensors, but these encounter problems in the Arctic environment, e.g. the frequent cloud cover or the solar geometry. In contrast, the imaging of Synthetic Aperture Radar is not affected by cloud cover, and the acquisition of radar imagery is independent of the solar illumination. The objective of this work was to explore how polarimetric Synthetic Aperture Radar (PolSAR) data of TerraSAR-X, TanDEM-X, Radarsat-2 and ALOS PALSAR and interferometrically derived digital elevation model data of the TanDEM-X Mission can contribute to collecting meaningful information on the actual state of the Arctic Environment. The study was conducted for Canadian sites of the Mackenzie Delta Region and Banks Island, and in situ reference data were available for the assessment. The up-to-date analysis of the PolSAR data made the application of Non-Local Means filtering and of the decomposition of co-polarized data necessary. The Non-Local Means filter showed a high capability to preserve the image values, to keep the edges, and to reduce the speckle. This supported the suitability not only for interpretation but also for classification. The classification accuracies of Non-Local Means filtered data were on average +10\% higher compared to unfiltered images. The correlation of the co- and quad-polarized decomposition features was high for classes with distinct surface or double-bounce scattering, and a usage of the co-polarized data is beneficial for regions of natural land coverage and for low vegetation formations with little volume scattering.
The evaluation further revealed that the X- and C-Band were most sensitive to the generalized land cover classes. It was found that the X-Band data were sensitive to low vegetation formations with low shrub density, while the C-Band data were sensitive to the shrub density and the shrub-dominated tundra. In contrast, the L-Band data were less sensitive to the land cover. Among the different dual-polarized data, the HH/VV-polarized data were identified as most meaningful for the characterization and classification, followed by the HH/HV-polarized and the VV/VH-polarized data. The quad-polarized data showed the highest sensitivity to the land cover, but differences to the co-polarized data were small. The accuracy assessment showed that spectral information was required for accurate land cover classification. The best results were obtained when spectral and radar information was combined. The benefit of including radar data in the classification was up to +15\% accuracy and most significant for the classes wetland and sparsely vegetated tundra. The best classifications were realized with quad-polarized C-Band and multispectral data and with co-polarized X-Band and multispectral data. The overall accuracy was up to 80\% for unsupervised and up to 90\% for supervised classifications. The results indicated that the shortwave co-polarized data show promise for the classification of tundra land cover, since the polarimetric information is sensitive to low vegetation and the wetlands. Furthermore, co-polarized data provide a higher spatial resolution than the quad-polarized data. The analysis of the intermediate digital elevation model data of the TanDEM-X showed a high potential for the characterization of the surface morphology. The basic and relative topographic features were shown to be of high relevance for the quantification of the surface morphology, and an area-wide application is feasible. In addition, these data were of value for the classification and delineation of landforms. Such classifications will assist the delineation of geomorphological units and have the potential to identify locations of actual and future morphologic activity.}, subject = {Mackenzie-River-Delta}, language = {en} } @article{PawellekKrmarLeistneretal.2021, author = {Pawellek, Ruben and Krmar, Jovana and Leistner, Adrian and Djajić, Nevena and Otašević, Biljana and Protić, Ana and Holzgrabe, Ulrike}, title = {Charged aerosol detector response modeling for fatty acids based on experimental settings and molecular features: a machine learning approach}, series = {Journal of Cheminformatics}, volume = {13}, journal = {Journal of Cheminformatics}, number = {1}, doi = {10.1186/s13321-021-00532-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-261618}, year = {2021}, abstract = {The charged aerosol detector (CAD) is the latest representative of aerosol-based detectors that generate a response independent of the analytes' chemical structure. This study was aimed at accurately predicting the CAD response of homologous fatty acids under varying experimental conditions. Fatty acids from C12 to C18 were used as model substances due to semivolatile characteristics that caused non-uniform CAD behaviour. Considering both experimental conditions and molecular descriptors, a mixed quantitative structure-property relationship (QSPR) modeling was performed using Gradient Boosted Trees (GBT).
The ensemble of 10 decision trees (learning rate set at 0.55, maximal depth set at 5, and sample rate set at 1.0) was able to explain approximately 99\% (Q\(^2\): 0.987, RMSE: 0.051) of the observed variance in CAD responses. Validation using an external test compound confirmed the high predictive ability of the established model (R\(^2\): 0.990, RMSEP: 0.050). With respect to the intrinsic attribute selection strategy, GBT used almost all independent variables during model building. Finally, it attributed the highest importance to the power function value, the flow rate of the mobile phase, the evaporation temperature, the content of the organic solvent in the mobile phase, and molecular descriptors such as molecular weight (MW), Radial Distribution Function-080/weighted by mass (RDF080m), and the average coefficient of the last eigenvector from the distance/detour matrix (Ve2_D/Dt). The identification of the factors most relevant to CAD responsiveness has contributed to a better understanding of the underlying mechanisms of signal generation. An increased CAD response obtained for acetone as the organic modifier demonstrated its potential to replace the more expensive and environmentally harmful acetonitrile.}, language = {en} } @article{HentschelKobsHotho2022, author = {Hentschel, Simon and Kobs, Konstantin and Hotho, Andreas}, title = {CLIP knows image aesthetics}, series = {Frontiers in Artificial Intelligence}, volume = {5}, journal = {Frontiers in Artificial Intelligence}, issn = {2624-8212}, doi = {10.3389/frai.2022.976235}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-297150}, year = {2022}, abstract = {Most Image Aesthetic Assessment (IAA) methods use a pretrained ImageNet classification model as a base to fine-tune. We hypothesize that content classification is not an optimal pretraining task for IAA, since the task discourages the extraction of features that are useful for IAA, e.g., composition, lighting, or style. On the other hand, we argue that the Contrastive Language-Image Pretraining (CLIP) model is a better base for IAA models, since it has been trained using natural language supervision. Due to the rich nature of language, CLIP needs to learn a broad range of image features that correlate with sentences describing the image content, composition, environments, and even subjective feelings about the image. While it has been shown that CLIP extracts features useful for content classification tasks, its suitability for tasks that require the extraction of style-based features like IAA has not yet been shown. We test our hypothesis by conducting a three-step study, investigating the usefulness of features extracted by CLIP compared to features obtained from the last layer of a comparable ImageNet classification model. In each step, we get more computationally expensive. First, we engineer natural language prompts that let CLIP assess an image's aesthetics without adjusting any weights in the model. To overcome the challenge that CLIP's prompting is only applicable to classification tasks, we propose a simple but effective strategy to convert multiple prompts to a continuous scalar, as required when predicting an image's mean aesthetic score. Second, we train a linear regression on the AVA dataset using image features obtained by CLIP's image encoder. The resulting model outperforms a linear regression trained on features from an ImageNet classification model.
It also shows competitive performance with fully fine-tuned networks based on ImageNet, while only training a single layer. Finally, by fine-tuning CLIP's image encoder on the AVA dataset, we show that CLIP only needs a fraction of the training epochs to converge, while also performing better than a fine-tuned ImageNet model. Overall, our experiments suggest that CLIP is better suited as a base model for IAA methods than ImageNet-pretrained networks.}, language = {en} } @techreport{LeGrossmannKrieger2022, type = {Working Paper}, author = {Le, Duy Thanh and Großmann, Marcel and Krieger, Udo R.}, title = {Cloudless Resource Monitoring in a Fog Computing System Enabled by an SDN/NFV Infrastructure}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28072}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280723}, pages = {4}, year = {2022}, abstract = {Today's advanced Internet-of-Things applications raise technical challenges for cloud, edge, and fog computing. The design of an efficient, virtualized, context-aware, self-configuring orchestration system for a fog computing system constitutes a major development effort within this very innovative area of research. In this paper, we describe the architecture and relevant implementation aspects of a cloudless resource monitoring system interworking with an SDN/NFV infrastructure. It realizes the basic monitoring component of the fundamental MAPE-K principles employed in autonomic computing. Here we present the hierarchical layering and functionality within the underlying fog nodes to generate a working prototype of an intelligent, self-managed orchestrator for advanced IoT applications and services. The latter system has the capability to automatically monitor various performance aspects of the resource allocation among multiple hosts of a fog computing system interconnected by SDN.}, subject = {Datennetz}, language = {en} } @article{SchokraieWarnkenHotzWagenblattetal.2012, author = {Schokraie, Elham and Warnken, Uwe and Hotz-Wagenblatt, Agnes and Grohme, Markus A. and Hengherr, Steffen and F{\"o}rster, Frank and Schill, Ralph O. and Frohme, Marcus and Dandekar, Thomas and Schn{\"o}lzer, Martina}, title = {Comparative proteome analysis of Milnesium tardigradum in early embryonic state versus adults in active and anhydrobiotic state}, series = {PLoS One}, volume = {7}, journal = {PLoS One}, number = {9}, doi = {10.1371/journal.pone.0045682}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134447}, pages = {e45682}, year = {2012}, abstract = {Tardigrades have fascinated researchers for more than 300 years because of their extraordinary capability to undergo cryptobiosis and survive extreme environmental conditions. However, the survival mechanisms of tardigrades are still poorly understood, mainly due to the absence of detailed knowledge about the proteome and genome of these organisms. Our study was intended to provide a basis for the functional characterization of expressed proteins in different states of tardigrades. High-throughput, high-accuracy proteomics in combination with a newly developed tardigrade-specific protein database resulted in the identification of more than 3000 proteins in three different states: early embryonic state and adult animals in active and anhydrobiotic state.
This comprehensive proteome resource includes protein families such as chaperones, antioxidants, ribosomal proteins, cytoskeletal proteins, transporters, protein channels, nutrient reservoirs, and developmental proteins. A comparative analysis of protein families in the different states was performed by calculating the exponentially modified protein abundance index, which classifies proteins into major and minor components. This is a first step toward analyzing the proteins involved in early embryonic development and, furthermore, the proteins which might play an important role in the transition into the anhydrobiotic state.}, language = {en} }