@phdthesis{Huber2023, author = {Huber, Stephan}, title = {Proxemo: Documenting Observed Emotions in HCI}, doi = {10.25972/OPUS-30573}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-305730}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {For formative evaluations of user experience (UX) a variety of methods have been developed over the years. However, most techniques require the users to interact with the study as a secondary task. This active involvement in the evaluation is not inclusive of all users and potentially biases the experience currently being studied. Yet there is a lack of methods for situations in which the user has no spare cognitive resources. This condition occurs when 1) users' cognitive abilities are impaired (e.g., people with dementia) or 2) users are confronted with very demanding tasks (e.g., air traffic controllers). In this work we focus on emotions as a key component of UX and propose the new structured observation method Proxemo for formative UX evaluations. Proxemo allows qualified observers to document users' emotions by proxy in real time and then directly link them to triggers. Technically this is achieved by synchronising the timestamps of emotions documented by observers with a video recording of the interaction. In order to facilitate the documentation of observed emotions in highly diverse contexts we conceptualise and implement two separate versions of a documentation aid named Proxemo App. For formative UX evaluations of technology-supported reminiscence sessions with people with dementia, we create a smartwatch app to discreetly document emotions from the categories anger, general alertness, pleasure, wistfulness and pride. For formative UX evaluations of prototypical user interfaces with air traffic controllers we create a smartphone app to efficiently document emotions from the categories anger, boredom, surprise, stress and pride. Descriptive case studies in both application domains indicate the feasibility and utility of the method Proxemo and the appropriateness of the respectively adapted design of the Proxemo App. The third part of this work is a series of meta-evaluation studies to determine quality criteria of Proxemo. We evaluate Proxemo regarding its reliability, validity, thoroughness and effectiveness, and compare Proxemo's efficiency and the observers' experience to documentation with pen and paper. Proxemo is reliable, as well as more efficient, thorough and effective than handwritten notes and provides a better UX to observers. Proxemo compares well with existing methods where benchmarks are available. With Proxemo we contribute a validated structured observation method that has been shown to meet the requirements of formative UX evaluations in the extreme contexts of users with cognitive impairments or high task demands. Proxemo is agnostic regarding researchers' theoretical approaches and unites reductionist and holistic perspectives within one method. Future work should explore the applicability of Proxemo for further domains and extend the list of audited quality criteria to include, for instance, downstream utility. 
With respect to basic research we strive to better understand the sources leading observers to empathic judgments and propose reminiscence and older adults as a model environment for investigating mixed emotions.}, subject = {Gef{\"u}hl}, language = {en} } @article{Hurtienne2013, author = {Hurtienne, J{\"o}rn}, title = {Inter-coder reliability of categorising force-dynamic events in human-technology interaction}, volume = {1}, number = {1}, issn = {2197-2796}, doi = {10.1515/gcla-2013-0005}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-194127}, pages = {59-78}, year = {2013}, abstract = {Two studies are reported that investigate how readily accessible and applicable ten force-dynamic categories are to novices in describing short episodes of human-technology interaction (Study 1) and that establish a measure of inter-coder reliability when re-classifying these episodes into force-dynamic categories (Study 2). The results of the first study show that people can easily and confidently relate their experiences with technology to the definitions of force-dynamic events (e.g. "The driver released the handbrake" as an example of restraint removal). The results of the second study show moderate agreement between four expert coders across all ten force-dynamic categories (Cohen's kappa = .59) when re-classifying these episodes. Agreement values for single force-dynamic categories ranged between 'fair' and 'almost perfect', i.e. between kappa = .30 and .95. Agreement with the originally intended classifications of study 1 was higher than the pure inter-coder reliabilities. Single coders achieved an average kappa of .71, indicating substantial agreement. Using more than one coder increased kappas to almost perfect: up to .87 for four coders. A qualitative analysis of the predicted versus the observed number of category confusions revealed that about half of the category disagreement could be predicted from strong overlaps in the definitions of force-dynamic categories. From the quantitative and qualitative results, guidelines are derived to aid the better training of coders in order to increase inter-coder reliability.}, language = {en} } @phdthesis{Hoehn2002, author = {H{\"o}hn, Holger}, title = {Multimediale, datenbankgest{\"u}tzte Lehr- und Lernplattformen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-4049}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2002}, abstract = {Die Dissertation befaßt sich mit der Entwicklung einer multimedialen, datenbankgest{\"u}tzten Lehr- und Lernplattform. Die entwickelten Module erm{\"o}glichen und erweitern nicht nur die M{\"o}glichkeit des Selbststudiums f{\"u}r den Studenten, sondern erleichtern auch die Arbeit der Dozenten. Außerdem wird auch die Zusammenarbeit und der Austausch von Lernobjekten zwischen verschiedenen Institutionen erm{\"o}glicht. In der Lehr- und Lernplattform k{\"o}nnen verschiedene Lernobjekt-Typen verwaltet werden. Exemplarisch wurden die Typen Bilder, 3D-Animationen, Vorlesungen, Lerntexte, Fallbeispiele und Quizelemente integriert. Die Lehr- und Lernplattform besteht aus drei Bausteinen: 1. In der Lernobjekt-Datenbank werden alle Lernobjekt-Typen und Lernobjekte verwaltet. 2. Autorenwerkzeuge dienen zur Erstellung von Lernobjekten. 3. In der Lernplattform werden die Lernobjekte den Studenten zum (Selbst-)Lernen pr{\"a}sentiert. 
Neben den Vorteilen, die der Einsatz von E-Learning im allgemeinen bietet, wie die flexible Lernorganisation oder die Nutzung von Lerninhalten unabh{\"a}ngig von Ort und Zeit, zeichnet sich die entwickelte Lehr- und Lernplattform besonders durch folgende Punkte aus: Generierung von Lerninhalten h{\"o}herer Qualit{\"a}t durch multizentrische Expertenb{\"u}ndelung und Arbeitsteilung, Erweiterbarkeit auf andere, neue Lernobjekt-Typen, Verwaltbarkeit, Konsistenz, Flexibilit{\"a}t, geringer Verwaltungsaufwand, Navigationsm{\"o}glichkeiten f{\"u}r den Studenten, Personalisierbarkeit und Konformit{\"a}t zu internationalen Standards. Sowohl bei der Modellierung als auch bei der Umsetzung wurde darauf geachtet, m{\"o}glichst gut die Anforderungen der Dermatologie bei gleichzeitiger Erweiterbarkeit auf andere, {\"a}hnliche Szenarien zu erf{\"u}llen. Besonders einfach sollte die Anpassung der Plattform f{\"u}r andere bildorientierte Disziplinen sein.}, subject = {Multimedia}, language = {de} } @misc{Hoehn2006, type = {Master Thesis}, author = {H{\"o}hn, Winfried}, title = {Mustererkennung in Fr{\"u}hdrucken}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-30429}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {No abstract available}, subject = {Mustererkennung}, language = {de} } @techreport{HoewelerXiangHoepfneretal.2022, type = {Working Paper}, author = {H{\"o}weler, Malte and Xiang, Zuo and H{\"o}pfner, Franz and Nguyen, Giang T. and Fitzek, Frank H. P.}, title = {Towards Stateless Core Networks: Measuring State Access Patterns}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28077}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280770}, pages = {4}, year = {2022}, abstract = {Future mobile communication networks, such as 5G and beyond, can benefit from Virtualized Network Functions (VNFs) when deployed on cloud infrastructures to achieve elasticity and scalability. However, new challenges arise as to managing states of Network Functions (NFs). Especially control plane VNFs, which are mainly found in cellular core networks like the 5G Core (5GC), received little attention since the shift towards virtualizing NFs. Most existing solutions for these core networks are often complex, intrusive, and are seldom compliant with the standard. With the emergence of 5G campus networks, UEs will be mainly machine-type devices. These devices communicate more deterministically, bringing new opportunities for elaborated state management. This work presents an emulation environment to perform rigorous measurements on state access patterns. The emulation comes with a fully parameterized Markov model for the UE to examine a wide variety of different devices. 
These measurements can then be used as a solid base for designing an efficient, simple, and standard-compliant state management solution that brings us further towards stateless core networks.}, subject = {Datennetz}, language = {en} } @phdthesis{Jarschel2014, author = {Jarschel, Michael}, title = {An Assessment of Applications and Performance Analysis of Software Defined Networking}, issn = {1432-8801}, doi = {10.25972/OPUS-10079}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-100795}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {With the introduction of OpenFlow by Stanford University in 2008, a process began in the area of network research, which questions the predominant approach of fully distributed network control. OpenFlow is a communication protocol that allows the externalization of the network control plane from the network devices, such as a router, and to realize it as a logically-centralized entity in software. For this concept, the term "Software Defined Networking" (SDN) was coined during scientific discourse. For the network operators, this concept has several advantages. The two most important can be summarized as cost savings and flexibility. Firstly, it is possible through the uniform interface for network hardware ("Southbound API"), as implemented by OpenFlow, to combine devices and software from different manufacturers, which increases the innovation and price pressure on them. Secondly, the realization of the network control plane as a freely programmable software with open interfaces ("Northbound API") provides the opportunity to adapt it to the individual circumstances of the operator's network and to exchange information with the applications it serves. This allows the network to be more flexible and to react more quickly to changing circumstances as well as transport the traffic more effectively and tailored to the user's "Quality of Experience" (QoE). The approach of a separate network control layer for packet-based networks is not new and has already been proposed several times in the past. Therefore, the SDN approach has raised many questions about its feasibility in terms of efficiency and applicability. These questions are caused to some extent by the fact that there is no generally accepted definition of the SDN concept to date. It is therefore a part of this thesis to derive such a definition. In addition, several of the open issues are investigated. These investigations follow three aspects: Performance Evaluation of Software Defined Networking, applications on the SDN control layer, and the usability of the SDN Northbound API for creating application-awareness in network operation. Performance Evaluation of Software Defined Networking: The question of the efficiency of an SDN-based system was from the beginning one of the most important. In this thesis, experimental measurements of the performance of OpenFlow-enabled switch hardware and control software were conducted for the purpose of answering this question. The results of these measurements were used as input parameters for establishing an analytical model of the reactive SDN approach. Through the model it could be determined that the performance of the software control layer, often called "Controller", is crucial for the overall performance of the system, but that the approach is generally viable. Based on this finding, a software tool for analyzing the performance of SDN controllers was developed. 
This software allows the emulation of the forwarding layer of an SDN network towards the control software and can thus determine its performance in different situations and configurations. The measurements with this software showed that there are quite significant differences in the behavior of different control software implementations. Among other things, it has been shown that some implementations show different characteristics for various switches, in particular in terms of message processing speed. Under certain circumstances this can lead to network failures. Applications on the SDN control layer: The core pieces of software defined networking are the intelligent network applications that operate on the control layer. However, their development is still in its infancy and little is known about the technical possibilities and their limitations. Therefore, the relationship between an SDN-based and classical implementation of a network function is investigated in this thesis. This function is the monitoring of network links and the traffic they carry. A typical approach for this task has been built based on Wiretapping and specialized measurement hardware and compared with an implementation based on OpenFlow switches and a special SDN control application. The results of the comparison show that the SDN version can compete in terms of measurement accuracy for bandwidth and delay estimation with the traditional measurement set-up. However, a compromise has to be found for measurements below the millisecond range. Another question regarding the SDN control applications is whether and how well they can solve existing problems in networks. Two programs have been developed based on SDN in this thesis to solve two typical network issues. The first is the tool "IPOM", which enables considerably more flexibility in the study of effects of network structures for a researcher who is confined to a fixed physical test network topology. The second software provides an interface between the Cloud Orchestration Software "OpenNebula" and an OpenFlow controller. The purpose of this software was to investigate experimentally whether a pre-notification of the network of an impending relocation of a virtual service in a data center is sufficient to ensure the continuous operation of that service. This was demonstrated on the example of a video service. Usability of the SDN Northbound API for creating application-awareness in network operation: Currently, the fact that the network and the applications that run on it are developed and operated separately leads to problems in network operation. With the Northbound API, SDN offers an open interface that enables the exchange of information between both worlds during operation. One aim of this thesis was to investigate whether this interface can be exploited so that the QoE experienced by the user can be maintained on a high level. For this purpose, the QoE influence factors were determined on a challenging application by means of a subjective survey study. The application is cloud gaming, in which the calculation of video game environments takes place in the cloud and is transported via video over the network to the user. It was shown that apart from the most important factor influencing QoS, i.e., packet loss on the downlink, the type of game and its speed also play a role. This demonstrates that in addition to QoS the application state is important and should be communicated to the network. 
Since an implementation of such a state-conscious SDN for the example of Cloud Gaming was not possible due to its proprietary implementation, in this thesis the application "YouTube video streaming" was chosen as an alternative. For this application, status information is retrievable via the "Yomo" tool and can be used for network control. It was shown that an SDN-based implementation of an application-aware network has distinct advantages over traditional network management methods and the user quality can be maintained in spite of disturbances.}, subject = {Leistungsbewertung}, language = {en} } @misc{Kaempgen2009, type = {Master Thesis}, author = {Kaempgen, Benedikt}, title = {Deskriptives Data-Mining f{\"u}r Entscheidungstr{\"a}ger: Eine Mehrfachfallstudie}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46343}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {Das Potenzial der Wissensentdeckung in Daten wird h{\"a}ufig nicht ausgenutzt, was haupts{\"a}chlich auf Barrieren zwischen dem Entwicklerteam und dem Endnutzer des Data-Mining zur{\"u}ckzuf{\"u}hren ist. In dieser Arbeit wird ein transparenter Ansatz zum Beschreiben und Erkl{\"a}ren von Daten f{\"u}r Entscheidungstr{\"a}ger vorgestellt. In Entscheidungstr{\"a}ger-zentrierten Aufgaben werden die Projektanforderungen definiert und die Ergebnisse zu einer Geschichte zusammengestellt. Eine Anforderung besteht dabei aus einem tabellarischen Bericht und ggf. Mustern in seinem Inhalt, jeweils verst{\"a}ndlich f{\"u}r einen Entscheidungstr{\"a}ger. Die technischen Aufgaben bestehen aus einer Datenpr{\"u}fung, der Integration der Daten in einem Data-Warehouse sowie dem Generieren von Berichten und dem Entdecken von Mustern wie in den Anforderungen beschrieben. Mehrere Data-Mining-Projekte k{\"o}nnen durch Wissensmanagement sowie eine geeignete Infrastruktur voneinander profitieren. Der Ansatz wurde in zwei Projekten unter Verwendung von ausschließlich Open-Source-Software angewendet.}, subject = {Data Mining}, language = {de} } @article{KaiserLeschRotheetal.2020, author = {Kaiser, Dennis and Lesch, Veronika and Rothe, Julian and Strohmeier, Michael and Spieß, Florian and Krupitzer, Christian and Montenegro, Sergio and Kounev, Samuel}, title = {Towards Self-Aware Multirotor Formations}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010007}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200572}, pages = {7}, year = {2020}, abstract = {In the present day, unmanned aerial vehicles become seemingly more popular every year, but, without regulation of the increasing number of these vehicles, the air space could become chaotic and uncontrollable. In this work, a framework is proposed to combine self-aware computing with multirotor formations to address this problem. The self-awareness is envisioned to improve the dynamic behavior of multirotors. The formation scheme that is implemented is called platooning, which arranges vehicles in a string behind the lead vehicle and is proposed to bring order into chaotic air space. Since multirotors define a general category of unmanned aerial vehicles, the focus of this thesis is quadcopters, platforms with four rotors. A modification for the LRA-M self-awareness loop is proposed and named Platooning Awareness. 
The implemented framework offers two flight modes that enable waypoint following and allow the self-awareness module to find a path onto a goal position through scenarios in which obstacles are present on the way. The evaluation of this work shows that the proposed framework is able to use self-awareness to learn about its environment and avoid obstacles, and can successfully move a platoon of drones through multiple scenarios.}, language = {en} } @article{KaltdorfSchulzeHelmprobstetal.2017, author = {Kaltdorf, Kristin Verena and Schulze, Katja and Helmprobst, Frederik and Kollmannsberger, Philip and Dandekar, Thomas and Stigloher, Christian}, title = {Fiji macro 3D ART VeSElecT: 3D automated reconstruction tool for vesicle structures of electron tomograms}, series = {PLoS Computational Biology}, volume = {13}, journal = {PLoS Computational Biology}, number = {1}, doi = {10.1371/journal.pcbi.1005317}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-172112}, year = {2017}, abstract = {Automatic image reconstruction is critical to cope with steadily increasing data from advanced microscopy. We describe here the Fiji macro 3D ART VeSElecT which we developed to study synaptic vesicles in electron tomograms. We apply this tool to quantify vesicle properties (i) in embryonic Danio rerio 4 and 8 days post fertilization (dpf) and (ii) to compare Caenorhabditis elegans N2 neuromuscular junctions (NMJ) wild-type and its septin mutant (unc-59(e261)). We demonstrate development-specific and mutant-specific changes in synaptic vesicle pools in both models. We confirm the functionality of our macro by applying our 3D ART VeSElecT on zebrafish NMJ showing smaller vesicles in 8 dpf embryos than in 4 dpf embryos, which was validated by manual reconstruction of the vesicle pool. Furthermore, we analyze the impact of C. elegans septin mutant unc-59(e261) on vesicle pool formation and vesicle size. Automated vesicle registration and characterization were implemented in Fiji as two macros (registration and measurement). This flexible arrangement allows in particular reducing false positives by an optional manual revision step. Preprocessing and contrast enhancement work on image-stacks of 1 nm/pixel in x and y direction. Semi-automated cell selection was integrated. 3D ART VeSElecT removes interfering components, detects vesicles by 3D segmentation and calculates vesicle volume and diameter (spherical approximation, inner/outer diameter). Results are collected in color using the RoiManager plugin including the possibility of manual removal of non-matching confounder vesicles. Detailed evaluation considered performance (detected vesicles) and specificity (true vesicles) as well as precision and recall. We furthermore show gain in segmentation and morphological filtering compared to learning-based methods and a large time gain compared to manual segmentation. 3D ART VeSElecT shows small error rates and its speed gain can be up to 68 times faster in comparison to manual annotation. 
Both automatic and semi-automatic modes are explained including a tutorial.}, language = {en} } @article{KammererGoesterReichertetal.2021, author = {Kammerer, Klaus and G{\"o}ster, Manuel and Reichert, Manfred and Pryss, R{\"u}diger}, title = {Ambalytics: a scalable and distributed system architecture concept for bibliometric network analyses}, series = {Future Internet}, volume = {13}, journal = {Future Internet}, number = {8}, issn = {1999-5903}, doi = {10.3390/fi13080203}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-244916}, year = {2021}, abstract = {A deep understanding about a field of research is valuable for academic researchers. In addition to technical knowledge, this includes knowledge about subareas, open research questions, and social communities (networks) of individuals and organizations within a given field. With bibliometric analyses, researchers can acquire quantitatively valuable knowledge about a research area by using bibliographic information on academic publications provided by bibliographic data providers. Bibliometric analyses include the calculation of bibliometric networks to describe affiliations or similarities of bibliometric entities (e.g., authors) and group them into clusters representing subareas or communities. Calculating and visualizing bibliometric networks is a nontrivial and time-consuming data science task that requires highly skilled individuals. In addition to domain knowledge, researchers must often provide statistical knowledge and programming skills or use software tools having limited functionality and usability. In this paper, we present the ambalytics bibliometric platform, which reduces the complexity of bibliometric network analysis and the visualization of results. It accompanies users through the process of bibliometric analysis and eliminates the need for individuals to have programming skills and statistical knowledge, while preserving advanced functionality, such as algorithm parameterization, for experts. As a proof-of-concept, and as an example of bibliometric analyses outcomes, the calculation of research fronts networks based on a hybrid similarity approach is shown. Being designed to scale, ambalytics makes use of distributed systems concepts and technologies. It is based on the microservice architecture concept and uses the Kubernetes framework for orchestration. This paper presents the initial building block of a comprehensive bibliometric analysis platform called ambalytics, which aims at a high usability for users as well as scalability.}, language = {en} } @article{KammererPryssHoppenstedtetal.2020, author = {Kammerer, Klaus and Pryss, R{\"u}diger and Hoppenstedt, Burkhard and Sommer, Kevin and Reichert, Manfred}, title = {Process-driven and flow-based processing of industrial sensor data}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {18}, issn = {1424-8220}, doi = {10.3390/s20185245}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-213089}, year = {2020}, abstract = {For machine manufacturing companies, besides the production of high quality and reliable machines, requirements have emerged to maintain machine-related aspects through digital services. The development of such services in the field of the Industrial Internet of Things (IIoT) is dealing with solutions such as effective condition monitoring and predictive maintenance. However, appropriate data sources are needed on which digital services can be technically based. 
As many powerful and cheap sensors have been introduced over the last years, their integration into complex machines is promising for developing digital services for various scenarios. It is apparent that components handling the recorded data of these sensors must usually deal with large amounts of data. In particular, the labeling of raw sensor data must be supported by a technical solution. To deal with these data handling challenges in a generic way, a sensor processing pipeline (SPP) was developed, which provides effective methods to capture, process, store, and visualize raw sensor data based on a processing chain. Based on the example of a machine manufacturing company, the SPP approach is presented in this work. For the company involved, the approach has revealed promising results.}, language = {en} } @phdthesis{Karch2002, author = {Karch, Oliver}, title = {Where am I? - Indoor localization based on range measurements}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8442}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2002}, abstract = {Nowadays, robotics plays an important role in an increasing number of fields of application. There exist many environments or situations where mobile robots instead of human beings are used, since the tasks are too hazardous, uncomfortable, repetitive, or costly for humans to perform. The autonomy and the mobility of the robot are often essential for a good solution of these problems. Thus, such a robot should at least be able to answer the question "Where am I?". This thesis investigates the problem of self-localizing a robot in an indoor environment using range measurements. That is, a robot equipped with a range sensor wakes up inside a building and has to determine its position using only its sensor data and a map of its environment. We examine this problem from an idealizing point of view (reducing it to a purely geometric one) and further investigate a method of Guibas, Motwani, and Raghavan from the field of computational geometry for solving it. Here, so-called visibility skeletons, which can be seen as coarsened representations of visibility polygons, play a decisive role. In the major part of this thesis we analyze the structures and the occurring complexities in the framework of this scheme. It turns out that the main sources of complication are so-called overlapping embeddings of skeletons into the map polygon, for which we derive some restrictive visibility constraints. Based on these results we are able to improve one of the occurring complexity bounds in the sense that we can formulate it with respect to the number of reflex vertices instead of the total number of map vertices. This also affects the worst-case bound on the preprocessing complexity of the method. The second part of this thesis compares the previous idealizing assumptions with the properties of real-world environments and discusses the occurring problems. In order to circumvent these problems, we use the concept of distance functions, which model the resemblance between the sensor data and the map, and appropriately adapt the above method to the needs of realistic scenarios. In particular, we introduce a distance function, namely the polar coordinate metric, which seems to be well suited to the localization problem. 
Finally, we present the RoLoPro software where most of the discussed algorithms are implemented (including the polar coordinate metric).}, subject = {Autonomer Roboter}, language = {en} } @article{KarlDandekar2013, author = {Karl, Stefan and Dandekar, Thomas}, title = {Jimena: Efficient computing and system state identification for genetic regulatory networks}, series = {BMC Bioinformatics}, volume = {14}, journal = {BMC Bioinformatics}, doi = {10.1186/1471-2105-14-306}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-128671}, year = {2013}, abstract = {Background: Boolean networks capture switching behavior of many naturally occurring regulatory networks. For semi-quantitative modeling, interpolation between ON and OFF states is necessary. The high degree polynomial interpolation of Boolean genetic regulatory networks (GRNs) in cellular processes such as apoptosis or proliferation allows for the modeling of a wider range of node interactions than continuous activator-inhibitor models, but suffers from scaling problems for networks which contain nodes with more than ~10 inputs. Many GRNs from the literature or new gene expression experiments exceed those limitations and a new approach was developed. Results: (i) As a part of our new GRN simulation framework Jimena we introduce and set up Boolean-tree-based data structures; (ii) corresponding algorithms greatly expedite the calculation of the polynomial interpolation in almost all cases, thereby expanding the range of networks which can be simulated by this model in reasonable time. (iii) Stable states for discrete models are efficiently counted and identified using binary decision diagrams. As an application example, we show how system states can now be sampled efficiently in small up to large scale hormone disease networks (Arabidopsis thaliana development and immunity, pathogen Pseudomonas syringae and modulation by cytokinins and plant hormones). Conclusions: Jimena simulates currently available GRNs about 10-100 times faster than the previous implementation of the polynomial interpolation model and even greater gains are achieved for large scale-free networks. This speed-up also facilitates a much more thorough sampling of continuous state spaces which may lead to the identification of new stable states. Mutants of large networks can be constructed and analyzed very quickly enabling new insights into network robustness and behavior.}, language = {en} } @phdthesis{Kaussner2003, author = {Kaußner, Armin}, title = {Dynamische Szenerien in der Fahrsimulation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8286}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {In der Arbeit wird ein neues Konzept f{\"u}r Fahrsimulator-Datenbasen vorgestellt. Der Anwender entwirft eine auf seine Fragestellung zugeschnittene Datenbasis mithilfe einer einfachen Skriptsprache. Das Straßennetzwerk wird auf einer topologischen Ebene repr{\"a}sentiert. In jedem Simulationsschritt wird hieraus im Sichtbarkeitsbereich des Fahrers die geometrische Repr{\"a}sentation berechnet. Die f{\"u}r den Fahrer unsichtbaren Teile des Straßennetzwerks k{\"o}nnen w{\"a}hrend der Simulation ver{\"a}ndert werden. Diese Ver{\"a}nderungen k{\"o}nnen von der Route des Fahrers oder von den in der Simulation erhobenen Messwerten abh{\"a}ngen. Zudem kann der Anwender das Straßennetzwerk interaktiv ver{\"a}ndern. 
Das vorgestellte Konzept bietet zahlreiche M{\"o}glichkeiten zur Erzeugung reproduzierbarer Szenarien f{\"u}r Experimente in Fahrsimulatoren.}, subject = {Straßenverkehr}, language = {de} } @article{KempfKrugPuppe2023, author = {Kempf, Sebastian and Krug, Markus and Puppe, Frank}, title = {KIETA: Key-insight extraction from scientific tables}, series = {Applied Intelligence}, volume = {53}, journal = {Applied Intelligence}, number = {8}, issn = {0924-669X}, doi = {10.1007/s10489-022-03957-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324180}, pages = {9513-9530}, year = {2023}, abstract = {An important but very time-consuming part of the research process is literature review. An already large and nevertheless growing ground set of publications as well as a steadily increasing publication rate continue to worsen the situation. Consequently, automating this task as far as possible is desirable. Experimental results of systems are key-insights of high importance during literature review and are usually represented in the form of tables. Our pipeline KIETA exploits these tables to contribute to the endeavor of automation by extracting them and their contained knowledge from scientific publications. The pipeline is split into multiple steps to guarantee modularity as well as analyzability, and agnosticism regarding the specific scientific domain up until the knowledge extraction step, which is based upon an ontology. Additionally, a dataset of corresponding articles has been manually annotated with information regarding table and knowledge extraction. Experiments show promising results that signal the possibility of an automated system, while also indicating limits of extracting knowledge from tables without any context.}, language = {en} } @article{KernKullmannGanaletal.2021, author = {Kern, Florian and Kullmann, Peter and Ganal, Elisabeth and Korwisi, Kristof and Stingl, Ren{\´e} and Niebling, Florian and Latoschik, Marc Erich}, title = {Off-The-Shelf Stylus: Using XR Devices for Handwriting and Sketching on Physically Aligned Virtual Surfaces}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.684498}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260219}, year = {2021}, abstract = {This article introduces the Off-The-Shelf Stylus (OTSS), a framework for 2D interaction (in 3D) as well as for handwriting and sketching with digital pen, ink, and paper on physically aligned virtual surfaces in Virtual, Augmented, and Mixed Reality (VR, AR, MR: XR for short). OTSS supports self-made XR styluses based on consumer-grade six-degrees-of-freedom XR controllers and commercially available styluses. The framework provides separate modules for three basic but vital features: 1) The stylus module provides stylus construction and calibration features. 2) The surface module provides surface calibration and visual feedback features for virtual-physical 2D surface alignment using our so-called 3ViSuAl procedure, and surface interaction features. 3) The evaluation suite provides a comprehensive test bed combining technical measurements for precision, accuracy, and latency with extensive usability evaluations including handwriting and sketching tasks based on established visuomotor, graphomotor, and handwriting research. The framework's development is accompanied by an extensive open source reference implementation targeting the Unity game engine using an Oculus Rift S headset and Oculus Touch controllers. 
The development compares three low-cost and low-tech options to equip controllers with a tip and includes a web browser-based surface providing support for interacting, handwriting, and sketching. The evaluation of the reference implementation based on the OTSS framework identified an average stylus precision of 0.98 mm (SD = 0.54 mm) and an average surface accuracy of 0.60 mm (SD = 0.32 mm) in a seated VR environment. The time for displaying the stylus movement as digital ink on the web browser surface in VR was 79.40 ms on average (SD = 23.26 ms), including the physical controller's motion-to-photon latency visualized by its virtual representation (M = 42.57 ms, SD = 15.70 ms). The usability evaluation (N = 10) revealed a low task load, high usability, and high user experience. Participants successfully reproduced given shapes and created legible handwriting, indicating that the OTSS and its reference implementation are ready for everyday use. We provide source code access to our implementation, including stylus and surface calibration and surface interaction features, making it easy to reuse, extend, adapt and/or replicate previous results (https://go.uniwue.de/hci-otss).}, language = {en} } @phdthesis{Kindermann2016, author = {Kindermann, Philipp}, title = {Angular Schematization in Graph Drawing}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-020-7 (print)}, doi = {10.25972/WUP-978-3-95826-021-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112549}, school = {W{\"u}rzburg University Press}, pages = {184}, year = {2016}, abstract = {Graphs are a frequently used tool to model relationships among entities. A graph is a binary relation between objects, that is, it consists of a set of objects (vertices) and a set of pairs of objects (edges). Networks are common examples of modeling data as a graph. For example, relationships between persons in a social network, or network links between computers in a telecommunication network can be represented by a graph. The clearest way to illustrate the modeled data is to visualize the graphs. The field of Graph Drawing deals with the problem of finding algorithms to automatically generate graph visualizations. The task is to find a "good" drawing, which can be measured by different criteria such as the number of crossings between edges or the used area. In this thesis, we study Angular Schematization in Graph Drawing. By this, we mean drawings with large angles (for example, between the edges at common vertices or at crossing points). The thesis consists of three parts. First, we deal with the placement of boxes. Boxes are axis-parallel rectangles that can, for example, contain text. They can be placed on a map to label important sites, or can be used to describe semantic relationships between words in a word network. In the second part of the thesis, we consider graph drawings that visually guide the viewer. These drawings generally induce large angles between edges that meet at a vertex. Furthermore, the edges are drawn crossing-free and in a way that makes them easy to follow for the human eye. The third and final part is devoted to crossings with large angles. 
In drawings with crossings, it is important to have large angles between edges at their crossing point, preferably right angles.}, language = {en} } @article{KirikkayisGallikWinteretal.2023, author = {Kirikkayis, Yusuf and Gallik, Florian and Winter, Michael and Reichert, Manfred}, title = {BPMNE4IoT: a framework for modeling, executing and monitoring IoT-driven processes}, series = {Future Internet}, volume = {15}, journal = {Future Internet}, number = {3}, issn = {1999-5903}, doi = {10.3390/fi15030090}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-304097}, year = {2023}, abstract = {The Internet of Things (IoT) enables a variety of smart applications, including smart home, smart manufacturing, and smart city. By enhancing Business Process Management Systems with IoT capabilities, the execution and monitoring of business processes can be significantly improved. Providing a holistic support for modeling, executing and monitoring IoT-driven processes, however, constitutes a challenge. Existing process modeling and process execution languages, such as BPMN 2.0, are unable to fully meet the IoT characteristics (e.g., asynchronicity and parallelism) of IoT-driven processes. In this article, we present BPMNE4IoT—A holistic framework for modeling, executing and monitoring IoT-driven processes. We introduce various artifacts and events based on the BPMN 2.0 metamodel that allow realizing the desired IoT awareness of business processes. The framework is evaluated along two real-world scenarios from two different domains. Moreover, we present a user study for comparing BPMNE4IoT and BPMN 2.0. In particular, this study has confirmed that the BPMNE4IoT framework facilitates the support of IoT-driven processes.}, language = {en} } @phdthesis{Klein2010, author = {Klein, Alexander}, title = {Performance Issues of MAC and Routing Protocols in Wireless Sensor Networks}, doi = {10.25972/OPUS-4465}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-52870}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {The focus of this work lies on the communication issues of Medium Access Control (MAC) and routing protocols in the context of WSNs. The communication challenges in these networks mainly result from high node density, low bandwidth, low energy constraints and the hardware limitations in terms of memory, computational power and sensing capabilities of low-power transceivers. For this reason, the structure of WSNs is always kept as simple as possible to minimize the impact of communication issues. Thus, the majority of WSNs apply a simple one hop star topology since multi-hop communication has high demands on the routing protocol since it increases the bandwidth requirements of the network. Moreover, medium access becomes a challenging problem due to the fact that low-power transceivers are very limited in their sensing capabilities. The first contribution is represented by the Backoff Preamble-based MAC Protocol with Sequential Contention Resolution (BPS-MAC) which is designed to overcome the limitations of low-power transceivers. Two communication issues, namely the Clear Channel Assessment (CCA) delay and the turnaround time, are directly addressed by the protocol. The CCA delay represents the period of time which is required by the transceiver to detect a busy radio channel while the turnaround time specifies the period of time which is required to switch between receive and transmit mode. 
Standard Carrier Sense Multiple Access (CSMA) protocols do not achieve high performance in terms of packet loss if the traffic is highly correlated due to the fact that the transceiver is not able to sense the medium during the switching phase. Therefore, a node may start to transmit data while another node is already transmitting since it has sensed an idle medium right before it started to switch its transceiver from receive to transmit mode. The BPS-MAC protocol uses a new sequential preamble-based medium access strategy which can be adapted to the hardware capabilities of the transceivers. The protocol achieves a very low packet loss rate even in wireless networks with high node density and event-driven traffic without the need of synchronization. This makes the protocol attractive to applications such as structural health monitoring, where event suppression is not an option. Moreover, acknowledgments or complex retransmission strategies become almost unnecessary since the sequential preamble-based contention resolution mechanism minimizes the collision probability. However, packets can still be lost as a consequence of interference or other issues which affect signal propagation. The second contribution consists of a new routing protocol which is able to quickly detect topology changes without generating a large amount of overhead. The key characteristics of the Statistic-Based Routing (SBR) protocol are high end-to-end reliability (in fixed and mobile networks), load balancing capabilities, a smooth continuous routing metric, quick adaptation to changing network conditions, low processing and memory requirements, low overhead, support of unidirectional links and simplicity. The protocol can establish routes in a hybrid or a proactive mode and uses an adaptive continuous routing metric which makes it very flexible in terms of scalability while maintaining stable routes. The hybrid mode is optimized for low-power WSNs since routes are only established on demand. The difference of the hybrid mode to reactive routing strategies is that routing messages are periodically transmitted to maintain already established routes. However, the protocol stops the transmission of routing messages if no data packets are transmitted for a certain time period in order to minimize the routing overhead and the energy consumption. The proactive mode is designed for high data rate networks which have less energy constraints. In this mode, the protocol periodically transmits routing messages to establish routes in a proactive way even in the absence of data traffic. Thus, nodes in the network can immediately transmit data since the route to the destination is already established in advance. In addition, a new delay-based routing message forwarding strategy is introduced. The forwarding strategy is part of SBR but can also be applied to many routing protocols in order to modify the established topology. The strategy can be used, e.g. in mobile networks, to decrease the packet loss by deferring routing messages with respect to the neighbor change rate. Thus, nodes with a stable neighborhood forward messages faster than nodes within a fast changing neighborhood. 
As a result, routes are established through nodes with correlated movement which results in fewer topology changes due to higher link durations.}, subject = {Routing}, language = {en} } @phdthesis{Klein2014, author = {Klein, Dominik Werner}, title = {Design and Evaluation of Components for Future Internet Architectures}, issn = {1432-8801}, doi = {10.25972/OPUS-9313}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-93134}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Die derzeitige Internetarchitektur wurde nicht in einem geplanten Prozess konzipiert und entwickelt, sondern hat vielmehr eine evolutionsartige Entwicklung hinter sich. Ausl{\"o}ser f{\"u}r die jeweiligen Evolutionsschritte waren dabei meist aufstrebende Anwendungen, welche neue Anforderungen an die zugrundeliegende Netzarchitektur gestellt haben. Um diese Anforderungen zu erf{\"u}llen, wurden h{\"a}ufig neuartige Dienste oder Protokolle spezifiziert und in die bestehende Architektur integriert. Dieser Prozess ist jedoch meist mit hohem Aufwand verbunden und daher sehr tr{\"a}ge, was die Entwicklung und Verbreitung innovativer Dienste beeintr{\"a}chtigt. Derzeitig diskutierte Konzepte wie Software-Defined Networking (SDN) oder Netzvirtualisierung (NV) werden als eine M{\"o}glichkeit angesehen, die Altlasten der bestehenden Internetarchitektur zu l{\"o}sen. Beiden Konzepten gemein ist die Idee, logische Netze {\"u}ber dem physikalischen Substrat zu betreiben. Diese logischen Netze sind hochdynamisch und k{\"o}nnen so flexibel an die Anforderungen der jeweiligen Anwendungen angepasst werden. Insbesondere erlaubt das Konzept der Virtualisierung intelligentere Netzknoten, was innovative neue Anwendungsf{\"a}lle erm{\"o}glicht. Ein h{\"a}ufig in diesem Zusammenhang diskutierter Anwendungsfall ist die Mobilit{\"a}t sowohl von Endger{\"a}ten als auch von Diensten an sich. Die Mobilit{\"a}t der Dienste wird hierbei ausgenutzt, um die Zugriffsverz{\"o}gerung oder die belegten Ressourcen im Netz zu reduzieren, indem die Dienste zum Beispiel in f{\"u}r den Nutzer geographisch nahe Datenzentren migriert werden. Neben den reinen Mechanismen bez{\"u}glich Dienst- und Endger{\"a}temobilit{\"a}t sind in diesem Zusammenhang auch geeignete {\"U}berwachungsl{\"o}sungen relevant, welche die vom Nutzer wahrgenommene Dienstg{\"u}te bewerten k{\"o}nnen. Diese L{\"o}sungen liefern wichtige Entscheidungshilfen f{\"u}r die Migration oder {\"u}berwachen m{\"o}gliche Effekte der Migration auf die erfahrene Dienstg{\"u}te beim Nutzer. Im Falle von Video Streaming erm{\"o}glicht ein solcher Anwendungsfall die flexible Anpassung der Streaming Topologie f{\"u}r mobile Nutzer, um so die Videoqualit{\"a}t unabh{\"a}ngig vom Zugangsnetz aufrechterhalten zu k{\"o}nnen. Im Rahmen dieser Doktorarbeit wird der beschriebene Anwendungsfall am Beispiel einer Video Streaming Anwendung n{\"a}her analysiert und auftretende Herausforderungen werden diskutiert. Des Weiteren werden L{\"o}sungsans{\"a}tze vorgestellt und bez{\"u}glich ihrer Effizienz ausgewertet. Im Detail besch{\"a}ftigt sich die Arbeit mit der Leistungsanalyse von Mechanismen f{\"u}r die Dienstmobilit{\"a}t und entwickelt eine Architektur zur Optimierung der Dienstmobilit{\"a}t. Im Bereich Endger{\"a}temobilit{\"a}t werden Verbesserungen entwickelt, welche die Latenz zwischen Endger{\"a}t und Dienst reduzieren oder die Konnektivit{\"a}t unabh{\"a}ngig vom Zugangsnetz gew{\"a}hrleisten. 
Im letzten Teilbereich wird eine L{\"o}sung zur {\"U}berwachung der Videoqualit{\"a}t im Netz entwickelt und bez{\"u}glich ihrer Genauigkeit analysiert.}, subject = {Leistungsbewertung}, language = {en} } @article{KlemzRote2022, author = {Klemz, Boris and Rote, G{\"u}nter}, title = {Linear-Time Algorithms for Maximum-Weight Induced Matchings and Minimum Chain Covers in Convex Bipartite Graphs}, series = {Algorithmica}, volume = {84}, journal = {Algorithmica}, number = {4}, issn = {1432-0541}, doi = {10.1007/s00453-021-00904-w}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-267876}, pages = {1064-1080}, year = {2022}, abstract = {A bipartite graph G=(U,V,E) is convex if the vertices in V can be linearly ordered such that for each vertex u∈U, the neighbors of u are consecutive in the ordering of V. An induced matching H of G is a matching for which no edge of E connects endpoints of two different edges of H. We show that in a convex bipartite graph with n vertices and m weighted edges, an induced matching of maximum total weight can be computed in O(n+m) time. An unweighted convex bipartite graph has a representation of size O(n) that records for each vertex u∈U the first and last neighbor in the ordering of V. Given such a compact representation, we compute an induced matching of maximum cardinality in O(n) time. In convex bipartite graphs, maximum-cardinality induced matchings are dual to minimum chain covers. A chain cover is a covering of the edge set by chain subgraphs, that is, subgraphs that do not contain induced matchings of more than one edge. Given a compact representation, we compute a representation of a minimum chain cover in O(n) time. If no compact representation is given, the cover can be computed in O(n+m) time. All of our algorithms achieve optimal linear running time for the respective problem and model, and they improve and generalize the previous results in several ways: The best algorithms for the unweighted problem versions had a running time of O(n\(^{2}\)) (Brandst{\"a}dt et al. in Theor. Comput. Sci. 381(1-3):260-265, 2007. https://doi.org/10.1016/j.tcs.2007.04.006). The weighted case has not been considered before.}, language = {en} } @phdthesis{Kluge2004, author = {Kluge, Boris}, title = {Motion coordination for a mobile robot in dynamic environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-15508}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {Generating coordinated motion for a mobile robot operating in natural, continuously changing environments among moving obstacles such as humans is a complex task which requires the solution of various sub problems. In this thesis, we will cover the topics of perception and navigation in dynamic environments, as well as reasoning about the motion of the obstacles and of the robot itself. Perception is mainly considered for a laser range finder, and an according method for obstacle detection and tracking is proposed. Network optimization algorithms are used for data association in the tracking step, resulting in considerable robustness with respect to clutter by small objects. Navigation in general is accomplished using an adaptation of the velocity obstacle approach to the given vehicle kinematics, and cooperative motion coordination between the robot and a human guide is achieved using an appropriate selection rule for collision-free velocities. 
Next, the robot is enabled to compare its path to the path of a human guide using one of a collection of presented distance measures, which permits the detection of exceptional conditions. Furthermore, a taxonomy for the assessment of situations concerning the robot is presented, and following a summary of existing approaches to more intelligent and comprehensive perception, we propose a method for obstruction detection. Finally, a new approach to reflective navigation behaviors is described where the robot reasons about intelligent moving obstacles in its environment, which allows to adjust the character of the robot motion from regardful and defensive to more self-confident and aggressive behaviors.}, subject = {Bewegungsablauf}, language = {de} } @phdthesis{Kluegl2000, author = {Kl{\"u}gl, Franziska}, title = {Aktivit{\"a}tsbasierte Verhaltensmodellierung und ihre Unterst{\"u}tzung bei Multiagentensimulationen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2874}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2000}, abstract = {Durch Zusammenf{\"u}hrung traditioneller Methoden zur individuenbasierten Simulation und dem Konzept der Multiagentensysteme steht mit der Multiagentensimulation eine Methodik zur Verf{\"u}gung, die es erm{\"o}glicht, sowohl technisch als auch konzeptionell eine neue Ebene an Detaillierung bei Modellbildung und Simulation zu erreichen. Ein Modell beruht dabei auf dem Konzept einer Gesellschaft: Es besteht aus einer Menge interagierender, aber in ihren Entscheidungen autonomen Einheiten, den Agenten. Diese {\"a}ndern durch ihre Aktionen ihre Umwelt und reagieren ebenso auf die f{\"u}r sie wahrnehmbaren {\"A}nderungen in der Umwelt. Durch die Simulation jedes Agenten zusammen mit der Umwelt, in der er "lebt", wird die Dynamik im Gesamtsystem beobachtbar. In der vorliegenden Dissertation wurde ein Repr{\"a}sentationsschema f{\"u}r Multiagentensimulationen entwickelt werden, das es Fachexperten, wie zum Beispiel Biologen, erm{\"o}glicht, selbst{\"a}ndig ohne traditionelles Programmieren Multiagentenmodelle zu implementieren und mit diesen Experimente durchzuf{\"u}hren. Dieses deklarative Schema beruht auf zwei Basiskonzepten: Der K{\"o}rper eines Agenten besteht aus Zustandsvariablen. Das Verhalten des Agenten kann mit Regeln beschrieben werden. Ausgehend davon werden verschiedene Strukturierungsans{\"a}tze behandelt. Das wichtigste Konzept ist das der "Aktivit{\"a}t", einer Art "Verhaltenszustand": W{\"a}hrend der Agent in einer Aktivit{\"a}t A verweilt, f{\"u}hrt er die zugeh{\"o}rigen Aktionen aus und dies solange, bis eine Regel feuert, die diese Aktivit{\"a}t beendet und eine neue Aktivit{\"a}t ausw{\"a}hlt. Durch Indizierung dieser Regeln bei den zugeh{\"o}rigen Aktivit{\"a}ten und Einf{\"u}hrung von abstrakten Aktivit{\"a}ten entsteht ein Schema f{\"u}r eine vielf{\"a}ltig strukturierbare Verhaltensbeschreibung. Zu diesem Schema wurde ein Interpreter entwickelt, der ein derartig repr{\"a}sentiertes Modell ausf{\"u}hrt und so Simulationsexperimente mit dem Multiagentenmodell erlaubt. Auf dieser Basis wurde die Modellierungs- und Experimentierumgebung SeSAm ("Shell f{\"u}r Simulierte Agentensysteme") entwickelt. Sie verwendet vorhandene Konzepte aus dem visuellen Programmieren. Mit dieser Umgebung wurden Anwendungsmodelle aus verschiedenen Dom{\"a}nen realisiert: Neben abstrakten Spielbeispielen waren dies vor allem Fragestellungen zu sozialen Insekten, z.B. 
zum Verhalten von Ameisen, Bienen oder der Interaktion zwischen Bienenv{\"o}lkern und Milbenpopulationen.}, subject = {Agent }, language = {de} } @article{KoopmannStubbemannKapaetal.2021, author = {Koopmann, Tobias and Stubbemann, Maximilian and Kapa, Matthias and Paris, Michael and Buenstorf, Guido and Hanika, Tom and Hotho, Andreas and J{\"a}schke, Robert and Stumme, Gerd}, title = {Proximity dimensions and the emergence of collaboration: a HypTrails study on German AI research}, series = {Scientometrics}, volume = {126}, journal = {Scientometrics}, number = {12}, issn = {1588-2861}, doi = {10.1007/s11192-021-03922-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269831}, pages = {9847-9868}, year = {2021}, abstract = {Creation and exchange of knowledge depends on collaboration. Recent work has suggested that the emergence of collaboration frequently relies on geographic proximity. However, being co-located tends to be associated with other dimensions of proximity, such as social ties or a shared organizational environment. To account for such factors, multiple dimensions of proximity have been proposed, including cognitive, institutional, organizational, social and geographical proximity. Since they strongly interrelate, disentangling these dimensions and their respective impact on collaboration is challenging. To address this issue, we propose various methods for measuring different dimensions of proximity. We then present an approach to compare and rank them with respect to the extent to which they indicate co-publications and co-inventions. We adapt the HypTrails approach, which was originally developed to explain human navigation, to co-author and co-inventor graphs. We evaluate this approach on a subset of the German research community, specifically academic authors and inventors active in research on artificial intelligence (AI). We find that social proximity and cognitive proximity are more important for the emergence of collaboration than geographic proximity.}, language = {en} } @phdthesis{Kosub2001, author = {Kosub, Sven}, title = {Complexity and Partitions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2808}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Computational complexity theory usually investigates the complexity of sets, i.e., the complexity of partitions into two parts. But often it is more appropriate to represent natural problems by partitions into more than two parts. A particularly interesting class of such problems consists of classification problems for relations. For instance, a binary relation R typically defines a partitioning of the set of all pairs (x,y) into four parts, classifiable according to the cases where R(x,y) and R(y,x) hold, only R(x,y) or only R(y,x) holds or even neither R(x,y) nor R(y,x) is true. By means of concrete classification problems such as Graph Embedding or Entailment (for propositional logic), this thesis systematically develops tools, in shape of the boolean hierarchy of NP-partitions and its refinements, for the qualitative analysis of the complexity of partitions generated by NP-relations. The Boolean hierarchy of NP-partitions is introduced as a generalization of the well-known and well-studied Boolean hierarchy (of sets) over NP. Whereas the latter hierarchy has a very simple structure, the situation is much more complicated for the case of partitions into at least three parts. 
To get an idea of this hierarchy, alternative descriptions of the partition classes are given in terms of finite, labeled lattices. Based on these characterizations, the Embedding Conjecture is established, providing the complete information on the structure of the hierarchy. This conjecture is supported by several results. A natural extension of the Boolean hierarchy of NP-partitions emerges from the lattice-characterization of its classes by considering partition classes generated by finite, labeled posets. It turns out that all significant ideas translate from the case of lattices. The induced refined Boolean hierarchy of NP-partitions enables us to capture the complexity of certain relations (such as Graph Embedding) more accurately and to describe projectively closed partition classes.}, subject = {Partition }, language = {en} } @techreport{KounevBrosigHuber2014, author = {Kounev, Samuel and Brosig, Fabian and Huber, Nikolaus}, title = {The Descartes Modeling Language}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-104887}, pages = {91}, year = {2014}, abstract = {This technical report introduces the Descartes Modeling Language (DML), a new architecture-level modeling language for modeling Quality-of-Service (QoS) and resource management related aspects of modern dynamic IT systems, infrastructures and services. DML is designed to serve as a basis for self-aware resource management during operation ensuring that system QoS requirements are continuously satisfied while infrastructure resources are utilized as efficiently as possible.}, subject = {Ressourcenmanagement}, language = {en} } @article{KraftBirkReichertetal.2020, author = {Kraft, Robin and Birk, Ferdinand and Reichert, Manfred and Deshpande, Aniruddha and Schlee, Winfried and Langguth, Berthold and Baumeister, Harald and Probst, Thomas and Spiliopoulou, Myra and Pryss, R{\"u}diger}, title = {Efficient processing of geospatial mHealth data using a scalable crowdsensing platform}, series = {Sensors}, volume = {20}, journal = {Sensors}, number = {12}, issn = {1424-8220}, doi = {10.3390/s20123456}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-207826}, year = {2020}, abstract = {Smart sensors and smartphones are becoming increasingly prevalent. Both can be used to gather environmental data (e.g., noise). Importantly, these devices can be connected to each other as well as to the Internet to collect large amounts of sensor data, which leads to many new opportunities. In particular, mobile crowdsensing techniques can be used to capture phenomena of common interest. Especially valuable insights can be gained if the collected data are additionally related to the time and place of the measurements. However, many technical solutions still use monolithic backends that are not capable of processing crowdsensing data in a flexible, efficient, and scalable manner. In this work, an architectural design was conceived with the goal of managing geospatial data in challenging crowdsensing healthcare scenarios. It will be shown how the proposed approach can be used to provide users with an interactive map of environmental noise, allowing tinnitus patients and other health-conscious people to avoid locations with harmful sound levels. Technically, the shown approach combines cloud-native applications with Big Data and stream processing concepts.
In general, the presented architectural design shall serve as a foundation to implement practical and scalable crowdsensing platforms for various healthcare scenarios beyond the addressed use case.}, language = {en} } @article{KraftReichertPryss2021, author = {Kraft, Robin and Reichert, Manfred and Pryss, R{\"u}diger}, title = {Towards the interpretation of sound measurements from smartphones collected with mobile crowdsensing in the healthcare domain: an experiment with Android devices}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {1}, issn = {1424-8220}, doi = {10.3390/s22010170}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-252246}, year = {2021}, abstract = {The ubiquity of mobile devices fosters the combined use of ecological momentary assessments (EMA) and mobile crowdsensing (MCS) in the field of healthcare. This combination not only allows researchers to collect ecologically valid data, but also to use smartphone sensors to capture the context in which these data are collected. The TrackYourTinnitus (TYT) platform uses EMA to track users' individual subjective tinnitus perception and MCS to capture an objective environmental sound level while the EMA questionnaire is filled in. However, the sound level data cannot be used directly among the different smartphones used by TYT users, since uncalibrated raw values are stored. This work describes an approach towards making these values comparable. In the described setting, the evaluation of sensor measurements from different smartphone users becomes increasingly prevalent. Therefore, the shown approach can also be considered as a more general solution, as it not only shows how it helped to interpret TYT sound level data, but may also stimulate other researchers, especially those who need to interpret sensor data in a similar setting. Altogether, the approach will show that measuring sound levels with mobile devices is possible in healthcare scenarios, but there are many challenges to ensuring that the measured values are interpretable.}, language = {en} } @phdthesis{Krenzer2023, author = {Krenzer, Adrian}, title = {Machine learning to support physicians in endoscopic examinations with a focus on automatic polyp detection in images and videos}, doi = {10.25972/OPUS-31911}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319119}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Deep learning enables enormous progress in many computer vision-related tasks. Artificial Intelligence (AI) steadily yields new state-of-the-art results in the field of detection and classification. Thereby AI performance equals or exceeds human performance. Those achievements impacted many domains, including medical applications. One particular field of medical applications is gastroenterology. In gastroenterology, machine learning algorithms are used to assist examiners during interventions. One of the most critical concerns for gastroenterologists is the development of Colorectal Cancer (CRC), which is one of the leading causes of cancer-related deaths worldwide. Detecting polyps in screening colonoscopies is the essential procedure to prevent CRC. Thereby, the gastroenterologist uses an endoscope to screen the whole colon to find polyps during a colonoscopy. Polyps are mucosal growths that can vary in severity. This thesis supports gastroenterologists in their examinations with automated detection and classification systems for polyps. The main contribution is a real-time polyp detection system.
This system is ready to be installed in any gastroenterology practice worldwide using open-source software. The system achieves state-of-the-art detection results and is currently evaluated in a clinical trial in four different centers in Germany. The thesis presents two additional key contributions: One is a polyp detection system with extended vision tested in an animal trial. Polyps often hide behind folds or in uninvestigated areas. Therefore, the polyp detection system with extended vision uses an endoscope assisted by two additional cameras to see behind those folds. If a polyp is detected, the endoscopist receives a visual signal. While the detection system handles the additional two camera inputs, the endoscopist focuses on the main camera as usual. The second consists of two polyp classification models, one for the classification based on shape (Paris) and the other on surface and texture (NBI International Colorectal Endoscopic (NICE) classification). Both classifications help the endoscopist with the treatment of and the decisions about the detected polyp. The key algorithms of the thesis achieve state-of-the-art performance. Outstandingly, the polyp detection system tested on a highly demanding video data set shows an F1 score of 90.25 \% while working in real time. The results exceed all real-time systems in the literature. Furthermore, the first preliminary results of the clinical trial of the polyp detection system suggest a high Adenoma Detection Rate (ADR). In the preliminary study, all polyps were detected by the polyp detection system, and the system achieved a high usability score of 96.3 (max 100). The Paris classification model achieved an F1 score of 89.35 \%, which is state-of-the-art. The NICE classification model achieved an F1 score of 81.13 \%. Furthermore, a large data set for polyp detection and classification was created during this thesis. Therefore, a fast and robust annotation system called Fast Colonoscopy Annotation Tool (FastCAT) was developed. The system simplifies the annotation process for gastroenterologists. Thereby, the gastroenterologists only annotate key parts of the endoscopic video. Afterward, those video parts are pre-labeled by a polyp detection AI to speed up the process. After the AI has pre-labeled the frames, non-experts correct and finish the annotation. This annotation process is fast and ensures high quality. FastCAT reduces the overall workload of the gastroenterologist on average by a factor of 20 compared to an open-source state-of-the-art annotation tool.}, subject = {Deep Learning}, language = {en} } @article{KrenzerHeilFittingetal., author = {Krenzer, Adrian and Heil, Stefan and Fitting, Daniel and Matti, Safa and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Automated classification of polyps using deep learning architectures and few-shot learning}, series = {BMC Medical Imaging}, volume = {23}, journal = {BMC Medical Imaging}, doi = {10.1186/s12880-023-01007-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357465}, abstract = {Background Colorectal cancer is a leading cause of cancer-related deaths worldwide. The best method to prevent CRC is a colonoscopy. However, not all colon polyps have the risk of becoming cancerous. Therefore, polyps are classified using different classification systems. After the classification, further treatment and procedures are based on the classification of the polyp. Nevertheless, classification is not easy.
Therefore, we suggest two novel automated classification systems assisting gastroenterologists in classifying polyps based on the NICE and Paris classification. Methods We build two classification systems. One classifies polyps based on their shape (Paris). The other classifies polyps based on their texture and surface patterns (NICE). A two-step process for the Paris classification is introduced: First, detecting and cropping the polyp on the image, and secondly, classifying the polyp based on the cropped area with a transformer network. For the NICE classification, we design a few-shot learning algorithm based on the Deep Metric Learning approach. The algorithm creates an embedding space for polyps, which allows classification from a few examples to account for the data scarcity of NICE annotated images in our database. Results For the Paris classification, we achieve an accuracy of 89.35 \%, surpassing all papers in the literature and establishing a new state-of-the-art and baseline accuracy for other publications on a public data set. For the NICE classification, we achieve a competitive accuracy of 81.13 \% and thereby demonstrate the viability of the few-shot learning paradigm in polyp classification in data-scarce environments. Additionally, we show different ablations of the algorithms. Finally, we further elaborate on the explainability of the system by showing heat maps of the neural network explaining neural activations. Conclusion Overall, we introduce two polyp classification systems to assist gastroenterologists. We achieve state-of-the-art performance in the Paris classification and demonstrate the viability of the few-shot learning paradigm in the NICE classification, addressing the prevalent data scarcity issues faced in medical machine learning.}, language = {en} } @article{KrenzerMakowskiHekaloetal.2022, author = {Krenzer, Adrian and Makowski, Kevin and Hekalo, Amar and Fitting, Daniel and Troya, Joel and Zoller, Wolfram G. and Hann, Alexander and Puppe, Frank}, title = {Fast machine learning annotation in the medical domain: a semi-automated video annotation tool for gastroenterologists}, series = {BioMedical Engineering OnLine}, volume = {21}, journal = {BioMedical Engineering OnLine}, number = {1}, doi = {10.1186/s12938-022-01001-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300231}, year = {2022}, abstract = {Background Machine learning, especially deep learning, is becoming more and more relevant in research and development in the medical domain. For all the supervised deep learning applications, data is the most critical factor in securing successful implementation and sustaining the progress of the machine learning model. Especially gastroenterological data, which often involve endoscopic videos, are cumbersome to annotate. Domain experts are needed to interpret and annotate the videos. To support those domain experts, we generated a framework. With this framework, instead of annotating every frame in the video sequence, experts are just performing key annotations at the beginning and the end of sequences with pathologies, e.g., visible polyps. Subsequently, non-expert annotators supported by machine learning add the missing annotations for the frames in-between. Methods In our framework, an expert reviews the video and annotates a few video frames to verify the object's annotations for the non-expert. In a second step, a non-expert has visual confirmation of the given object and can annotate all following and preceding frames with AI assistance.
After the expert has finished, relevant frames will be selected and passed on to an AI model. This information allows the AI model to detect and mark the desired object on all following and preceding frames with an annotation. Therefore, the non-expert can adjust and modify the AI predictions and export the results, which can then be used to train the AI model. Results Using this framework, we were able to reduce workload of domain experts on average by a factor of 20 on our data. This is primarily due to the structure of the framework, which is designed to minimize the workload of the domain expert. Pairing this framework with a state-of-the-art semi-automated AI model enhances the annotation speed further. Through a prospective study with 10 participants, we show that semi-automated annotation using our tool doubles the annotation speed of non-expert annotators compared to a well-known state-of-the-art annotation tool. Conclusion In summary, we introduce a framework for fast expert annotation for gastroenterologists, which reduces the workload of the domain expert considerably while maintaining a very high annotation quality. The framework incorporates a semi-automated annotation system utilizing trained object detection models. The software and framework are open-source.}, language = {en} } @article{KruegerFriedrichFoersteretal.2012, author = {Krueger, Beate and Friedrich, Torben and F{\"o}rster, Frank and Bernhardt, J{\"o}rg and Gross, Roy and Dandekar, Thomas}, title = {Different evolutionary modifications as a guide to rewire two-component systems}, series = {Bioinformatics and Biology Insights}, volume = {6}, journal = {Bioinformatics and Biology Insights}, doi = {10.4137/BBI.S9356}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-123647}, pages = {97-128}, year = {2012}, abstract = {Two-component systems (TCS) are short signalling pathways generally occurring in prokaryotes. They frequently regulate prokaryotic stimulus responses and thus are also of interest for engineering in biotechnology and synthetic biology. The aim of this study is to better understand and describe rewiring of TCS while investigating different evolutionary scenarios. Based on large-scale screens of TCS in different organisms, this study gives detailed data, concrete alignments, and structure analysis on three general modification scenarios, where TCS were rewired for new responses and functions: (i) exchanges in the sequence within single TCS domains, (ii) exchange of whole TCS domains; (iii) addition of new components modulating TCS function. As a result, the replacement of stimulus and promotor cassettes to rewire TCS is well defined exploiting the alignments given here. The diverged TCS examples are non-trivial and the design is challenging. 
Designed connector proteins may also be useful to modify TCS in selected cases.}, language = {en} } @article{KrupitzerEberhardingerGerostathopoulosetal.2020, author = {Krupitzer, Christian and Eberhardinger, Benedikt and Gerostathopoulos, Ilias and Raibulet, Claudia}, title = {Introduction to the special issue "Applications in Self-Aware Computing Systems and their Evaluation"}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010022}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-203439}, year = {2020}, abstract = {The joint 1st Workshop on Evaluations and Measurements in Self-Aware Computing Systems (EMSAC 2019) and Workshop on Self-Aware Computing (SeAC) was held as part of the FAS* conference alliance in conjunction with the 16th IEEE International Conference on Autonomic Computing (ICAC) and the 13th IEEE International Conference on Self-Adaptive and Self-Organizing Systems (SASO) in Ume{\aa}, Sweden on 20 June 2019. The goal of this one-day workshop was to bring together researchers and practitioners from academic environments and from the industry to share their solutions, ideas, visions, and doubts in self-aware computing systems in general and in the evaluation and measurements of such systems in particular. The workshop aimed to enable discussions, partnerships, and collaborations among the participants. This special issue follows the theme of the workshop. It contains extended versions of workshop presentations as well as additional contributions.}, language = {en} } @article{KuhnGrippFliederetal.2015, author = {Kuhn, Joachim and Gripp, Tatjana and Flieder, Tobias and Dittrich, Marcus and Hendig, Doris and Busse, Jessica and Knabbe, Cornelius and Birschmann, Ingvild}, title = {UPLC-MRM Mass Spectrometry Method for Measurement of the Coagulation Inhibitors Dabigatran and Rivaroxaban in Human Plasma and Its Comparison with Functional Assays}, series = {PLOS ONE}, volume = {10}, journal = {PLOS ONE}, number = {12}, doi = {10.1371/journal.pone.0145478}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-136023}, pages = {e0145478}, year = {2015}, abstract = {Introduction The fast, precise, and accurate measurement of the new generation of oral anticoagulants such as dabigatran and rivaroxaban in patients' plasma may provide important information in different clinical circumstances such as in the case of suspicion of overdose, when patients switch from an existing oral anticoagulant, in patients with hepatic or renal impairment, by concomitant use of interacting drugs, or to assess anticoagulant concentration in patients' blood before major surgery. Methods Here, we describe a quick and precise method to measure the coagulation inhibitors dabigatran and rivaroxaban using ultra-performance liquid chromatography electrospray ionization-tandem mass spectrometry in multiple reaction monitoring (MRM) mode (UPLC-MRM MS). Internal standards (ISs) were added to the sample and, after protein precipitation, the sample was separated on a reverse-phase column. After ionization of the analytes, the ions were detected using electrospray ionization-tandem mass spectrometry. Run time was 2.5 minutes per injection. Ion suppression was characterized by means of post-column infusion. Results The calibration curves of dabigatran and rivaroxaban were linear over the working range between 0.8 and 800 mu g/L (r > 0.99).
Limits of detection (LOD) in the plasma matrix were 0.21 mu g/L for dabigatran and 0.34 mu g/L for rivaroxaban, and lower limits of quantification (LLOQ) in the plasma matrix were 0.46 mu g/L for dabigatran and 0.54 mu g/L for rivaroxaban. The intraassay coefficients of variation (CVs) for dabigatran and rivaroxaban were < 4\% and 6\%, respectively; the interassay CVs were < 6\% for dabigatran and < 9\% for rivaroxaban. Inaccuracy was < 5\% for both substances. The mean recovery was 104.5\% (range 83.8-113.0\%) for dabigatran and 87.0\% (range 73.6-105.4\%) for rivaroxaban. No significant ion suppressions were detected at the elution times of dabigatran or rivaroxaban. Both coagulation inhibitors were stable in citrate plasma at -20 degrees C, 4 degrees C and even at RT for at least one week. A method comparison between our UPLC-MRM MS method, the commercially available automated Direct Thrombin Inhibitor assay (DTI assay) for dabigatran measurement from CoaChrom Diagnostica, as well as the automated anti-Xa assay for rivaroxaban measurement from Chromogenix, both performed by ACL-TOP, showed a high degree of correlation. However, UPLC-MRM MS measurement of dabigatran and rivaroxaban has a much better selectivity than classical functional assays measuring activities of various coagulation factors, which are susceptible to interference by other coagulant drugs. Conclusions Overall, we developed and validated a sensitive and specific UPLC-MRM MS assay for the quick and specific measurement of dabigatran and rivaroxaban in human plasma.}, language = {en} } @article{KunzLiangNillaetal.2016, author = {Kunz, Meik and Liang, Chunguang and Nilla, Santosh and Cecil, Alexander and Dandekar, Thomas}, title = {The drug-minded protein interaction database (DrumPID) for efficient target analysis and drug development}, series = {Database}, volume = {2016}, journal = {Database}, doi = {10.1093/database/baw041}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147369}, pages = {baw041}, year = {2016}, abstract = {The drug-minded protein interaction database (DrumPID) has been designed to provide fast, tailored information on drugs and their protein networks including indications, protein targets and side-targets. Starting queries include compound, target and protein interactions and organism-specific protein families. Furthermore, drug name, chemical structures and their SMILES notation, affected proteins (potential drug targets), organisms as well as diseases can be queried including various combinations and refinement of searches. Drugs and protein interactions are analyzed in detail with reference to protein structures and catalytic domains, related compound structures as well as potential targets in other organisms.
DrumPID considers drug functionality, compound similarity, target structure, interactome analysis and organismic range for a compound, useful for drug development, predicting drug side-effects and structure-activity relationships.}, language = {en} } @article{LandeckAlvarezIgarzabalUnruhetal.2022, author = {Landeck, Maximilian and Alvarez Igarz{\´a}bal, Federico and Unruh, Fabian and Habenicht, Hannah and Khoshnoud, Shiva and Wittmann, Marc and Lugrin, Jean-Luc and Latoschik, Marc Erich}, title = {Journey through a virtual tunnel: Simulated motion and its effects on the experience of time}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.1059971}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301519}, year = {2022}, abstract = {This paper examines the relationship between time and motion perception in virtual environments. Previous work has shown that the perception of motion can affect the perception of time. We developed a virtual environment that simulates motion in a tunnel and measured its effects on the estimation of the duration of time, the speed at which perceived time passes, and the illusion of self-motion, also known as vection. When large areas of the visual field move in the same direction, vection can occur; observers often perceive this as self-motion rather than motion of the environment. To generate different levels of vection and investigate its effects on time perception, we developed an abstract procedural tunnel generator. The generator can simulate different speeds and densities of tunnel sections (visibly distinguishable sections that form the virtual tunnel), as well as the degree of embodiment of the user avatar (with or without virtual hands). We exposed participants to various tunnel simulations with different durations, speeds, and densities in a remote desktop and a virtual reality (VR) laboratory study. Time passed subjectively faster under high-speed and high-density conditions in both studies. The experience of self-motion was also stronger under high-speed and high-density conditions. Both studies revealed a significant correlation between the perceived passage of time and perceived self-motion. Subjects in the virtual reality study reported a stronger self-motion experience, a faster perceived passage of time, and shorter time estimates than subjects in the desktop study. Our results suggest that a virtual tunnel simulation can manipulate time perception in virtual reality. We will explore these results for the development of virtual reality applications for therapeutic approaches in our future work. This could be particularly useful in treating disorders like depression, autism, and schizophrenia, which are known to be associated with distortions in time perception. 
For example, the tunnel could be therapeutically applied by resetting patients' time perceptions by exposing them to the tunnel under different conditions, such as increasing or decreasing perceived time.}, language = {en} } @article{LatoschikWienrich2022, author = {Latoschik, Marc Erich and Wienrich, Carolin}, title = {Congruence and plausibility, not presence: pivotal conditions for XR experiences and effects, a novel approach}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.694433}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284787}, year = {2022}, abstract = {Presence is often considered the most important quale describing the subjective feeling of being in a computer-generated and/or computer-mediated virtual environment. The identification and separation of orthogonal presence components, i.e., the place illusion and the plausibility illusion, has been an accepted theoretical model describing Virtual Reality (VR) experiences for some time. This perspective article challenges this presence-oriented VR theory. First, we argue that a place illusion cannot be the major construct to describe the much wider scope of virtual, augmented, and mixed reality (VR, AR, MR: or XR for short). Second, we argue that there is no plausibility illusion but merely plausibility, and we derive the place illusion caused by the congruent and plausible generation of spatial cues and similarly for all the current model's so-defined illusions. Finally, we propose congruence and plausibility to become the central essential conditions in a novel theoretical model describing XR experiences and effects.}, language = {en} } @techreport{LeGrossmannKrieger2022, type = {Working Paper}, author = {Le, Duy Thanh and Großmann, Marcel and Krieger, Udo R.}, title = {Cloudless Resource Monitoring in a Fog Computing System Enabled by an SDN/NFV Infrastructure}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28072}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280723}, pages = {4}, year = {2022}, abstract = {Today's advanced Internet-of-Things applications raise technical challenges on cloud, edge, and fog computing. The design of an efficient, virtualized, context-aware, self-configuring orchestration system of a fog computing system constitutes a major development effort within this very innovative area of research. In this paper we describe the architecture and relevant implementation aspects of a cloudless resource monitoring system interworking with an SDN/NFV infrastructure. It realizes the basic monitoring component of the fundamental MAPE-K principles employed in autonomic computing. Here we present the hierarchical layering and functionality within the underlying fog nodes to generate a working prototype of an intelligent, self-managed orchestrator for advanced IoT applications and services. 
The latter system has the capability to automatically monitor various performance aspects of the resource allocation among multiple hosts of a fog computing system interconnected by SDN.}, subject = {Datennetz}, language = {en} } @phdthesis{Lehrieder2013, author = {Lehrieder, Frank}, title = {Performance Evaluation and Optimization of Content Distribution using Overlay Networks}, doi = {10.25972/OPUS-6420}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76018}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {The work presents a performance evaluation and optimization of so-called overlay networks for content distribution in the Internet. Chapter 1 describes the importance that such networks have in today's Internet, for example, for the transmission of video content. The focus of this work is on overlay networks based on the peer-to-peer principle. These are characterized by the fact that users who download content also contribute to the distribution process by sharing parts of the data with other users. This enables efficient content distribution because each user not only consumes resources in the system, but also provides their own resources. Chapter 2 of the monograph contains a detailed description of the functionality of today's most popular overlay network BitTorrent. It explains the various components and their interaction. This is followed by an illustration of why such overlay networks are problematic for Internet service providers (ISPs). The reason lies in the large amount of inter-ISP traffic that is produced by these overlay networks. Since this inter-ISP traffic leads to high costs for ISPs, they try to reduce it by improved mechanisms for overlay networks. One optimization approach is the use of topology awareness within the overlay networks. It provides users of the overlay networks with information about the underlying physical network topology. This allows them to avoid inter-ISP traffic by exchanging data preferentially with other users that are connected to the same ISP. Another approach to save inter-ISP traffic is caching. In this case, the ISP provides additional computers in its network, called caches, which store copies of popular content. The users of this ISP can then obtain such content from the cache. This prevents the content from having to be retrieved from locations outside of the ISP's network and thus saves costly inter-ISP traffic. In the third chapter of the thesis, the results of a comprehensive measurement study of overlay networks, which can be found in today's Internet, are presented. After a short description of the measurement methodology, the results of the measurements are described. These results contain data on a variety of characteristics of current P2P overlay networks in the Internet. These include the popularity of content, i.e., how many users are interested in specific content, the evolution of the popularity, and the size of the files. The distribution of users within the Internet is investigated in detail. Special attention is given to the number of users that exchange a particular file within the same ISP. On the basis of these measurement results, an estimation of the traffic savings that can be achieved by topology awareness is derived. This new estimation is of scientific and practical importance, since it is not limited to individual ISPs and files, but considers the whole Internet and the total amount of data exchanged in overlay networks.
Finally, the characteristics of regional content, whose popularity is limited to certain parts of the Internet, are considered. This is, for example, the case for videos in German, Italian, or French. Chapter 4 of the thesis is devoted to the optimization of overlay networks for content distribution through caching. It presents a deterministic flow model that describes the influence of caches. On the basis of this model, it derives an estimate of the inter-ISP traffic that is generated by an overlay network, and of the share that can be saved by caches. The results show that the influence of the cache depends on the structure of the overlay networks, and that caches can also lead to an increase in inter-ISP traffic under certain circumstances. The described model is thus an important tool for ISPs to decide for which overlay networks caches are useful and to dimension them. Chapter 5 summarizes the content of the work and emphasizes the importance of the findings. In addition, it explains how the findings can be applied to the optimization of future overlay networks. Special attention is given to the growing importance of video-on-demand and real-time video transmissions.}, subject = {Leistungsbewertung}, language = {en} } @article{LeschKoenigKounevetal.2022, author = {Lesch, Veronika and K{\"o}nig, Maximilian and Kounev, Samuel and Stein, Anthony and Krupitzer, Christian}, title = {Tackling the rich vehicle routing problem with nature-inspired algorithms}, series = {Applied Intelligence}, volume = {52}, journal = {Applied Intelligence}, issn = {1573-7497}, doi = {10.1007/s10489-021-03035-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-268942}, pages = {9476-9500}, year = {2022}, abstract = {In the last decades, the classical Vehicle Routing Problem (VRP), i.e., assigning a set of orders to vehicles and planning their routes, has been intensively researched. As the assignment of orders to vehicles and the planning of their routes is already an NP-complete problem, the application of these algorithms in practice often fails to take into account the constraints and restrictions that apply in real-world applications, the so-called rich VRP (rVRP), and is limited to single aspects. In this work, we incorporate the main relevant real-world constraints and requirements. We propose a two-stage strategy and a Timeline algorithm for time windows and pause times, and apply a Genetic Algorithm (GA) and Ant Colony Optimization (ACO) individually to the problem to find optimal solutions. Our evaluation of eight different problem instances against four state-of-the-art algorithms shows that our approach handles all given constraints in a reasonable time.}, language = {en} } @techreport{LhamoNguyenFitzek2022, type = {Working Paper}, author = {Lhamo, Osel and Nguyen, Giang T. and Fitzek, Frank H. P.}, title = {Virtual Queues for QoS Compliance of Haptic Data Streams in Teleoperation}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28076}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280762}, pages = {4}, year = {2022}, abstract = {Tactile Internet aims at allowing perceived real-time interactions between humans and machines. This requires satisfying a stringent latency requirement of haptic data streams whose data rates vary drastically as a result of perceptual codecs.
This introduces a complex problem for the underlying network infrastructure to fulfill the pre-defined level of Quality of Service (QoS). However, novel networking hardware with data plane programming capability allows processing packets differently and opens up a new opportunity. For example, a dynamic and network-aware resource management strategy can help satisfy the QoS requirements of different priority flows without wasting precious bandwidth. This paper introduces virtual queues for service differentiation between different types of traffic streams, leveraging the protocol-independent switch architecture (PISA). We propose coordinating the management of all the queues and dynamically adapting their sizes to minimize packet loss and delay due to network congestion and ensure QoS compliance.}, subject = {Datennetz}, language = {en} } @article{LiGuanGaoetal.2020, author = {Li, Ningbo and Guan, Lianwu and Gao, Yanbin and Du, Shitong and Wu, Menghao and Guang, Xingxing and Cong, Xiaodan}, title = {Indoor and outdoor low-cost seamless integrated navigation system based on the integration of INS/GNSS/LIDAR system}, series = {Remote Sensing}, volume = {12}, journal = {Remote Sensing}, number = {19}, issn = {2072-4292}, doi = {10.3390/rs12193271}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-216229}, year = {2020}, abstract = {Global Navigation Satellite System (GNSS) provides accurate positioning data for vehicular navigation in open outdoor environments. In an indoor environment, Light Detection and Ranging (LIDAR) Simultaneous Localization and Mapping (SLAM) establishes a two-dimensional map and provides positioning data. However, LIDAR can only provide relative positioning data and cannot directly provide the latitude and longitude of the current position. As a consequence, GNSS/Inertial Navigation System (INS) integrated navigation could be employed outdoors, while the indoor part makes use of INS/LIDAR integrated navigation, and the corresponding switching navigation will make the indoor and outdoor positioning consistent. In addition, when the vehicle enters the garage, the GNSS signal will be blurred for a while and then disappear. Ambiguous GNSS satellite signals will lead to continuous distortion or overall drift of the positioning trajectory in the indoor condition. Therefore, an INS/LIDAR seamless integrated navigation algorithm and a switching algorithm based on the vehicle navigation system are designed. According to the experimental data, the positioning accuracy of the INS/LIDAR navigation algorithm in the simulated environmental experiment is 50\% higher than that of the Dead Reckoning (DR) algorithm.
Besides, the switching algorithm developed based on the INS/LIDAR integrated navigation algorithm can achieve 80\% success rate in navigation mode switching.}, language = {en} } @article{LimanMayFetteetal.2023, author = {Liman, Leon and May, Bernd and Fette, Georg and Krebs, Jonathan and Puppe, Frank}, title = {Using a clinical data warehouse to calculate and present key metrics for the radiology department: implementation and performance evaluation}, series = {JMIR Medical Informatics}, volume = {11}, journal = {JMIR Medical Informatics}, issn = {2291-9694}, doi = {10.2196/41808}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349411}, year = {2023}, abstract = {Background: Due to the importance of radiologic examinations, such as X-rays or computed tomography scans, for many clinical diagnoses, the optimal use of the radiology department is 1 of the primary goals of many hospitals. Objective: This study aims to calculate the key metrics of this use by creating a radiology data warehouse solution, where data from radiology information systems (RISs) can be imported and then queried using a query language as well as a graphical user interface (GUI). Methods: Using a simple configuration file, the developed system allowed for the processing of radiology data exported from any kind of RIS into a Microsoft Excel, comma-separated value (CSV), or JavaScript Object Notation (JSON) file. These data were then imported into a clinical data warehouse. Additional values based on the radiology data were calculated during this import process by implementing 1 of several provided interfaces. Afterward, the query language and GUI of the data warehouse were used to configure and calculate reports on these data. For the most common types of requested reports, a web interface was created to view their numbers as graphics. Results: The tool was successfully tested with the data of 4 different German hospitals from 2018 to 2021, with a total of 1,436,111 examinations. The user feedback was good, since all their queries could be answered if the available data were sufficient. The initial processing of the radiology data for using them with the clinical data warehouse took (depending on the amount of data provided by each hospital) between 7 minutes and 1 hour 11 minutes. Calculating 3 reports of different complexities on the data of each hospital was possible in 1-3 seconds for reports with up to 200 individual calculations and in up to 1.5 minutes for reports with up to 8200 individual calculations. Conclusions: A system was developed with the main advantage of being generic concerning the export of different RISs as well as concerning the configuration of queries for various reports. 
The queries could be configured easily using the GUI of the data warehouse, and their results could be exported into the standard formats Excel and CSV for further processing.}, language = {en} } @article{LinsenmannMaerzDufneretal.2021, author = {Linsenmann, Thomas and M{\"a}rz, Alexander and Dufner, Vera and Stetter, Christian and Weiland, Judith and Westermaier, Thomas}, title = {Optimization of radiation settings for angiography using 3D fluoroscopy for imaging of intracranial aneurysms}, series = {Computer Assisted Surgery}, volume = {26}, journal = {Computer Assisted Surgery}, number = {1}, doi = {10.1080/24699322.2021.1894240}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259251}, pages = {22-30}, year = {2021}, abstract = {Mobile 3D fluoroscopes have become increasingly available in neurosurgical operating rooms. We recently reported their use for imaging cerebral vascular malformations and aneurysms. This study was conducted to evaluate various radiation settings for the imaging of cerebral aneurysms before and after surgical occlusion. Eighteen patients with cerebral aneurysms with the indication for surgical clipping were included in this prospective analysis. Before surgery, the patients were randomized into one of three different scan protocols (default settings of the 3D fluoroscope): Group 1: 110 kV, 80 mA (enhanced cranial mode), group 2: 120 kV, 64 mA (lumbar spine mode), group 3: 120 kV, 25 mA (head/neck settings). Prior to surgery, a rotational fluoroscopy scan (duration 24 s) was performed without contrast agent, followed by another scan with 50 ml of intravenous iodine contrast agent. The image files of both scans were transferred to an Apple PowerMac(R) workstation, subtracted and reconstructed using OsiriX(R) MD 10.0 software. The procedure was repeated after clip placement. The image quality regarding preoperative aneurysm configuration and postoperative assessment of aneurysm occlusion and vessel patency was analyzed by 2 independent reviewers using a 6-grade scale. This technique quickly supplies images of adequate quality to depict intracranial aneurysms and distal vessel patency after aneurysm clipping. Regarding these features, a further optimization of our previous protocol seems possible by lowering the voltage and increasing the tube current. For quick intraoperative assessment, image subtraction does not seem necessary; thus, a native scan without a contrast agent can be omitted. Further optimization may be possible using a different contrast injection protocol.}, language = {en} } @article{LodaKrebsDanhofetal.2019, author = {Loda, Sophia and Krebs, Jonathan and Danhof, Sophia and Schreder, Martin and Solimando, Antonio G. and Strifler, Susanne and Rasche, Leo and Kort{\"u}m, Martin and Kerscher, Alexander and Knop, Stefan and Puppe, Frank and Einsele, Hermann and Bittrich, Max}, title = {Exploration of artificial intelligence use with ARIES in multiple myeloma research}, series = {Journal of Clinical Medicine}, volume = {8}, journal = {Journal of Clinical Medicine}, number = {7}, issn = {2077-0383}, doi = {10.3390/jcm8070999}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197231}, pages = {999}, year = {2019}, abstract = {Background: Natural language processing (NLP) is a powerful tool supporting the generation of Real-World Evidence (RWE). There is no NLP system that enables the extensive querying of parameters specific to multiple myeloma (MM) out of unstructured medical reports.
We therefore created an MM-specific ontology to accelerate the information extraction (IE) out of unstructured text. Methods: Our MM ontology consists of extensive MM-specific and hierarchically structured attributes and values. We implemented "A Rule-based Information Extraction System" (ARIES) that uses this ontology. We evaluated ARIES on 200 randomly selected medical reports of patients diagnosed with MM. Results: Our system achieved a high F1-Score of 0.92 on the evaluation dataset with a precision of 0.87 and recall of 0.98. Conclusions: Our rule-based IE system enables the comprehensive querying of medical reports. The IE accelerates the extraction of data and enables clinicians to generate RWE on hematological issues faster. RWE helps clinicians to make decisions in an evidence-based manner. Our tool easily accelerates the integration of research evidence into everyday clinical practice.}, language = {en} } @techreport{LohGeisslerHossfeld2022, type = {Working Paper}, author = {Loh, Frank and Geißler, Stefan and Hoßfeld, Tobias}, title = {LoRaWAN Network Planning in Smart Environments: Towards Reliability, Scalability, and Cost Reduction}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28082}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280829}, pages = {4}, year = {2022}, abstract = {The goal of this work is to present guidance for LoRaWAN planning to improve overall reliability for message transmissions and scalability. At the end, the cost component is discussed. Therefore, a five-step approach is presented that helps to plan a LoRaWAN deployment step by step: Based on the device locations, an initial gateway placement is suggested, followed by in-depth frequency and channel access planning. After an initial planning phase, updates to the channel access and the initial gateway planning are suggested; these should also be done periodically during network operation. Since current gateway placement approaches are only studied with random channel access, there is a lot of potential in the cell planning phase. Furthermore, the performance of different channel access approaches depends strongly on the network load, and thus on cell size and sensor density. Last, the influence of different cell planning ideas on expected costs is discussed.}, subject = {Datennetz}, language = {en} } @article{LohMehlingHossfeld2022, author = {Loh, Frank and Mehling, Noah and Hoßfeld, Tobias}, title = {Towards LoRaWAN without data loss: studying the performance of different channel access approaches}, series = {Sensors}, volume = {22}, journal = {Sensors}, number = {2}, issn = {1424-8220}, doi = {10.3390/s22020691}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-302418}, year = {2022}, abstract = {The Long Range Wide Area Network (LoRaWAN) is one of the fastest growing Internet of Things (IoT) access protocols. It operates in the license-free 868 MHz band and gives everyone the possibility to create their own small sensor networks. The drawback of this technology is often unscheduled or random channel access, which leads to message collisions and potential data loss. For that reason, recent literature studies alternative approaches for LoRaWAN channel access. In this work, state-of-the-art random channel access is compared with alternative approaches from the literature by means of collision probability.
Furthermore, a time scheduled channel access methodology is presented to completely avoid collisions in LoRaWAN. For this approach, an exhaustive simulation study was conducted and the performance was evaluated with random access cross-traffic. In a general theoretical analysis the limits of the time scheduled approach are discussed to comply with duty cycle regulations in LoRaWAN.}, language = {en} } @article{LohPoigneeWamseretal.2021, author = {Loh, Frank and Poign{\´e}e, Fabian and Wamser, Florian and Leidinger, Ferdinand and Hoßfeld, Tobias}, title = {Uplink vs. Downlink: Machine Learning-Based Quality Prediction for HTTP Adaptive Video Streaming}, series = {Sensors}, volume = {21}, journal = {Sensors}, number = {12}, issn = {1424-8220}, doi = {10.3390/s21124172}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241121}, year = {2021}, abstract = {Streaming video is responsible for the bulk of Internet traffic these days. For this reason, Internet providers and network operators try to make predictions and assessments about the streaming quality for an end user. Current monitoring solutions are based on a variety of different machine learning approaches. The challenge for providers and operators nowadays is that existing approaches require large amounts of data. In this work, the most relevant quality of experience metrics, i.e., the initial playback delay, the video streaming quality, video quality changes, and video rebuffering events, are examined using a voluminous data set of more than 13,000 YouTube video streaming runs that were collected with the native YouTube mobile app. Three Machine Learning models are developed and compared to estimate playback behavior based on uplink request information. The main focus has been on developing a lightweight approach using as few features and as little data as possible, while maintaining state-of-the-art performance.}, language = {en} } @techreport{LohRaffeckGeissleretal.2023, type = {Working Paper}, author = {Loh, Frank and Raffeck, Simon and Geißler, Stefan and Hoßfeld, Tobias}, title = {Paving the Way for an Energy Efficient and Sustainable Future Internet of Things}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32216}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322161}, pages = {4}, year = {2023}, abstract = {In this work, we describe the network from data collection to data processing and storage as a system based on different layers. We outline the different layers and highlight major tasks and dependencies with regard to energy consumption and energy efficiency. With this view, we can outwork challenges and questions a future system architect must answer to provide a more sustainable, green, resource friendly, and energy efficient application or system. Therefore, all system layers must be considered individually but also altogether for future IoT solutions. 
This requires, in particular, novel sustainability metrics in addition to current Quality of Service and Quality of Experience metrics to provide a high-performing, user-satisfying, and sustainable network.}, language = {en} } @article{LohWamserPoigneeetal.2022, author = {Loh, Frank and Wamser, Florian and Poign{\´e}e, Fabian and Geißler, Stefan and Hoßfeld, Tobias}, title = {YouTube Dataset on Mobile Streaming for Internet Traffic Modeling and Streaming Analysis}, series = {Scientific Data}, volume = {9}, journal = {Scientific Data}, number = {1}, doi = {10.1038/s41597-022-01418-y}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300240}, year = {2022}, abstract = {Around 4.9 billion Internet users worldwide watch billions of hours of online video every day. As a result, streaming is by far the predominant type of traffic in communication networks. According to Google statistics, three out of five video views come from mobile devices. Thus, in view of the continuous technological advances in end devices and increasing mobile use, datasets for mobile streaming are indispensable in research but only sparsely dealt with in the literature so far. With this public dataset, we provide 1,081 hours of time-synchronous video measurements at the network, transport, and application layers with the native YouTube streaming client on mobile devices. The dataset includes 80 network scenarios with 171 different individual bandwidth settings measured in 5,181 runs with limited bandwidth, 1,939 runs with emulated 3G/4G traces, and 4,022 runs with pre-defined bandwidth changes. This corresponds to 332 GB of video payload. We present the most relevant quality indicators for scientific use, i.e., initial playback delay, streaming video quality, adaptive video quality changes, video rebuffering events, and streaming phases.}, language = {en} } @article{LopezArreguinMontenegro2019, author = {Lopez-Arreguin, A. J. R. and Montenegro, S.}, title = {Improving engineering models of terramechanics for planetary exploration}, series = {Results in Engineering}, volume = {3}, journal = {Results in Engineering}, doi = {10.1016/j.rineng.2019.100027}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-202490}, pages = {100027}, year = {2019}, abstract = {This short letter proposes more consolidated explicit solutions for the forces and torques acting on typical rover wheels, which can be used as a method to determine their average mobility characteristics in planetary soils. The closed-loop solutions build on one of the verified methods, but in contrast to the previous one, the observables are decoupled, requiring a smaller number of physical parameters to be measured. As a result, we show that, with knowledge of terrain properties, wheel driving performance relies on a single observable only.
Because of their generality, the equations established here can have further implications for the autonomy and control of rovers or for planetary soil characterization.}, language = {en} } @article{LugrinLatoschikHabeletal.2016, author = {Lugrin, Jean-Luc and Latoschik, Marc Erich and Habel, Michael and Roth, Daniel and Seufert, Christian and Grafe, Silke}, title = {Breaking Bad Behaviors: A New Tool for Learning Classroom Management Using Virtual Reality}, series = {Frontiers in ICT}, volume = {3}, journal = {Frontiers in ICT}, number = {26}, doi = {10.3389/fict.2016.00026}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147945}, year = {2016}, abstract = {This article presents an immersive virtual reality (VR) system for training classroom management skills, with a specific focus on learning to manage disruptive student behavior in face-to-face, one-to-many teaching scenarios. The core of the system is a real-time 3D virtual simulation of a classroom populated by twenty-four semi-autonomous virtual students. The system has been designed as a companion tool for classroom management seminars in a syllabus for primary and secondary school teachers. This will allow lecturers to link theory with practice using the medium of VR. The system is therefore designed for two users: a trainee teacher and an instructor supervising the training session. The teacher is immersed in a real-time 3D simulation of a classroom by means of a head-mounted display and headphones. The instructor operates a graphical desktop console, which renders a view of the class and the teacher, whose avatar movements are captured by a markerless tracking system. This console includes a 2D graphics menu with convenient behavior and feedback control mechanisms to provide human-guided training sessions. The system is built using low-cost consumer hardware and software. Its architecture and technical design are described in detail. A first evaluation confirms its conformance to critical usability requirements (i.e., safety and comfort, believability, simplicity, acceptability, extensibility, affordability, and mobility). Our initial results are promising and constitute the necessary first step toward a possible investigation of the efficiency and effectiveness of such a system in terms of learning outcomes and experience.}, language = {en} } @phdthesis{Loeffler2021, author = {L{\"o}ffler, Andre}, title = {Constrained Graph Layouts: Vertices on the Outer Face and on the Integer Grid}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-146-4}, doi = {10.25972/WUP-978-3-95826-147-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215746}, school = {W{\"u}rzburg University Press}, pages = {viii, 161}, year = {2021}, abstract = {Constraining graph layouts - that is, restricting the placement of vertices and the routing of edges to obey certain constraints - is common practice in graph drawing. In this book, we discuss algorithmic results on two different restriction types: placing vertices on the outer face and on the integer grid. For the first type, we look into the outer k-planar and outer k-quasi-planar graphs, as well as giving a linear-time algorithm to recognize full and closed outer k-planar graphs via Monadic Second-order Logic.
For the second type, we consider the problem of transferring a given planar drawing onto the integer grid while preserving the original drawing's topology; we also generalize a variant of Cauchy's rigidity theorem for orthogonal polyhedra of genus 0 to those of arbitrary genus.}, subject = {Graphenzeichnen}, language = {en} } @article{MadeiraGromerLatoschiketal.2021, author = {Madeira, Octavia and Gromer, Daniel and Latoschik, Marc Erich and Pauli, Paul}, title = {Effects of Acrophobic Fear and Trait Anxiety on Human Behavior in a Virtual Elevated Plus-Maze}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.635048}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-258709}, year = {2021}, abstract = {The Elevated Plus-Maze (EPM) is a well-established apparatus to measure anxiety in rodents, i.e., animals exhibiting an increased relative time spent in the closed vs. the open arms are considered anxious. To examine whether such anxiety-modulated behaviors are conserved in humans, we re-translated this paradigm to a human setting using virtual reality in a Cave Automatic Virtual Environment (CAVE) system. In two studies, we examined whether the EPM exploration behavior of humans is modulated by their trait anxiety and also assessed the individuals' levels of acrophobia (fear of heights), claustrophobia (fear of confined spaces), sensation seeking, and the reported anxiety when on the maze. First, we constructed an exact virtual copy of the animal EPM adjusted to human proportions. In analogy to animal EPM studies, participants (N = 30) freely explored the EPM for 5 min. In the second study (N = 61), we redesigned the EPM to make it more human-adapted and to differentiate influences of trait anxiety and acrophobia by introducing various floor textures and lowering the walls of the closed arms to the height of standard handrails. In the first experiment, hierarchical regression analyses of exploration behavior revealed the expected association between open arm avoidance and trait anxiety, and an even stronger association with acrophobic fear. In the second study, results revealed that acrophobia was associated with avoidance of open arms with mesh-floor texture, whereas for trait anxiety, claustrophobia, and sensation seeking, no effect was detected. Also, subjects' fear rating was moderated by all psychometrics but trait anxiety. In sum, both studies consistently indicate that humans show no general open arm avoidance analogous to rodents and that human EPM behavior is modulated most strongly by acrophobic fear, whereas trait anxiety plays a subordinate role. Thus, we conclude that the criteria for cross-species validity are met insufficiently in this case.
Despite their exploratory nature, our studies provide in-depth insights into human exploration behavior on the virtual EPM.}, language = {en} } @article{MaiwaldBruschkeSchneideretal.2023, author = {Maiwald, Ferdinand and Bruschke, Jonas and Schneider, Danilo and Wacker, Markus and Niebling, Florian}, title = {Giving historical photographs a new perspective: introducing camera orientation parameters as new metadata in a large-scale 4D application}, series = {Remote Sensing}, volume = {15}, journal = {Remote Sensing}, number = {7}, issn = {2072-4292}, doi = {10.3390/rs15071879}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-311103}, year = {2023}, abstract = {The ongoing digitization of historical photographs in archives allows investigating the quality, quantity, and distribution of these images. However, the exact interior and exterior camera orientations of these photographs are usually lost during the digitization process. The proposed method uses content-based image retrieval (CBIR) to filter exterior images of single buildings in combination with metadata information. The retrieved photographs are automatically processed in an adapted structure-from-motion (SfM) pipeline to determine the camera parameters. In an interactive georeferencing process, the calculated camera positions are transferred into a global coordinate system. As all image and camera data are efficiently stored in the proposed 4D database, they can be conveniently accessed afterward to georeference newly digitized images by using photogrammetric triangulation and spatial resection. The results show that the CBIR and the subsequent SfM are robust methods for various kinds of buildings and different quantities of data. The absolute accuracy of the camera positions after georeferencing lies in the range of a few meters, likely introduced by the inaccurate LOD2 models used for transformation. The proposed photogrammetric method, the database structure, and the 4D visualization interface enable adding historical urban photographs and 3D models from other locations.}, language = {en} } @article{MandelHoernleinIflandetal.2011, author = {Mandel, Alexander and H{\"o}rnlein, Alexander and Ifland, Marianus and L{\"u}neburg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Aufwandsanalyse f{\"u}r computerunterst{\"u}tzte Multiple-Choice Papierklausuren}, series = {GMS Journal for Medical Education}, volume = {28}, journal = {GMS Journal for Medical Education}, number = {4}, doi = {10.3205/zma000767}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134386}, pages = {1-15, Doc55}, year = {2011}, abstract = {Introduction: Multiple-choice examinations are still fundamental for assessment in medical degree programs. In addition to content-related research, the optimization of the technical procedure is an important question. Medical examiners face three options: paper-based examinations with or without computer support or completely electronic examinations. Critical aspects are the effort for formatting, the logistic effort during the actual examination, quality, promptness and effort of the correction, the time for making the documents available for inspection by the students, and the statistical analysis of the examination results. Methods: For three semesters, a computer program for the input and formatting of MC questions in medical and other paper-based examinations has been used and continuously improved at Wuerzburg University.
In the winter semester (WS) 2009/10 eleven, in the summer semester (SS) 2010 twelve, and in WS 2010/11 thirteen medical examinations were conducted with the program and automatically evaluated. For the last two semesters the remaining manual workload was recorded. Results: The effort for formatting and for the subsequent analysis, including adjustments of the analysis, of an average examination with about 140 participants and about 35 questions was 5-7 hours for exams without complications in the winter semester 2009/2010, about 2 hours in SS 2010, and about 1.5 hours in the winter semester 2010/11. Including exams with complications, the average time was about 3 hours per exam in SS 2010 and 2.67 hours for WS 10/11. Discussion: For conventional multiple-choice exams the computer-based formatting and evaluation of paper-based exams offers a significant time reduction for lecturers in comparison with the manual correction of paper-based exams, and compared to purely electronically conducted exams it needs a much simpler technological infrastructure and fewer staff during the exam.}, language = {de} } @phdthesis{Martin2008, author = {Martin, R{\"u}diger}, title = {Resilience, Provisioning, and Control for the Network of the Future}, doi = {10.25972/OPUS-2504}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-28497}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {The Internet sees an ongoing transformation process from a single best-effort service network into a multi-service network. In addition to traditional applications like e-mail, WWW traffic, or file transfer, future generation networks (FGNs) will carry services with real-time constraints and stringent availability and reliability requirements like Voice over IP (VoIP), video conferencing, virtual private networks (VPNs) for finance, other real-time business applications, tele-medicine, or tele-robotics. Hence, quality of service (QoS) guarantees and resilience to failures are crucial characteristics of an FGN architecture. At the same time, network operations must be efficient. This necessitates sophisticated mechanisms for the provisioning and the control of future communication infrastructures. In this work we investigate such mechanisms for resilient FGNs. There are many aspects of the provisioning and control of resilient FGNs such as traffic matrix estimation, traffic characterization, traffic forecasting, mechanisms for QoS enforcement also during failure cases, resilient routing, or scalability concerns for future routing and addressing mechanisms. In this work we focus on three important aspects for which performance analysis can deliver substantial insights: load balancing for multipath Internet routing, fast resilience concepts, and advanced dimensioning techniques for resilient networks. Routing in modern communication networks is often based on multipath structures, e.g., equal-cost multipath routing (ECMP) in IP networks, to facilitate traffic engineering and resiliency. When multipath routing is applied, load balancing algorithms distribute the traffic over available paths towards the destination according to pre-configured distribution values. State-of-the-art load balancing algorithms operate either on the packet or the flow level. Packet level mechanisms achieve highly accurate traffic distributions, but are known to have negative effects on the performance of transport protocols and should not be applied.
Flow level mechanisms avoid performance degradations, but at the expense of reduced accuracy. These inaccuracies may have unpredictable effects on link capacity requirements and complicate resource management. Thus, it is important to exactly understand the accuracy and dynamics of load balancing algorithms in order to be able to exercise better network control. Knowing about their weaknesses, it is also important to look for alternatives and to assess their applicability in different networking scenarios. This is the first aspect of this work. Component failures are inevitable during the operation of communication networks and lead to routing disruptions if no special precautions are taken. In case of a failure, the robust shortest-path routing of the Internet reconverges after some time to a state where all nodes are again reachable - provided physical connectivity still exists. But stringent availability and reliability criteria of new services make a fast reaction to failures obligatory for resilient FGNs. This led to the development of fast reroute (FRR) concepts for MPLS and IP routing. The operations of MPLS-FRR have already been standardized. Still, the standards leave some degrees of freedom for the resilient path layout and it is important to understand the tradeoffs between different options for the path layout to efficiently provision resilient FGNs. In contrast, the standardization for IP-FRR is an ongoing process. The applicability and possible combinations of different concepts still are open issues. IP-FRR also facilitates a comprehensive resilience framework for IP routing covering all steps of the failure recovery cycle. These points constitute another aspect of this work. Finally, communication networks are usually over-provisioned, i.e., they have much more capacity installed than actually required during normal operation. This is a precaution for various challenges such as network element failures. An alternative to this capacity overprovisioning (CO) approach is admission control (AC). AC blocks new flows in case of imminent overload due to unanticipated events to protect the QoS for already admitted flows. On the one hand, CO is generally viewed as a simple mechanism, AC as a more complex mechanism that complicates the network control plane and raises interoperability issues. On the other hand, AC appears more cost-efficient than CO. To obtain advanced provisioning methods for resilient FGNs, it is important to find suitable models for irregular events, such as failures and different sources of overload, and to incorporate them into capacity dimensioning methods. This allows for a fair comparison between CO and AC in various situations and yields a better understanding of the strengths and weaknesses of both concepts. 
Such an advanced capacity dimensioning method for resilient FGNs represents the third aspect of this work.}, subject = {Backbone-Netz}, language = {en} } @techreport{MartinoDeutschmannHielscheretal.2023, type = {Working Paper}, author = {Martino, Luigi and Deutschmann, J{\"o}rg and Hielscher, Kai-Steffen and German, Reinhard}, title = {Towards a 5G Satellite Communication Framework for V2X}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32214}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322148}, pages = {5}, year = {2023}, abstract = {In recent years, satellite communication has been expanding its field of application in the world of computer networks. This paper aims to provide an overview of how a typical scenario involving 5G Non-Terrestrial Networks (NTNs) for vehicle to everything (V2X) applications is characterized. In particular, a first implementation of a system that integrates them together will be described. Such a framework will later be used to evaluate the performance of applications such as Vehicle Monitoring (VM), Remote Driving (RD), Voice Over IP (VoIP), and others. Different configuration scenarios such as Low Earth Orbit and Geostationary Orbit will be considered.}, language = {en} } @techreport{MazighBeausencourtBodeetal.2023, type = {Working Paper}, author = {Mazigh, Sadok Mehdi and Beausencourt, Marcel and Bode, Max Julius and Scheffler, Thomas}, title = {Using P4-INT on Tofino for Measuring Device Performance Characteristics in a Network Lab}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32208}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322084}, pages = {4}, year = {2023}, abstract = {This paper presents a prototypical implementation of the In-band Network Telemetry (INT) specification in P4 and demonstrates a use case, where a Tofino Switch is used to measure device and network performance in a lab setting. This work is based on research activities in the area of P4 data plane programming conducted at the network lab of HTW Berlin.}, language = {en} } @phdthesis{Menth2004, author = {Menth, Michael}, title = {Efficient admission control and routing for resilient communication networks}, doi = {10.25972/OPUS-846}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9949}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {This work is subdivided into two main areas: resilient admission control and resilient routing. The work gives an overview of the state of the art of quality of service mechanisms in communication networks and proposes a categorization of admission control (AC) methods. These approaches are investigated regarding performance, more precisely, regarding the potential resource utilization by dimensioning the capacity for a network with a given topology, traffic matrix, and a required flow blocking probability. In case of a failure, the affected traffic is rerouted over backup paths which increases the traffic rate on the respective links. 
To guarantee the effectiveness of admission control also in failure scenarios, the increased traffic rate must be taken into account for capacity dimensioning and leads to resilient AC. Capacity dimensioning is not feasible for existing networks with already given link capacities. For the application of resilient network admission control (NAC) in this case, the size of distributed AC budgets must be adapted according to the traffic matrix in such a way that the maximum blocking probability for all flows is minimized and that the capacity of all links is not exceeded by the admissible traffic rate in any failure scenario. Several algorithms for the solution of that problem are presented and compared regarding their efficiency and fairness. A prototype for resilient AC was implemented in the laboratories of Siemens AG in Munich within the scope of the project KING. Resilience requires additional capacity on the backup paths for failure scenarios. The amount of this backup capacity depends on the routing and can be minimized by routing optimization. New protection switching mechanisms are presented that quickly divert the traffic around outage locations. They are simple and can be implemented, e.g., by MPLS technology. The Self-Protecting Multi-Path (SPM) is a multi-path consisting of disjoint partial paths. The traffic is distributed over all faultless partial paths according to an optimized load balancing function both in the working case and in failure scenarios. Performance studies show that the network topology and the traffic matrix also influence the amount of required backup capacity significantly. The example of the COST-239 network illustrates that conventional shortest path routing may need 50\% more capacity than the optimized SPM if all single link and node failures are protected.}, subject = {Kommunikation}, language = {en} } @article{MergetKoetschanHackletal.2012, author = {Merget, Benjamin and Koetschan, Christian and Hackl, Thomas and F{\"o}rster, Frank and Dandekar, Thomas and M{\"u}ller, Tobias and Schultz, J{\"o}rg and Wolf, Matthias}, title = {The ITS2 Database}, series = {Journal of Visualized Experiments}, volume = {61}, journal = {Journal of Visualized Experiments}, number = {e3806}, doi = {10.3791/3806}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-124600}, year = {2012}, abstract = {The internal transcribed spacer 2 (ITS2) has been used as a phylogenetic marker for more than two decades. As ITS2 research mainly focused on the very variable ITS2 sequence, it confined this marker to low-level phylogenetics only. However, the combination of the ITS2 sequence and its highly conserved secondary structure improves the phylogenetic resolution and allows phylogenetic inference at multiple taxonomic ranks, including species delimitation. The ITS2 Database presents an exhaustive dataset of internal transcribed spacer 2 sequences from NCBI GenBank, accurately reannotated. Following an annotation by profile Hidden Markov Models (HMMs), the secondary structure of each sequence is predicted. First, it is tested whether a minimum-energy-based fold (direct fold) results in a correct four-helix conformation. If this is not the case, the structure is predicted by homology modeling. In homology modeling, an already known secondary structure is transferred to another ITS2 sequence, whose secondary structure was not able to fold correctly in a direct fold. The ITS2 Database is not only a database for storage and retrieval of ITS2 sequence-structures.
It also provides several tools to process your own ITS2 sequences, including annotation, structural prediction, motif detection, and BLAST search on the combined sequence-structure information. Moreover, it integrates trimmed versions of 4SALE and ProfDistS for multiple sequence-structure alignment calculation and Neighbor Joining tree reconstruction. Together they form a coherent analysis pipeline from an initial set of sequences to a phylogeny based on sequence and secondary structure. In a nutshell, this workbench simplifies first phylogenetic analyses to only a few mouse-clicks, while additionally providing tools and data for comprehensive large-scale analyses.}, language = {en} } @techreport{Metzger2020, type = {Working Paper}, author = {Metzger, Florian}, title = {Crowdsensed QoE for the community - a concept to make QoE assessment accessible}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-203748}, pages = {7}, year = {2020}, abstract = {In recent years several community testbeds as well as participatory sensing platforms have successfully established themselves to provide open data to everyone interested. Each of them has a specific goal in mind, ranging from collecting radio coverage data to environmental and radiation data. Such data can be used by the community in their decision making, whether to subscribe to a specific mobile phone service that provides good coverage in an area or to find a sunny and warm region for the summer holidays. However, the existing platforms usually limit themselves to directly measurable network QoS. If such a crowdsourced data set provided more in-depth derived measures, this would enable even better decision making. A community-driven crowdsensing platform that derives spatial application-layer user experience from resource-friendly bandwidth estimates would be such a case; video streaming services come to mind as a prime example. In this paper we present a concept for such a system based on an initial prototype that eases the collection of data necessary to determine mobile-specific QoE at large scale. In addition we reason why the simple quality metric proposed here can hold its own.}, subject = {Quality of Experience}, language = {en} } @phdthesis{Milbrandt2007, author = {Milbrandt, Jens}, title = {Performance Evaluation of Efficient Resource Management Concepts for Next Generation IP Networks}, doi = {10.25972/OPUS-1991}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-23332}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Next generation networks (NGNs) must integrate the services of current circuit-switched telephone networks and packet-switched data networks. This convergence towards a unified communication infrastructure is motivated by the high capital expenditures (CAPEX) and operational expenditures (OPEX) caused by the coexistence of separate networks for voice and data. In the end, NGNs must offer the same services as these legacy networks and, therefore, they must provide a low-cost packet-switched solution with real-time transport capabilities for telephony and multimedia applications. In addition, NGNs must be fault-tolerant to guarantee user satisfaction and to support business-critical processes also in case of network failures. A key technology for the operation of NGNs is the Internet Protocol (IP), which has evolved into a common and well-accepted standard for networking in the Internet during the last 25 years.
There are two basically different approaches to achieve QoS in IP networks. With capacity overprovisioning (CO), an IP network is equipped with sufficient bandwidth such that network congestion becomes very unlikely and QoS is maintained most of the time. The second option to achieve QoS in IP networks is admission control (AC). AC represents a network-inherent intelligence that admits real-time traffic flows to a single link or an entire network only if enough resources are available such that the requirements on packet loss and delay can be met. Otherwise, the request of a new flow is blocked. This work focuses on resource management and control mechanisms for NGNs, in particular on AC and associated bandwidth allocation methods. The first contribution consists of a new link-oriented AC method called experience-based admission control (EBAC) which is a hybrid approach dealing with the problems inherent to conventional AC mechanisms like parameter-based or measurement-based AC (PBAC/MBAC). PBAC provides good QoS but suffers from poor resource utilization and, vice versa, MBAC uses resources efficiently but is susceptible to QoS violations. Hence, EBAC aims at increasing the resource efficiency while maintaining the QoS which increases the revenues of ISPs and postpones their CAPEX for infrastructure upgrades. To show the advantages of EBAC, we first review today's AC approaches and then develop the concept of EBAC. EBAC is a simple mechanism that safely overbooks the capacity of a single link to increase its resource utilization. We evaluate the performance of EBAC by its simulation under various traffic conditions. The second contribution concerns dynamic resource allocation in transport networks which implement a specific network admission control (NAC) architecture. In general, the performance of different NAC systems may be evaluated by conventional methods such as call blocking analysis which has often been applied in the context of multi-service asynchronous transfer mode (ATM) networks. However, to yield more practical results than abstract blocking probabilities, we propose a new method to compare different AC approaches by their respective bandwidth requirements. To present our new method for comparing different AC systems, we first give an overview of network resource management (NRM) in general. Then we present the concept of adaptive bandwidth allocation (ABA) in capacity tunnels and illustrate the analytical performance evaluation framework to compare different AC systems by their capacity requirements. Different network characteristics influence the performance of ABA. Therefore, the impact of various traffic demand models and tunnel implementations, and the influence of resilience requirements is investigated. In conclusion, the resources in NGNs must be exclusively dedicated to admitted traffic to guarantee QoS. For that purpose, robust and efficient concepts for NRM are required to control the requested bandwidth with regard to the available transmission capacity. 
Sophisticated AC will be a key function for NRM in NGNs and, therefore, efficient resource management concepts like experience-based admission control and adaptive bandwidth allocation for admission-controlled capacity tunnels, as presented in this work, are appealing for NGN solutions.}, subject = {Ressourcenmanagement}, language = {en} } @phdthesis{Maeder2008, author = {M{\"a}der, Andreas}, title = {Performance Models for UMTS 3.5G Mobile Wireless Systems}, doi = {10.25972/OPUS-2766}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-32525}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Mobile telecommunication systems of the 3.5th generation (3.5G) constitute a first step towards the requirements of an all-IP world. As the denotation suggests, 3.5G systems are not designed completely from scratch. Instead, they have evolved from existing 3G systems like UMTS or cdma2000. 3.5G systems are primarily designed and optimized for packet-switched best-effort traffic, but they are also intended to increase system capacity by exploiting available radio resources more efficiently. Systems based on cdma2000 are enhanced with 1xEV-DO (EV-DO: evolution, data-optimized). In the UMTS domain, the 3G partnership project (3GPP) specified the High Speed Packet Access (HSPA) family, consisting of High Speed Downlink Packet Access (HSDPA) and its counterpart High Speed Uplink Packet Access (HSUPA) or Enhanced Uplink. The focus of this monograph is on HSPA systems, although the operation principles of other 3.5G systems are similar. One of the main contributions of our work is a set of performance models which allow a holistic view of the system. The models consider user traffic on flow level, such that a recalculation of parameters like bandwidth is necessary only on significant changes of the system state. The impact of lower layers is captured by stochastic models. This approach combines accurate modeling and the ability to cope with computational complexity. Applying this approach to HSDPA, we develop a new physical layer abstraction model that takes radio resources, scheduling discipline, radio propagation, and mobile device capabilities into account. Together with models for the calculation of network-wide interference and transmit powers, a discrete-event simulation and an analytical model based on a queuing-theoretical approach are proposed. For the Enhanced Uplink, we develop analytical models considering independent and correlated other-cell interference.}, subject = {Mobilfunk}, language = {en} } @article{MuellerLeppichGeissetal.2023, author = {M{\"u}ller, Konstantin and Leppich, Robert and Geiß, Christian and Borst, Vanessa and Pelizari, Patrick Aravena and Kounev, Samuel and Taubenb{\"o}ck, Hannes}, title = {Deep neural network regression for normalized digital surface model generation with Sentinel-2 imagery}, series = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, volume = {16}, journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, issn = {1939-1404}, doi = {10.1109/JSTARS.2023.3297710}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-349424}, pages = {8508-8519}, year = {2023}, abstract = {In recent history, normalized digital surface models (nDSMs) have been constantly gaining importance as a means to solve large-scale geographic problems. High-resolution surface models are precious, as they can provide detailed information for a specific area.
However, measurements with a high resolution are time consuming and costly. Only a few approaches exist to create high-resolution nDSMs for extensive areas. This article explores approaches to extract high-resolution nDSMs from low-resolution Sentinel-2 data, allowing us to derive large-scale models. We thereby utilize the advantages of Sentinel 2 being open access, having global coverage, and providing steady updates through a high repetition rate. Several deep learning models are trained to overcome the gap in producing high-resolution surface maps from low-resolution input data. With U-Net as a base architecture, we extend the capabilities of our model by integrating tailored multiscale encoders with differently sized kernels in the convolution as well as conformed self-attention inside the skip connection gates. Using pixelwise regression, our U-Net base models can achieve a mean height error of approximately 2 m. Moreover, through our enhancements to the model architecture, we reduce the model error by more than 7\%.}, language = {en} } @article{NaglerNaegeleGillietal.2018, author = {Nagler, Matthias and N{\"a}gele, Thomas and Gilli, Christian and Fragner, Lena and Korte, Arthur and Platzer, Alexander and Farlow, Ashley and Nordborg, Magnus and Weckwerth, Wolfram}, title = {Eco-Metabolomics and Metabolic Modeling: Making the Leap From Model Systems in the Lab to Native Populations in the Field}, series = {Frontiers in Plant Science}, volume = {9}, journal = {Frontiers in Plant Science}, number = {1556}, issn = {1664-462X}, doi = {10.3389/fpls.2018.01556}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-189560}, year = {2018}, abstract = {Experimental high-throughput analysis of molecular networks is a central approach to characterize the adaptation of plant metabolism to the environment. However, recent studies have demonstrated that it is hardly possible to predict in situ metabolic phenotypes from experiments under controlled conditions, such as growth chambers or greenhouses. This is particularly due to the high molecular variance of in situ samples induced by environmental fluctuations. An approach of functional metabolome interpretation of field samples would be desirable in order to be able to identify and trace back the impact of environmental changes on plant metabolism. To test the applicability of metabolomics studies for a characterization of plant populations in the field, we have identified and analyzed in situ samples of nearby grown natural populations of Arabidopsis thaliana in Austria. A. thaliana is the primary molecular biological model system in plant biology with one of the best functionally annotated genomes representing a reference system for all other plant genome projects. The genomes of these novel natural populations were sequenced and phylogenetically compared to a comprehensive genome database of A. thaliana ecotypes. Experimental results on primary and secondary metabolite profiling and genotypic variation were functionally integrated by a data mining strategy, which combines statistical output of metabolomics data with genome-derived biochemical pathway reconstruction and metabolic modeling. Correlations of biochemical model predictions and population-specific genetic variation indicated varying strategies of metabolic regulation on a population level which enabled the direct comparison, differentiation, and prediction of metabolic adaptation of the same species to different habitats. 
These differences were most pronounced at organic and amino acid metabolism as well as at the interface of primary and secondary metabolism and allowed for the direct classification of population-specific metabolic phenotypes within geographically contiguous sampling sites.}, language = {en} } @article{NaseemDandekar2012, author = {Naseem, Muhammad and Dandekar, Thomas}, title = {The Role of Auxin-Cytokinin Antagonism in Plant-Pathogen Interactions}, series = {PLOS Pathogens}, volume = {8}, journal = {PLOS Pathogens}, number = {11}, doi = {10.1371/journal.ppat.1003026}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-131901}, pages = {e1003026}, year = {2012}, abstract = {No abstract available.}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {A Rule-based Statistical Classifier for Determining a Base Text and Ranking Witnesses In Textual Documents Collation Process}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-57465}, year = {2011}, abstract = {Given a collection of diverging documents about some lost original text, any person interested in the text would try reconstructing it from the diverging documents. Whether it is eclecticism, stemmatics, or copy-text, one is expected to explicitly or indirectly select one of the documents as a starting point or as a base text, which could be emended through comparison with remaining documents, so that a text that could be designated as the original document is generated. Unfortunately the process of giving priority to one of the documents also known as witnesses is a subjective approach. In fact even Cladistics, which could be considered as a computer-based approach of implementing stemmatics, does not present or recommend users to select a certain witness as a starting point for the process of reconstructing the original document. In this study, a computational method using a rule-based Bayesian classifier is used, to assist text scholars in their attempts of reconstructing a non-existing document from some available witnesses. The method developed in this study consists of selecting a base text successively and collating it with remaining documents. Each completed collation cycle stores the selected base text and its closest witness, along with a weighted score of their similarities and differences. At the end of the collation process, a witness selected more often by majority of base texts is considered as the probable base text of the collection. Witnesses' scores are weighted using a weighting system, based on effects of types of textual modifications on the process of reconstructing original documents. Users have the possibility to select between baseless and base text collation. If a base text is selected, the task is reduced to ranking the witnesses with respect to the base text, otherwise a base text as well as ranking of the witnesses with respect to the base text are computed and displayed on a histogram.}, subject = {Textvergleich}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Assisting Understanding, Retention, and Dissemination of Religious Texts Knowledge with Modeling, and Visualization Techniques: The Case of The Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55927}, year = {2011}, abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. 
Once deeper understanding is achieved, one would like to organize and manage her/his knowledge in such a way that it could be easily remembered and efficiently transmitted to others. In this paper, books organized in terms of chapters consisting of verses are considered as the source of knowledge to be modeled. The knowledge model consists of verses with their metadata and semantic annotations. The metadata represent the multiple perspectives of knowledge modeling. Verses with their metadata and annotations form a meta-model, which will be published on a web Mashup. The meta-model, with links between its elements, constitutes a knowledge base. An XML-based annotation system that breaks down the learning process into specific tasks helps to construct the desired meta-model. The system is made up of user interfaces for creating metadata, annotating chapters' contents according to user-selected semantics, and templates for publishing the generated knowledge on the Internet. The proposed software system improves comprehension and retention of knowledge contained in religious texts through modeling and visualization. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts. It is expected that this short ongoing study would motivate others to engage in devising and offering software systems for cross-religion learning.}, subject = {Wissensmanagement}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Design and Implementation of Architectures for Interactive Textual Documents Collation Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56601}, year = {2011}, abstract = {One of the main purposes of textual documents collation is to identify a base text or the closest witness to the base text, by analyzing and interpreting differences, also known as types of changes, that might exist between those documents. Based on this fact, it is reasonable to argue that explicit identification of types of changes such as deletions, additions, transpositions, and mutations should be part of the collation process. The identification could be carried out by an interpretation module after alignment has taken place. Unfortunately, existing collation software such as CollateX and Juxta's collation engine do not have interpretation modules. In fact they implement the Gothenburg model [1] for the collation process, which does not include an interpretation unit. Currently both CollateX and Juxta's collation engine do not distinguish in their critical apparatus between the types of changes, and do not offer statistics about those changes. This paper presents a model for both integrated and distributed collation processes that improves the Gothenburg model. The model introduces an interpretation component for computing and distinguishing between the types of changes that documents could have undergone. Moreover, two architectures implementing the model in order to solve the problem of interactive collation are discussed as well. Each architecture uses the CollateX library, and provides on the one hand preprocessing functions for transforming input documents into the CollateX input format, and on the other hand a post-processing module for enabling interactive collation.
Finally, simple algorithms for distinguishing between types of changes and for linking collated source documents with the collation results are also introduced.}, subject = {Softwarearchitektur}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Assisting Analysis and Understanding of Quran Search Results with Interactive Scatter Plots and Tables}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55840}, year = {2011}, abstract = {The Quran is the holy book of Islam consisting of 6236 verses divided into 114 chapters called suras. Many verses are similar and even identical. Searching for similar texts (e.g., verses) could return thousands of verses that, when displayed completely or partly as a textual list, would make analysis and understanding difficult and confusing. Moreover, it would be visually impossible to instantly figure out the overall distribution of the retrieved verses in the Quran. As a consequence, reading and analyzing the verses would be tedious and unintuitive. In this study, a combination of interactive scatter plots and tables has been developed to assist analysis and understanding of the search result. Retrieved verses are clustered by chapters, and a weight is assigned to each cluster according to the number of verses it contains, so that users can visually identify the most relevant areas and figure out the places of revelation of the verses. Users visualize the complete result, can select a region of the plot to zoom in, and can click on a marker to display a table containing the verses with their English translation side by side.}, subject = {Text Mining}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {A Knowledge-based Hybrid Statistical Classifier for Reconstructing the Chronology of the Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-54712}, year = {2011}, abstract = {Computationally categorizing the Quran's chapters has been mainly confined to the determination of the chapters' places of revelation. However, this broad classification is not sufficient to effectively and thoroughly understand and interpret the Quran. The chronology of revelation would not only improve comprehension of the philosophy of Islam, but also ease the implementation and memorization of its laws and recommendations. This paper attempts to estimate the chapters' possible dates of revelation through their lexical frequency profiles. A hybrid statistical classifier consisting of stemming and clustering algorithms for comparing lexical frequency profiles of chapters and deriving dates of revelation has been developed. The classifier is trained using some chapters with known dates of revelation. Then it classifies chapters with uncertain dates of revelation by computing their proximity to the training ones. The results reported here indicate that the proposed methodology yields usable results in estimating dates of revelation of the Quran's chapters based on their lexical contents.}, subject = {Text Mining}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Markup overlap: Improving Fragmentation Method}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49084}, year = {2010}, abstract = {Overlapping is a common word used to describe documents whose structural dimensions cannot be adequately represented using a tree structure. For instance, a quotation that starts in one verse and ends in another verse.
The problem of overlapping hierarchies is a recurring one, which has been addressed by a variety of approaches. There are XML-based solutions as well as non-XML ones. The XML-based solutions are: multiple documents, empty elements, fragmentation, out-of-line markup, JITT, and BUVH. The non-XML approaches comprise CONCUR/XCONCUR, MECS, LMNL, etc. This paper briefly presents the state of the art in overlapping hierarchies and introduces two variations on the TEI fragmentation markup that have several advantages.}, subject = {XML}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Understanding the Vex Rendering Engine}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51333}, year = {2010}, abstract = {The Visual Editor for XML (Vex) [1] used by TextGrid [2] and other applications has rendering and layout engines. The layout engine is well documented but the rendering engine is not. This lack of documentation of the rendering engine has made refactoring and extending the editor hard and tedious. For instance, many CSS2.1 and upcoming CSS3 properties have not been implemented. Software developers in different projects such as TextGrid using Vex would like to update its CSS rendering engine in order to provide advanced user interfaces as well as support different document types. In order to minimize the effort of extending Vex functionality, I found it beneficial to write basic documentation about the Vex software architecture in general and its CSS rendering engine in particular. The documentation is mainly based on the idea of architectural layered diagrams. In fact, layered diagrams can help developers understand a software's source code faster and more easily in order to alter it and fix errors. This paper is written for the purpose of providing direct support for exploration in the comprehension process of the Vex source code. It discusses the Vex software architecture. The organization of the packages that make up the software, the architecture of its CSS rendering engine, and an algorithm explaining the working principle of the rendering engine are described.}, subject = {Cascading Style Sheets}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Reference Architecture, Design of Cascading Style Sheets Processing Model}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51328}, year = {2010}, abstract = {The technique of using Cascading Style Sheets (CSS) to format and present structured data is called the CSS processing model. For instance, a CSS processing model for XML documents describes the steps involved in formatting and presenting XML documents on screens or on paper. Many software applications such as browsers and XML editors have their own CSS processing models, which are part of their rendering engines. For instance, each browser renders CSS layout differently based on its own CSS processing model; as a result, inconsistencies in the support of CSS features arise. Some browsers support more CSS features than others, and the rendering itself varies. Moreover, the W3C standards are not even adhered to by some browsers such as Internet Explorer. Test suites and other hacks and filters cannot definitively solve these problems, because these solutions are temporary and fragile. To palliate this inconsistency and the browser compatibility issues with respect to CSS, a reference CSS processing model is needed. By extension, it could even allow interoperability across CSS rendering engines.
A reference architecture would provide a common software architecture and interfaces, and facilitate refactoring, reuse, and automated unit testing. In [2] a reference architecture for browsers has been proposed. However, this reference architecture is a macro reference model which does not separately consider the individual components of rendering and layout engines. In this paper, an attempt to develop a reference architecture for CSS processing models is discussed. In addition, the rendering and layout engines of the Vex editor [3], as well as an extended version of the editor used in the TextGrid project [5], are also presented in order to validate the proposed reference architecture.}, subject = {Cascading Style Sheets}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Empirical Study on Screen Scraping Web Service Creation: Case of Rhein-Main-Verkehrsverbund (RMV)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49396}, year = {2010}, abstract = {The Internet is the biggest database that science and technology have ever produced. The World Wide Web is a large repository of information that cannot be used for automation by many applications due to its limited target audience. One of the solutions to the automation problem is to develop wrappers. Wrapping is a process whereby unstructured extracted information is transformed into a more structured form such as XML, which could be provided as a web service to other applications. A web service is a web page whose content is well structured so that a computer program can consume it automatically. This paper describes the steps involved in constructing wrappers manually in order to automatically generate web services.}, subject = {HTML}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Java Web Frameworks Which One to Choose?}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49407}, year = {2010}, abstract = {This article discusses web frameworks that are available to a software developer in the Java language. It introduces the MVC paradigm and some frameworks that implement it. The article presents an overview of the Struts, Spring MVC, and JSF frameworks, as well as guidelines for selecting one of them as a development environment.}, subject = {Java Frameworks}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Doing Webservices Composition by Content-based Mashup: Example of a Web-based Simulator for Itinerary Planning}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50036}, year = {2010}, abstract = {Webservices composition is traditionally carried out using composition technologies such as the Business Process Execution Language (BPEL) [1] and the Web Service Choreography Interface (WSCI) [2]. The composition technology involves the process of web service discovery, invocation, and composition. However, these technologies are not easy and flexible enough because they are mainly developer-centric. Moreover, the majority of websites have not yet embarked on the world of web services, although they have very important and useful information to offer. Is it because they have not understood the usefulness of web services, or is it because of the costs? Whatever the answers to these questions might be, time and money are definitely required in order to create and offer web services. To avoid these expenditures, wrappers [7] to automatically generate web services from websites would be a cheaper and easier solution.
Mashups offer a different way of doing web services composition. In a web environment, a Mashup is a web application that brings together data from several sources using web services, APIs, wrappers, and so on, in order to create an entirely new application that was not provided before. This paper first presents an overview of Mashups and the process of web service invocation and composition based on Mashups, and then describes an example of a web-based simulator for a navigation system in Germany.}, subject = {Mashup }, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Using Machine Learning Algorithms for Categorizing Quranic Chapters by Major Phases of Prophet Mohammad's Messengership}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66862}, year = {2011}, abstract = {This paper discusses the categorization of Quranic chapters by major phases of Prophet Mohammad's messengership using machine learning algorithms. First, the chapters were categorized by places of revelation using Support Vector Machine and na{\"i}ve Bayesian classifiers separately, and their results were compared to each other, as well as to the existing traditional Islamic and western orientalist classifications. The chapters were categorized into Meccan (revealed in Mecca) and Medinan (revealed in Medina). After that, chapters of each category were clustered using a kind of fuzzy-single linkage clustering approach, in order to correspond to the major phases of Prophet Mohammad's life. The major phases of the Prophet's life were manually derived from the Quranic text, as well as from the secondary Islamic literature, e.g., hadiths and exegesis. Previous studies on computing the places of revelation of Quranic chapters relied heavily on features extracted from existing background knowledge of the chapters. For instance, it is known that Meccan chapters contain mostly verses about faith and related problems, while Medinan ones encompass verses dealing with social issues, battles, etc. These features are by themselves insufficient as a basis for assigning the chapters to their respective places of revelation. In fact, there are exceptions, since some chapters do contain both Meccan and Medinan features. In this study, features of each category were automatically created from very few chapters, whose places of revelation have been determined through identification of historical facts and events such as battles, migration to Medina, etc. Chapters having unanimously agreed places of revelation were used as the initial training set, while the remaining chapters formed the testing set. The classification process was made recursive by regularly augmenting the training set with correctly classified chapters, in order to classify the whole testing set. Each chapter was preprocessed by removing unimportant words, stemming, and representation with the vector space model. The result of this study shows that the two classifiers produced usable results, with the support vector machine classifier performing better.
This study indicates that the proposed methodology yields encouraging results for arranging Quranic chapters by phases of Prophet Mohammad's messengership.}, subject = {Koran}, language = {en} } @unpublished{Nassourou2012, author = {Nassourou, Mohamadou}, title = {Towards a Knowledge-Based Learning System for The Quranic Text}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70003}, year = {2012}, abstract = {In this research, an attempt to create a knowledge-based learning system for the Quranic text has been made. The knowledge base is made up of the Quranic text along with detailed information about each chapter and verse, and some rules. The system offers the possibility to study the Quran through web-based interfaces, implementing novel visualization techniques for browsing, querying, consulting, and testing the acquired knowledge. Additionally, the system possesses knowledge acquisition facilities for maintaining the knowledge base.}, subject = {Wissensbanksystem}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Computing Generic Causes of Revelation of the Quranic Verses Using Machine Learning Techniques}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66083}, year = {2011}, abstract = {Because many verses of the holy Quran are similar, there is a high probability that similar verses addressing the same issues share the same generic causes of revelation. In this study, machine learning techniques have been employed in order to automatically derive the causes of revelation of Quranic verses. The derivation of the causes of revelation is viewed as a classification problem. Initially, the categories are based on the verses with known causes of revelation, and the testing set consists of the remaining verses. Based on a computed threshold value, a na{\"i}ve Bayesian classifier is used to categorize some verses. After that, using a decision tree classifier, the remaining uncategorized verses are separated into verses that contain indicators (resultative connectors, causative expressions, etc.) and those that do not. As for those verses having indicators, each one is segmented into its constituent clauses by identification of the linking indicators. Then a dominant clause is extracted and considered either as the cause of revelation, or post-processed by adding or subtracting some terms to form a causal clause that constitutes the cause of revelation. Concerning the remaining unclassified verses without indicators, a naive Bayesian classifier is again used to assign each one of them to one of the existing classes based on feature and topic similarity. As for verses that could not be classified so far, manual classification was made by considering each verse as a category on its own. The result obtained in this study is encouraging, and shows that automatic derivation of Quranic verses' generic causes of revelation is achievable, and reasonably reliable for understanding and implementing the teachings of the Quran.}, subject = {Text Mining}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Design and Implementation of a Model-driven XML-based Integrated System Architecture for Assisting Analysis, Understanding, and Retention of Religious Texts: The Case of The Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65737}, year = {2011}, abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts.
Once deeper understanding is achieved, one would like to organize and manage her/his knowledge in such a way that it could be easily remembered and efficiently transmitted to others. This paper discusses modeling religious texts using semantic XML markup based on frame-based knowledge representation, with the purpose of assisting understanding, retention, and sharing of the knowledge they contain. In this study, books organized in terms of chapters made up of verses are considered as the source of knowledge to model. Some metadata representing the multiple perspectives of knowledge modeling are assigned to each chapter and verse. Chapters and verses with their metadata form a meta-model, which is represented using frames, and published on a web mashup. An XML-based annotation and visualization system equipped with user interfaces for creating static and dynamic metadata, annotating chapters' contents according to user-selected semantics, and templates for publishing generated knowledge on the Internet, has been developed. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts, in order to support analysis, understanding, and retention of the texts.}, subject = {Wissensrepr{\"a}sentation}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Computer-based Textual Documents Collation System for Reconstructing the Original Text from Automatically Identified Base Text and Ranked Witnesses}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65749}, year = {2011}, abstract = {Given a collection of diverging documents about some lost original text, any person interested in the text would try reconstructing it from the diverging documents. Whether it is eclecticism, stemmatics, or copy-text, one is expected to explicitly or indirectly select one of the documents as a starting point or as a base text, which could be emended through comparison with the remaining documents, so that a text that could be designated as the original document is generated. Unfortunately, the process of giving priority to one of the documents, also known as witnesses, is a subjective approach. In fact even Cladistics, which could be considered a computer-based approach to implementing stemmatics, does not propose or recommend that users select a certain witness as a starting point for the process of reconstructing the original document. In this study, a computational method using a rule-based Bayesian classifier is used to assist text scholars in their attempts at reconstructing a non-existing document from some available witnesses. The method developed in this study consists of selecting a base text successively and collating it with the remaining documents. Each completed collation cycle stores the selected base text and its closest witness, along with a weighted score of their similarities and differences. At the end of the collation process, the witness selected most often by the majority of base texts is considered the probable base text of the collection. Witnesses' scores are weighted using a weighting system, based on the effects of types of textual modifications on the process of reconstructing original documents. Users have the possibility to select between baseless and base text collation. 
If a base text is selected, the task is reduced to ranking the witnesses with respect to the base text; otherwise, a base text as well as a ranking of the witnesses with respect to the base text are computed and displayed on a bar diagram. Additionally, this study includes a recursive algorithm for automatically reconstructing the original text from the identified base text and ranked witnesses.}, subject = {Textvergleich}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Philosophical and Computational Approaches for Estimating and Visualizing Months of Revelations of Quranic Chapters}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65784}, year = {2011}, abstract = {The question of why the structure of the Quran does not follow its chronology of revelation is a recurring one. Some Islamic scholars such as [1] have answered the question using hadiths, as well as other philosophical reasons based on internal evidence of the Quran itself. Unfortunately, to this day many are still wondering about this issue. Muslims believe that the Quran is a summary and a copy of the content of a preserved tablet called Lawhul-Mahfuz located in heaven. Logically speaking, this suggests that the arrangement of the verses and chapters is expected to be similar to that of the Lawhul-Mahfuz. As for the arrangement of the verses in each chapter, there is unanimity that it was carried out by the Prophet himself under the guidance of Angel Gabriel with the recommendation of God. But concerning the ordering of the chapters, there are reports about some divergences [3] among the Prophet's companions as to which chapter should precede which one. This paper argues that Quranic chapters might have been arranged according to months and seasons of revelation. In fact, based on some verses of the Quran, it is defensible that the Lawhul-Mahfuz itself is understood to have been structured in terms of the months of the year. In this study, philosophical and mathematical arguments for computing chapters' months of revelation are discussed, and the result is displayed on an interactive scatter plot.}, subject = {Text Mining}, language = {en} } @techreport{NavadeMaileGerman2023, type = {Working Paper}, author = {Navade, Piyush and Maile, Lisa and German, Reinhard}, title = {Multiple DCLC Routing Algorithms for Ultra-Reliable and Time-Sensitive Applications}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32217}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322177}, pages = {4}, year = {2023}, abstract = {This paper discusses the problem of finding multiple shortest disjoint paths in modern communication networks, which is essential for ultra-reliable and time-sensitive applications. Dijkstra's algorithm has been a popular solution for the shortest path problem, but repetitive use of it to find multiple paths is not scalable. The Multiple Disjoint Path Algorithm (MDPAlg), published in 2021, proposes the use of a single full graph to construct multiple disjoint paths. This paper proposes modifications to the algorithm to include a delay constraint, which is important in time-sensitive applications. 
Different delay-constrained least-cost routing algorithms are compared in a comprehensive manner to evaluate the benefits of the adapted MDPAlg algorithm. Fault tolerance, and thereby reliability, is ensured by generating multiple link-disjoint paths from source to destination.}, language = {en} } @techreport{NguyenLohHossfeld2023, type = {Working Paper}, author = {Nguyen, Kien and Loh, Frank and Hoßfeld, Tobias}, title = {Challenges of Serverless Deployment in Edge-MEC-Cloud}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32202}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322025}, pages = {4}, year = {2023}, abstract = {Emerging serverless computing may meet the Edge Cloud in a beneficial manner, as the two offer flexibility and dynamicity in optimizing finite hardware resources. However, the lack of a proper study of a joint platform leaves a gap in the literature about the consumption and performance of such an integration. To this end, this paper identifies the key questions and proposes a methodology to answer them.}, language = {en} } @phdthesis{Niebler2019, author = {Niebler, Thomas}, title = {Extracting and Learning Semantics from Social Web Data}, doi = {10.25972/OPUS-17866}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178666}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Making machines understand natural language is a dream of mankind that has existed for a very long time. Early attempts at programming machines to converse with humans in a supposedly intelligent way relied on phrase lists and simple keyword matching. However, such approaches cannot provide semantically adequate answers, as they do not consider the specific meaning of the conversation. Thus, if we want to enable machines to actually understand language, we need to be able to access semantically relevant background knowledge. For this, it is possible to query so-called ontologies, which are large networks containing knowledge about real-world entities and their semantic relations. However, creating such ontologies is a tedious task, as often extensive expert knowledge is required. Thus, we need to find ways to automatically construct and update ontologies that fit human intuition of semantics and semantic relations. More specifically, we need to determine semantic entities and find relations between them. While this is usually done on large corpora of unstructured text, previous work has shown that we can at least facilitate the first issue of extracting entities by considering special data such as tagging data or human navigational paths. Here, we do not need to detect the actual semantic entities, as they are already provided because of the way those data are collected. Thus we can mainly focus on the problem of assessing the degree of semantic relatedness between tags or web pages. However, there exist several issues which need to be overcome if we want to approximate human intuition of semantic relatedness. For this, it is necessary to represent words and concepts in a way that allows easy and highly precise semantic characterization. This also largely depends on the quality of data from which these representations are constructed. 
In this thesis, we extract semantic information from both tagging data created by users of social tagging systems and human navigation data in different semantic-driven social web systems. Our main goal is to construct high-quality and robust vector representations of words which can then be used to measure the relatedness of semantic concepts. First, we show that navigation in the social media systems Wikipedia and BibSonomy is driven by a semantic component. After this, we discuss and extend methods to model the semantic information in tagging data as low-dimensional vectors. Furthermore, we show that tagging pragmatics influences different facets of tagging semantics. We then investigate the usefulness of human navigational paths in several different settings on Wikipedia and BibSonomy for measuring semantic relatedness. Finally, we propose a metric-learning-based algorithm to adapt pre-trained word embeddings to datasets containing human judgment of semantic relatedness. This work contributes to the field of studying semantic relatedness between words by proposing methods to extract semantic relatedness from web navigation, to learn high-quality and low-dimensional word representations from tagging data, and to learn semantic relatedness from any kind of vector representation by exploiting human feedback. Applications first and foremost lie in ontology learning for the Semantic Web, but also in semantic search and query expansion.}, subject = {Semantik}, language = {en} } @phdthesis{Nogatz2023, author = {Nogatz, Falco}, title = {Defining and Implementing Domain-Specific Languages with Prolog}, doi = {10.25972/OPUS-30187}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301872}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The landscape of today's programming languages is manifold. With the diversity of applications, the difficulty of adequately addressing and specifying the used programs increases. This often leads to newly designed and implemented domain-specific languages. They enable domain experts to express knowledge in their preferred format, resulting in more readable and concise programs. Due to its flexible and declarative syntax without reserved keywords, the logic programming language Prolog is particularly suitable for defining and embedding domain-specific languages. This thesis addresses the questions and challenges that arise when integrating domain-specific languages into Prolog. We compare the two approaches of defining them either externally or internally, and provide assisting tools for each. The grammar of a formal language is usually defined in the extended Backus-Naur form. In this work, we handle this formalism as a domain-specific language in Prolog, and define term expansions that allow translating it into equivalent definite clause grammars. We present the package library(dcg4pt) for SWI-Prolog, which enriches them with an additional argument to automatically process the term's corresponding parse tree. To simplify the work with definite clause grammars, we visualise their application by a web-based tracer. The external integration of domain-specific languages requires the programmer to keep the grammar, parser, and interpreter in sync. In many cases, domain-specific languages can instead be directly embedded into Prolog by providing appropriate operator definitions. In addition, we propose syntactic extensions for Prolog to expand its expressiveness, for instance to state logic formulas with their connectives verbatim. 
This allows the use of all tools that were originally written for Prolog, for instance code linters and editors with syntax highlighting. We present the package library(plammar), a standard-compliant parser for Prolog source code, written in Prolog. It is able to automatically infer from example sentences the required operator definitions with their classes and precedences as well as the required Prolog language extensions. As a result, we can automatically answer the question: Is it possible to model these example sentences as valid Prolog clauses, and how? We discuss and apply the two approaches of internal and external integration for several domain-specific languages, namely the extended Backus-Naur form, GraphQL, XPath, and a controlled natural language to represent expert rules in if-then form. The created toolchain with library(dcg4pt) and library(plammar) yields new application opportunities for static Prolog source code analysis, which we also present.}, subject = {PROLOG }, language = {en} } @article{OberdoerferBirnstielLatoschiketal.2021, author = {Oberd{\"o}rfer, Sebastian and Birnstiel, Sandra and Latoschik, Marc Erich and Grafe, Silke}, title = {Mutual Benefits: Interdisciplinary Education of Pre-Service Teachers and HCI Students in VR/AR Learning Environment Design}, series = {Frontiers in Education}, volume = {6}, journal = {Frontiers in Education}, issn = {2504-284X}, doi = {10.3389/feduc.2021.693012}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241612}, year = {2021}, abstract = {The successful development and classroom integration of Virtual (VR) and Augmented Reality (AR) learning environments requires competencies and content knowledge with respect to media didactics and the respective technologies. The paper discusses a pedagogical concept specifically aiming at the interdisciplinary education of pre-service teachers in collaboration with human-computer interaction students. The students' overarching goal is the interdisciplinary realization and integration of VR/AR learning environments in teaching and learning concepts. To assist this approach, we developed a specific tutorial guiding the developmental process. We evaluate and validate the effectiveness of the overall pedagogical concept by analyzing the change in attitudes regarding 1) the use of VR/AR for educational purposes and in competencies and content knowledge regarding 2) media didactics and 3) technology. Our results indicate a significant improvement in the knowledge of media didactics and technology. We further report on four STEM learning environments that have been developed during the seminar.}, language = {en} } @article{OberdoerferHeidrichBirnstieletal.2021, author = {Oberd{\"o}rfer, Sebastian and Heidrich, David and Birnstiel, Sandra and Latoschik, Marc Erich}, title = {Enchanted by Your Surrounding? Measuring the Effects of Immersion and Design of Virtual Environments on Decision-Making}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.679277}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260101}, pages = {679277}, year = {2021}, abstract = {Impaired decision-making leads to the inability to distinguish between advantageous and disadvantageous choices. The impairment of a person's decision-making is a common goal of gambling games. Given the recent trend of gambling using immersive Virtual Reality, it is crucial to investigate the effects of both immersion and the virtual environment (VE) on decision-making. 
In a novel user study, we measured decision-making using three virtual versions of the Iowa Gambling Task (IGT). The versions differed with regard to the degree of immersion and design of the virtual environment. Since emotions affect decision-making, we further measured the positive and negative affect of participants. A higher visual angle on a stimulus leads to an increased emotional response. Thus, we kept the visual angle on the Iowa Gambling Task the same between our conditions. Our results revealed no significant impact of immersion or the VE on the IGT. We further found no significant difference between the conditions with regard to positive and negative affect. This suggests that neither the medium used nor the design of the VE causes an impairment of decision-making. However, in combination with a recent study, we provide first evidence that a higher visual angle on the IGT leads to an effect of impairment.}, language = {en} } @article{OberdoerferLatoschik2019, author = {Oberd{\"o}rfer, Sebastian and Latoschik, Marc Erich}, title = {Knowledge encoding in game mechanics: transfer-oriented knowledge learning in desktop-3D and VR}, series = {International Journal of Computer Games Technology}, volume = {2019}, journal = {International Journal of Computer Games Technology}, doi = {10.1155/2019/7626349}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-201159}, pages = {7626349}, year = {2019}, abstract = {Affine Transformations (ATs) are complex and abstract learning content. Encoding the AT knowledge in Game Mechanics (GMs) achieves a repetitive knowledge application and audiovisual demonstration. Playing a serious game providing these GMs leads to motivating and effective knowledge learning. Using immersive Virtual Reality (VR) has the potential to even further increase the serious game's learning outcome and learning quality. This paper compares the effectiveness and efficiency of desktop-3D and VR with respect to the achieved learning outcome. Also, the present study analyzes the effectiveness of an enhanced audiovisual knowledge encoding and the provision of a debriefing system. The results validate the effectiveness of the knowledge encoding in GMs to achieve knowledge learning. The study also indicates that VR is beneficial for the overall learning quality and that an enhanced audiovisual encoding has only a limited effect on the learning outcome.}, language = {en} } @article{ObremskiFriedrichHaaketal.2022, author = {Obremski, David and Friedrich, Paula and Haak, Nora and Schaper, Philipp and Lugrin, Birgit}, title = {The impact of mixed-cultural speech on the stereotypical perception of a virtual robot}, series = {Frontiers in Robotics and AI}, volume = {9}, journal = {Frontiers in Robotics and AI}, issn = {2296-9144}, doi = {10.3389/frobt.2022.983955}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-293531}, year = {2022}, abstract = {Despite the fact that mixed-cultural backgrounds are becoming increasingly important in our daily life, the representation of multiple cultural backgrounds in one entity is still rare in socially interactive agents (SIAs). This paper's contribution is twofold. First, it provides a survey of research on mixed-cultured SIAs. Second, it presents a study investigating how mixed-cultural speech (in this case, non-native accent) influences how a virtual robot is perceived in terms of personality, warmth, competence and credibility. 
Participants with English or German, respectively, as their first language watched a video of a virtual robot speaking in either standard English or German-accented English. It was expected that the German-accented speech would be rated more positively by native German participants as well as elicit the German stereotypes of credibility and conscientiousness for both German and English participants. Contrary to the expectations, German participants rated the virtual robot lower in terms of competence and credibility when it spoke with a German accent, whereas English participants perceived the virtual robot with a German accent as more credible compared to the version without an accent. Both the native English and native German listeners classified the virtual robot with a German accent as significantly more neurotic than the virtual robot speaking standard English. This work shows that by solely implementing a non-native accent in a virtual robot, stereotypes are partly transferred. It also shows that the implementation of a non-native accent leads to differences in the perception of the virtual robot.}, language = {en} } @article{ObremskiLugrinSchaperetal.2021, author = {Obremski, David and Lugrin, Jean-Luc and Schaper, Philipp and Lugrin, Birgit}, title = {Non-native speaker perception of Intelligent Virtual Agents in two languages: the impact of amount and type of grammatical mistakes}, series = {Journal on Multimodal User Interfaces}, volume = {15}, journal = {Journal on Multimodal User Interfaces}, number = {2}, issn = {1783-8738}, doi = {10.1007/s12193-021-00369-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-269984}, pages = {229-238}, year = {2021}, abstract = {Having a mixed-cultural membership is becoming increasingly common in our modern society. It is thus beneficial in several ways to create Intelligent Virtual Agents (IVAs) that reflect a mixed-cultural background as well, e.g., for educational settings. For research with such IVAs, it is essential that they are classified as non-native by members of a target culture. In this paper, we focus on variations of IVAs' speech to create the impression of non-native speakers that are identified as such by speakers of two different mother tongues. In particular, we investigate grammatical mistakes and identify thresholds beyond which the agent is clearly categorised as a non-native speaker. Therefore, we conducted two experiments: one for native speakers of German, and one for native speakers of English. Results of the German study indicate that beyond 10\% of word order mistakes and 25\% of infinitive mistakes German-speaking IVAs are perceived as non-native speakers. Results of the English study indicate that beyond 50\% of omission mistakes and 50\% of infinitive mistakes English-speaking IVAs are perceived as non-native speakers. 
We believe these thresholds constitute helpful guidelines for computational approaches to non-native speaker generation, simplifying research with IVAs in mixed-cultural settings.}, language = {en} } @techreport{OdhahGrassKraemer2022, type = {Working Paper}, author = {Odhah, Najib and Grass, Eckhard and Kraemer, Rolf}, title = {Effective Rate of URLLC with Short Block-Length Information Theory}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28085}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280859}, pages = {4}, year = {2022}, abstract = {Shannon channel capacity estimation, based on large packet lengths, is used in traditional Radio Resource Management (RRM) optimization. This is adequate for the normal transmission of data in a wired or wireless system. For industrial automation and control, rather short packets are used due to the short-latency requirements. Using Shannon's formula in this case leads to inaccurate RRM solutions, thus another formula should be used to optimize radio resources in short block-length packet transmission, which is the basis of Ultra-Reliable Low-Latency Communications (URLLCs). The stringent delay Quality of Service (QoS) requirement of URLLCs calls for a link-level channel model rather than a physical-level channel model. After finding a basic and accurate formula for the achievable rate of short block-length packet transmission, the RRM optimization problem can be accurately formulated and solved under the new constraints of URLLCs. In this short paper, the current mathematical models, which are used in formulating the effective transmission rate of URLLCs, will be briefly explained. Then, the use of this rate in RRM for URLLC will be discussed.}, subject = {Datennetz}, language = {en} } @phdthesis{Oechsner2010, author = {Oechsner, Simon}, title = {Performance Challenges and Optimization Potential of Peer-to-Peer Overlay Technologies}, doi = {10.25972/OPUS-4159}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50015}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {In today's Internet, building overlay structures to provide a service is becoming more and more common. This approach allows for the utilization of client resources, thus being more scalable than a client-server model in this respect. However, in these architectures the quality of the provided service depends on the clients and is therefore more complex to manage. Resource utilization, both at the clients themselves and in the underlying network, determines the efficiency of the overlay application. Here, a trade-off exists between the resource providers and the end users that can be tuned via overlay mechanisms. Thus, resource management and traffic management is always quality-of-service management as well. In this monograph, the three currently significant and most widely used overlay types in the Internet are considered. These overlays are implemented in popular applications which only recently have gained importance. Thus, these overlay networks still face real-world technical challenges which are of high practical relevance. We identify the specific issues for each of the considered overlays, and show how their optimization affects the trade-offs between resource efficiency and service quality. 
Thus, we supply new insights and system knowledge that is not provided by previous work.}, subject = {Overlay-Netz}, language = {en} } @article{OsmanogluKhaledAlSeiariAlKhoorietal.2021, author = {Osmanoglu, {\"O}zge and Khaled AlSeiari, Mariam and AlKhoori, Hasa Abduljaleel and Shams, Shabana and Bencurova, Elena and Dandekar, Thomas and Naseem, Muhammad}, title = {Topological Analysis of the Carbon-Concentrating CETCH Cycle and a Photorespiratory Bypass Reveals Boosted CO\(_2\)-Sequestration by Plants}, series = {Frontiers in Bioengineering and Biotechnology}, volume = {9}, journal = {Frontiers in Bioengineering and Biotechnology}, issn = {2296-4185}, doi = {10.3389/fbioe.2021.708417}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249260}, year = {2021}, abstract = {Synthetically designed alternative photorespiratory pathways increase the biomass of tobacco and rice plants. Likewise, some in planta-tested synthetic carbon-concentrating cycles (CCCs) hold promise to increase plant biomass while diminishing the atmospheric carbon dioxide burden. Taking these individual contributions into account, we hypothesize that the integration of bypasses and CCCs will further increase plant productivity. To test this in silico, we reconstructed a metabolic model by integrating photorespiration and photosynthesis with the synthetically designed alternative pathway 3 (AP3) enzymes and transporters. We calculated fluxes of the native plant system and those of AP3 combined with the inhibition of the glycolate/glycerate transporter by using the YANAsquare package. The activity values corresponding to each enzyme in photosynthesis, photorespiration, and for synthetically designed alternative pathways were estimated. Next, we modeled the effect of the crotonyl-CoA/ethylmalonyl-CoA/hydroxybutyryl-CoA cycle (CETCH), which is a set of natural and synthetically designed enzymes that fix CO₂ manifold more than the native Calvin-Benson-Bassham (CBB) cycle. We compared estimated fluxes across various pathways in the native model and under an introduced CETCH cycle. Moreover, we combined CETCH and AP3-w/plgg1RNAi, and calculated the fluxes. We anticipate higher carbon dioxide-harvesting potential in plants with an AP3 bypass and CETCH-AP3 combination. We discuss the in vivo implementation of these strategies for the improvement of C3 plants and in natural high carbon harvesters.}, language = {en} } @phdthesis{Ostermayer2017, author = {Ostermayer, Ludwig}, title = {Integration of Prolog and Java with the Connector Architecture CAPJa}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-150713}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Modern software is often realized as a modular combination of subsystems for, e.g., knowledge management, visualization, verification, or the interaction with users. As a result, software libraries from possibly different programming languages have to work together. The case is even more complex if different programming paradigms have to be combined. This type of diversification of programming languages and paradigms in just one software application can only be mastered by mechanisms for a seamless integration of the involved programming languages. 
However, the integration of the common logic programming language Prolog and the popular object-oriented programming language Java is complicated by various interoperability problems which stem on the one hand from the paradigmatic gap between the programming languages, and on the other hand, from the diversity of the available Prolog systems. The subject of the thesis is the investigation of novel mechanisms for the integration of logic programming in Prolog and object-oriented programming in Java. We are particularly interested in an object-oriented, uniform approach which is not specific to just one Prolog system. Therefore, we have first identified several important criteria for the seamless integration of Prolog and Java from the object-oriented perspective. The main contribution of the thesis is a novel integration framework called the Connector Architecture for Prolog and Java (CAPJa). The framework is completely implemented in Java and imposes no modifications on the Java Virtual Machine or Prolog. CAPJa provides a semi-automated mechanism for the integration of Prolog predicates into Java. For compact, readable, and object-oriented queries to Prolog, CAPJa exploits lambda expressions with conditional and relational operators in Java. The communication between Java and Prolog is based on a fully automated mapping of Java objects to Prolog terms, and vice versa. In Java, an extensible system of gateways provides connectivity with various Prolog systems and, moreover, makes any connected Prolog system easily interchangeable, without major adaptation in Java.}, subject = {Logische Programmierung}, language = {en} } @article{PawellekKrmarLeistneretal.2021, author = {Pawellek, Ruben and Krmar, Jovana and Leistner, Adrian and Djajić, Nevena and Otašević, Biljana and Protić, Ana and Holzgrabe, Ulrike}, title = {Charged aerosol detector response modeling for fatty acids based on experimental settings and molecular features: a machine learning approach}, series = {Journal of Cheminformatics}, volume = {13}, journal = {Journal of Cheminformatics}, number = {1}, doi = {10.1186/s13321-021-00532-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-261618}, year = {2021}, abstract = {The charged aerosol detector (CAD) is the latest representative of aerosol-based detectors that generate a response independent of the analytes' chemical structure. This study was aimed at accurately predicting the CAD response of homologous fatty acids under varying experimental conditions. Fatty acids from C12 to C18 were used as model substances due to semivolatile characteristics that caused non-uniform CAD behaviour. Considering both experimental conditions and molecular descriptors, a mixed quantitative structure-property relationship (QSPR) modeling was performed using Gradient Boosted Trees (GBT). The ensemble of 10 decision trees (learning rate set at 0.55, the maximal depth set at 5, and the sample rate set at 1.0) was able to explain approximately 99\% (Q\(^2\): 0.987, RMSE: 0.051) of the observed variance in CAD responses. Validation using an external test compound confirmed the high predictive ability of the established model (R\(^2\): 0.990, RMSEP: 0.050). With respect to the intrinsic attribute selection strategy, GBT used almost all independent variables during model building. 
Finally, it attributed the highest importance to the power function value, the flow rate of the mobile phase, evaporation temperature, the content of the organic solvent in the mobile phase and the molecular descriptors such as molecular weight (MW), Radial Distribution Function-080/weighted by mass (RDF080m) and average coefficient of the last eigenvector from distance/detour matrix (Ve2_D/Dt). The identification of the factors most relevant to the CAD responsiveness has contributed to a better understanding of the underlying mechanisms of signal generation. An increased CAD response that was obtained for acetone as organic modifier demonstrated its potential to replace the more expensive and environmentally harmful acetonitrile.}, language = {en} } @phdthesis{Peng2019, author = {Peng, Dongliang}, title = {An Optimization-Based Approach for Continuous Map Generalization}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-104-4}, doi = {10.25972/WUP-978-3-95826-105-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174427}, school = {W{\"u}rzburg University Press}, pages = {xv, 132}, year = {2019}, abstract = {Maps are the main tool to represent geographical information. Geographical information is usually scale-dependent, so users need to have access to maps at different scales. In our digital age, the access is realized by zooming. As discrete changes during the zooming tend to distract users, smooth changes are preferred. This is why some digital maps are trying to make the zooming as continuous as they can. The process of producing maps at different scales with smooth changes is called continuous map generalization. In order to produce maps of high quality, cartographers often take into account additional requirements. These requirements are transferred to models in map generalization. Optimization for map generalization is important not only because it finds optimal solutions in the sense of the models, but also because it helps us to evaluate the quality of the models. Optimization, however, becomes more delicate when we deal with continuous map generalization. In this area, there are requirements not only for a specific map but also for relations between maps at difference scales. This thesis is about continuous map generalization based on optimization. First, we show the background of our research topics. Second, we find optimal sequences for aggregating land-cover areas. We compare the A\$^{\!\star}\$\xspace algorithm and integer linear programming in completing this task. Third, we continuously generalize county boundaries to provincial boundaries based on compatible triangulations. We morph between the two sets of boundaries, using dynamic programming to compute the correspondence. Fourth, we continuously generalize buildings to built-up areas by aggregating and growing. In this work, we group buildings with the help of a minimum spanning tree. Fifth, we define vertex trajectories that allow us to morph between polylines. We require that both the angles and the edge lengths change linearly over time. As it is impossible to fulfill all of these requirements simultaneously, we mediate between them using least-squares adjustment. Sixth, we discuss the performance of some commonly used data structures for a specific spatial problem. 
Seventh, we conclude this thesis and present open problems.}, subject = {Generalisierung }, language = {en} } @article{PetschkeStaab2018, author = {Petschke, Danny and Staab, Torsten E.M.}, title = {DLTPulseGenerator: a library for the simulation of lifetime spectra based on detector-output pulses}, series = {SoftwareX}, volume = {7}, journal = {SoftwareX}, doi = {10.1016/j.softx.2018.04.002}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-176883}, pages = {122-128}, year = {2018}, abstract = {The quantitative analysis of lifetime spectra relevant in both life and materials sciences presents one of the ill-posed inverse problems and, hence, leads to most stringent requirements on the hardware specifications and the analysis algorithms. Here we present DLTPulseGenerator, a library written in native C++ 11, which provides a simulation of lifetime spectra according to the measurement setup. The simulation is based on pairs of non-TTL detector output-pulses. Those pulses require the Constant Fraction Principle (CFD) for the determination of the exact timing signal and, thus, the calculation of the time difference i.e. the lifetime. To verify the functionality, simulation results were compared to experimentally obtained data using Positron Annihilation Lifetime Spectroscopy (PALS) on pure tin.}, language = {en} }