@phdthesis{Reul2020, author = {Reul, Christian}, title = {An Intelligent Semi-Automatic Workflow for Optical Character Recognition of Historical Printings}, doi = {10.25972/OPUS-20923}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-209239}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Optical Character Recognition (OCR) on historical printings is a challenging task mainly due to the complexity of the layout and the highly variant typography. Nevertheless, in the last few years great progress has been made in the area of historical OCR, resulting in several powerful open-source tools for preprocessing, layout analysis and segmentation, Automatic Text Recognition (ATR), and postcorrection. Their major drawback is that they offer only limited applicability for non-technical users such as humanist scholars, in particular when it comes to the combined use of several tools in a workflow. Furthermore, depending on the material, these tools are usually not able to achieve sufficiently low error rates fully automatically, let alone perfect results, creating a demand for an interactive postcorrection functionality which, however, is generally not incorporated. This thesis addresses these issues by presenting the open-source OCR software OCR4all, which combines state-of-the-art OCR components and continuous model training into a comprehensive workflow. While a variety of materials can already be processed fully automatically, books with more complex layouts require manual intervention by the users. This is mostly due to the fact that the Ground Truth (GT) required for training stronger mixed models (for segmentation as well as text recognition) is not yet available in the desired quantity or quality. To deal with this issue in the short run, OCR4all offers better recognition capabilities in combination with a very comfortable Graphical User Interface (GUI) that allows error corrections not only in the final output, but already in early stages, in order to minimize error propagation. In the long run, this constant manual correction produces large quantities of valuable, high-quality training material, which can be used to improve fully automatic approaches. Furthermore, extensive configuration capabilities are provided to set the degree of automation of the workflow and to make adaptations to the carefully selected default parameters for specific printings, if necessary. The architecture of OCR4all allows for an easy integration (or substitution) of newly developed tools for its main components by supporting standardized interfaces like PageXML, thus aiming at continually higher automation for historical printings. In addition to OCR4all, several methodical extensions in the form of accuracy-improving techniques for training and recognition are presented. Most notably, an effective, sophisticated, and adaptable voting methodology using a single ATR engine, a pretraining procedure, and an Active Learning (AL) component are proposed. Experiments showed that combining pretraining and voting significantly improves the effectiveness of book-specific training, reducing the obtained Character Error Rates (CERs) by more than 50\%. The proposed extensions were further evaluated in two real-world case studies: First, the voting and pretraining techniques were transferred to the task of constructing so-called mixed models, which are trained on a variety of different fonts.
This was done using 19th-century Fraktur script as an example, resulting in a considerable improvement over a variety of existing open-source and commercial engines and models. Second, the extension from ATR on raw text to the adjacent topic of typography recognition was successfully addressed by thoroughly indexing a historical lexicon that heavily relies on different font types in order to encode its complex semantic structure. During the main experiments on very complex early printed books, even users with minimal or no experience were able not only to comfortably deal with the challenges presented by the complex layout, but also to recognize the text with manageable effort and excellent quality, achieving CERs below 0.5\%. Furthermore, the fully automated application on 19th-century novels showed that OCR4all (average CER of 0.85\%) can considerably outperform the commercial state-of-the-art tool ABBYY Finereader (5.3\%) on moderate layouts if suitably pretrained mixed ATR models are available.}, subject = {Optische Zeichenerkennung}, language = {en} } @phdthesis{Krug2020, author = {Krug, Markus}, title = {Techniques for the Automatic Extraction of Character Networks in German Historic Novels}, doi = {10.25972/OPUS-20918}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-209186}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Recent advances in Natural Language Processing (NLP) allow for the fully automatic extraction of character networks from an incoming text. These networks serve as a compact and easy-to-grasp representation of literary fiction. They offer an aggregated view of the text, which can be used during distant reading approaches for the analysis of literary hypotheses. At their core, the networks consist of nodes, which represent literary characters, and edges, which represent relations between characters. For the automatic extraction of such a network, the first step is the detection of the references to all fictional entities that are of importance for a text. References to the fictional entities appear in the form of names, noun phrases, and pronouns. Prior to this work, no components capable of automatically detecting character references were available. Existing tools are only capable of detecting proper nouns, a subset of all character references. When evaluated on the task of detecting proper nouns in the domain of literary fiction, they still underperform, with an F1-score of just about 50\%. This thesis uses techniques from the field of semi-supervised learning, such as Distant Supervision and Generalized Expectations, and improves the results of an existing tool to about 82\% when evaluated on all three categories in literary fiction, without the need for annotated data in the target domain. However, since this quality is still not sufficient, the decision was made to annotate DROC, a corpus comprising 90 fragments of German novels. This resulted in a new general-purpose annotation environment called ATHEN, as well as annotated data that spans about 500,000 tokens in total. Using this data, the combination of supervised algorithms and a tailored rule-based algorithm, which together are able to exploit both local and global consistencies, yields an algorithm with an F1-score of about 93\%. This component is referred to as the Kallimachos tagger.
A character network cannot directly display references, however; instead, they need to be clustered so that all references that belong to the same real-world or fictional entity are grouped together. This process, widely known as coreference resolution, is a hard problem that has been in the focus of research for more than half a century. This work experimented with adaptations of classical feature-based machine learning, with a dedicated rule-based algorithm, and with modern Deep Learning techniques, but no approach was able to surpass 55\% B-Cubed F1 when evaluated on DROC. Due to this barrier, many researchers do not use a fully-fledged coreference resolution when they extract character networks, but focus only on a more forgiving subset: the names. For novels such as Alice's Adventures in Wonderland by Lewis Carroll, this would, however, only result in a network in which many important characters are missing. In order to integrate important characters into the network that are not named by the author, this work makes use of the automatic detection of speakers and addressees of direct speech utterances (all entities involved in a dialog are considered to be of importance). This is by itself not an easy task; however, the most successful system analyzed in this thesis is able to correctly determine the speaker for about 85\% of the utterances as well as the addressee for about 65\%. This speaker information not only helps to identify the most dominant characters, but also serves as a way to model the relations between entities. In the course of this work, components were developed to model relations between characters using speaker attribution, co-occurrences, as well as true interactions, for which yet again a dataset was annotated using ATHEN. Furthermore, since relations between characters are usually typed, a component for the extraction of typed relations was developed. Similar to the experiments on character reference detection, a combination of a rule-based and a Maximum Entropy classifier yielded the best overall results, with the extraction of family relations reaching a score of about 80\% and love relations a score of about 50\%. For family relations, a kernel for a Support Vector Machine was developed that even exceeded the scores of the combined approach, but falls behind on the other labels. In addition, this work presents new ways to evaluate automatically extracted networks without the need for domain experts; instead, it relies on expert summaries. It also refrains from using social network analysis for the evaluation and instead presents ranked evaluations using Precision@k and the Spearman rank correlation coefficient for the nodes and edges of the network. An analysis using these metrics showed that the central characters of a novel are contained with high probability, but the quality drops rather quickly if more than five entities are analyzed. The quality of the edges is mainly dominated by the quality of the coreference resolution; the correlation coefficient between gold edges and system edges therefore varies between 30 and 60\%.
All developed components are aggregated alongside a large set of other preprocessing modules in the Kallimachos pipeline and can be reused without any restrictions.}, subject = {Textanalyse}, language = {en} } @techreport{BlenkKellererHossfeld2020, type = {Working Paper}, author = {Blenk, Andreas and Kellerer, Wolfgang and Hoßfeld, Tobias}, title = {Technical Report on DFG Project SDN-App: SDN-enabled Application-aware Network Control Architectures and their Performance Assessment}, doi = {10.25972/OPUS-20755}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-207558}, year = {2020}, abstract = {The DFG project "SDN-enabled Application-aware Network Control Architectures and their Performance Assessment" (DFG SDN-App) focused in phase 1 (Jan 2017 - Dec 2019) on software-defined networking (SDN). Being a fundamental paradigm shift, SDN enables the remote control of networking devices made by different vendors from a logically centralized controller. In principle, this enables a more dynamic and flexible management of network resources compared to traditional legacy networks. Phase 1 focused on multimedia applications and their users' Quality of Experience (QoE). This document reports the achievements of the first phase (Jan 2017 - Dec 2019), which was jointly carried out by the Technical University of Munich, the Technical University of Berlin, and the University of W{\"u}rzburg. The project started at the institutions in Munich and W{\"u}rzburg in January 2017 and lasted until December 2019. In Phase 1, the project targeted the development of fundamental control mechanisms for network-aware application control and application-aware network control in Software Defined Networks (SDN) so as to enhance the user-perceived quality (QoE). The idea is to leverage the QoE from multiple applications as a control input parameter for application- and network control mechanisms. These mechanisms are implemented by an Application Control Plane (ACP) and a Network Control Plane (NCP). In order to obtain a global view of the current system state, application and network parameters are monitored and communicated to the respective control plane interface. Network and application information and their demands are exchanged between the control planes so as to derive appropriate control actions. To this end, a methodology is developed to assess the application performance and, in particular, the QoE. This requires an appropriate QoE modeling of the applications considered in the project as well as metrics like QoE fairness to be utilized within QoE management. In summary, the application-network interaction can improve the QoE in multi-application scenarios. This is ensured by utilizing information from the application layer, which is mapped to QoE by appropriate QoS-QoE models within the network control plane. On the other hand, network information is monitored and communicated to the application control plane.
}, subject = {Software-defined networking}, language = {en} } @phdthesis{Djebko2020, author = {Djebko, Kirill}, title = {Quantitative modellbasierte Diagnose am Beispiel der Energieversorgung des SONATE-Nanosatelliten mit automatisch kalibrierten Modellkomponenten}, doi = {10.25972/OPUS-20662}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-206628}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Nowadays, technical systems are expected to function flawlessly at all times in order to ensure that everyday life runs smoothly. Technical systems, however, can exhibit defects that restrict their operation or lead to their total failure. In general, defects manifest themselves as a change in the behavior of individual components. These deviations from the nominal behavior increase in intensity the closer the affected component is to a total failure. For this reason, the faulty behavior of components should be detected in time to prevent permanent damage. This is of particular importance in aerospace applications. A satellite's components cannot be maintained once it is already in orbit. The defect of a single component, such as the battery of the power supply, can mean the loss of the entire mission. In principle, fault detection can be carried out manually, as is common practice in satellite operations. To this end, a human expert, a so-called operator, has to monitor the system. This form of monitoring, however, strongly depends on the time, availability, and expertise of the operator who carries out the monitoring. Another approach is the use of a dedicated diagnosis system, which can monitor the technical system permanently and compute diagnoses autonomously. These diagnoses can then be inspected by an expert, who can take actions based on them. The model-based diagnosis system presented in this thesis uses a quantitative model of a technical system that describes its nominal behavior. The observed behavior of the technical system, given by measured values, is compared with its expected behavior, given by simulated values of the model, and discrepancies are determined. Each discrepancy constitutes a symptom. Diagnoses are computed by first deriving a so-called conflict set for each symptom. This is a set of components such that the defect of one of these components could explain the corresponding symptom. Using these conflict sets, so-called hitting sets are computed. A hitting set is a set of components such that the simultaneous defect of all components in this set could explain all observed symptoms. Each minimal hitting set corresponds to a diagnosis. To compute these sets, the diagnosis system uses a procedure in which dependent components are determined first and are then incriminated by components showing symptoms and exonerated by correctly functioning components.
F{\"u}r die einzelnen Komponenten werden Bewertungen auf Basis dieser Be- und Entlastungen berechnet und mit ihnen Diagnosen gestellt. Da das Diagnosesystem auf ausreichend genaue Modelle angewiesen ist und die manuelle Kalibrierung dieser Modelle mit erheblichem Aufwand verbunden ist, wurde ein Verfahren zur automatischen Kalibrierung entwickelt. Dieses verwendet einen Zyklischen Genetischen Algorithmus, um mithilfe von aufgezeichneten Werten der realen Komponenten Modellparameter zu bestimmen, sodass die Modelle die aufgezeichneten Daten m{\"o}glichst gut reproduzieren k{\"o}nnen. Zur Evaluation der automatischen Kalibrierung wurden ein Testaufbau und verschiedene dynamische und manuell schwierig zu kalibrierende Komponenten des Qualifikationsmodells eines realen Nanosatelliten, dem SONATE-Nanosatelliten modelliert und kalibriert. Der Testaufbau bestand dabei aus einem Batteriepack, einem Laderegler, einem Tiefentladeschutz, einem Entladeregler, einem Stepper Motor HAT und einem Motor. Er wurde zus{\"a}tzlich zur automatischen Kalibrierung unabh{\"a}ngig manuell kalibriert. Die automatisch kalibrierten Satellitenkomponenten waren ein Reaktionsrad, ein Entladeregler, Magnetspulen, bestehend aus einer Ferritkernspule und zwei Luftspulen, eine Abschlussleiterplatine und eine Batterie. Zur Evaluation des Diagnosesystems wurde die Energieversorgung des Qualifikationsmodells des SONATE-Nanosatelliten modelliert. F{\"u}r die Batterien, die Entladeregler, die Magnetspulen und die Reaktionsr{\"a}der wurden die vorher automatisch kalibrierten Modelle genutzt. F{\"u}r das Modell der Energieversorgung wurden Fehler simuliert und diese diagnostiziert. Die Ergebnisse der Evaluation der automatischen Kalibrierung waren, dass die automatische Kalibrierung eine mit der manuellen Kalibrierung vergleichbare Genauigkeit f{\"u}r den Testaufbau lieferte und diese sogar leicht {\"u}bertraf und dass die automatisch kalibrierten Satellitenkomponenten eine durchweg hohe Genauigkeit aufwiesen und damit f{\"u}r den Einsatz im Diagnosesystem geeignet waren. Die Ergebnisse der Evaluation des Diagnosesystems waren, dass die simulierten Fehler zuverl{\"a}ssig gefunden wurden und dass das Diagnosesystem in der Lage war die plausiblen Ursachen dieser Fehler zu diagnostizieren.}, subject = {Satellit}, language = {de} } @techreport{GrigorjewMetzgerHossfeldetal.2020, author = {Grigorjew, Alexej and Metzger, Florian and Hoßfeld, Tobias and Specht, Johannes and G{\"o}tz, Franz-Josef and Chen, Feng and Schmitt, J{\"u}rgen}, title = {Asynchronous Traffic Shaping with Jitter Control}, doi = {10.25972/OPUS-20582}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205824}, pages = {8}, year = {2020}, abstract = {Asynchronous Traffic Shaping enabled bounded latency with low complexity for time sensitive networking without the need for time synchronization. However, its main focus is the guaranteed maximum delay. Jitter-sensitive applications may still be forced towards synchronization. This work proposes traffic damping to reduce end-to-end delay jitter. It discusses its application and shows that both the prerequisites and the guaranteed delay of traffic damping and ATS are very similar. 
Finally, it presents a brief evaluation of delay jitter in an example topology by means of a simulation and a worst-case estimation.}, subject = {Echtzeit}, language = {en} } @techreport{Metzger2020, type = {Working Paper}, author = {Metzger, Florian}, title = {Crowdsensed QoE for the community - a concept to make QoE assessment accessible}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-203748}, pages = {7}, year = {2020}, abstract = {In recent years, several community testbeds as well as participatory sensing platforms have successfully established themselves to provide open data to everyone interested. Each of them was created with a specific goal in mind, ranging from collecting radio coverage data to environmental and radiation data. Such data can be used by the community in their decision making, whether it is subscribing to a specific mobile phone service that provides good coverage in an area or finding a sunny and warm region for the summer holidays. However, the existing platforms usually limit themselves to directly measurable network QoS. If such a crowdsourced data set provided more in-depth derived measures, this would enable even better decision making. A community-driven crowdsensing platform that derives spatial application-layer user experience from resource-friendly bandwidth estimates would be such a case; video streaming services come to mind as a prime example. In this paper, we present a concept for such a system based on an initial prototype that eases the collection of data necessary to determine mobile-specific QoE at large scale. In addition, we reason why the simple quality metric proposed here can hold its own.}, subject = {Quality of Experience}, language = {en} } @techreport{OPUS4-20232, type = {Working Paper}, title = {White Paper on Crowdsourced Network and QoE Measurements - Definitions, Use Cases and Challenges}, editor = {Hoßfeld, Tobias and Wunderer, Stefan}, doi = {10.25972/OPUS-20232}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-202327}, pages = {24}, year = {2020}, abstract = {The goal of the white paper at hand is as follows. The definitions of the terms build a framework for discussions around the hype topic 'crowdsourcing'. This serves as a basis for differentiation and a consistent view from different perspectives on crowdsourced network measurements, with the goal of providing a commonly accepted definition in the community. The focus is on the context of mobile and fixed network operators, but also on measurements at different layers (network, application, user layer). In addition, the white paper shows the value of crowdsourcing for selected use cases, e.g., to improve QoE or to address regulatory issues. Finally, the major challenges and issues for researchers and practitioners are highlighted. This white paper is the outcome of the W{\"u}rzburg seminar on "Crowdsourced Network and QoE Measurements", which took place on 25-26 September 2019 in W{\"u}rzburg, Germany. International experts were invited from industry and academia. They are well known in their communities, having different backgrounds in crowdsourcing, mobile networks, network measurements, network performance, Quality of Service (QoS), and Quality of Experience (QoE). The discussions in the seminar focused on how crowdsourcing will support vendors, operators, and regulators in determining the Quality of Experience in new 5G networks that enable various new applications and network architectures.
As a result of the discussions, the need for a white paper became apparent, with the goal of providing a scientific discussion of the terms "crowdsourced network measurements" and "crowdsourced QoE measurements", of describing relevant use cases for such crowdsourced data, and of outlining its underlying challenges. During the seminar, these main topics were identified, intensively discussed in break-out groups, and brought back into the plenum several times. The outcome of the seminar is the white paper at hand, which is, to our knowledge, the first one covering the topic of crowdsourced network and QoE measurements.}, subject = {Crowdsourcing}, language = {en} } @phdthesis{Azar2020, author = {Azar, Isabel}, title = {Konzeption und Evaluation eines webbasierten Patienteninformationsprogrammes zur {\"U}berpr{\"u}fung internistischer Verdachtsdiagnosen}, doi = {10.25972/OPUS-19964}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-199641}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {The topic of this dissertation is "Konzeption und Evaluation eines webbasierten Patienteninformationsprogrammes zur {\"U}berpr{\"u}fung internistischer Verdachtsdiagnosen" (conception and evaluation of a web-based patient information program for checking suspected diagnoses in internal medicine). Together with the Institute of Computer Science, the knowledge-based second-opinion system SymptomCheck was developed. The program serves to check suspected diagnoses. Knowledge bases were created in which symptoms, findings, and examinations are assessed according to a scoring scheme. Subsequently, an online-accessible start page was created on which users can check suspected diagnoses, primarily from internal medicine. The program was tested in two studies with regard to its sensitivity and specificity as well as its usability. In the first study, the suspected diagnoses of outpatients were compared with the diagnoses made by physicians; a second online study aimed at the general population focused mainly on evaluating usability. As far as is known, this is the first study in which a self-developed program was tested independently on real patients.}, subject = {Entscheidungsunterst{\"u}tzungssystem}, language = {de} } @phdthesis{Roth2020, author = {Roth, Daniel}, title = {Intrapersonal, Interpersonal, and Hybrid Interactions in Virtual Reality}, doi = {10.25972/OPUS-18862}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188627}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Virtual reality and related media and communication technologies have a growing impact on professional application fields and our daily life. Virtual environments have the potential to change the way we perceive ourselves and how we interact with others. In comparison to other technologies, virtual reality allows for the convincing display of a virtual self-representation, an avatar, to oneself and also to others. This is referred to as user embodiment. Avatars can be of varying realism and abstraction in their appearance and in the behaviors they convey. Such user-embodying interfaces, in turn, can impact the perception of the self as well as the perception of interactions. For researchers, designers, and developers, it is of particular interest to understand these perceptual impacts in order to apply them to therapy, assistive applications, social platforms, or games, for example.
The present thesis investigates and relates these impacts with regard to three areas: intrapersonal effects, interpersonal effects, and effects of social augmentations provided by the simulation. With regard to intrapersonal effects, we specifically explore which simulation properties impact the illusion of owning and controlling a virtual body, as well as a perceived change in body schema. Our studies led to the construction of an instrument to measure these dimensions, and our results indicate that these dimensions are especially affected by the level of immersion, the simulation latency, as well as the level of personalization of the avatar. With regard to interpersonal effects, we compare physical and user-embodied social interactions, as well as different degrees of freedom in the replication of nonverbal behavior. Our results suggest that functional levels of interaction are maintained, whereas aspects of presence can be affected by avatar-mediated interactions, and collaborative motor coordination can be disturbed by immersive simulations. Social interaction is composed of many unknown symbols and harmonic patterns that define our understanding and interpersonal rapport. For successful virtual social interactions, a mere replication of physical-world behaviors in virtual environments may seem feasible. However, the potential of mediated social interactions goes beyond this mere replication. In a third vein of research, we propose and evaluate alternative concepts of how computers can be used to actively engage in mediating social interactions, namely hybrid avatar-agent technologies. Specifically, we investigated the possibilities of augmenting social behaviors by modifying and transforming user input according to social phenomena and behavior, such as nonverbal mimicry, directed gaze, joint attention, and grouping. Based on our results, we argue that such technologies could be beneficial for computer-mediated social interactions, for example to compensate for lacking sensory input and disturbances in data transmission, or to increase aspects of social presence by the visual substitution or amplification of social behaviors. Based on related work and the presented findings, the present thesis proposes the perspective of considering computers as social mediators. Concluding from prototypes and empirical studies, the potential of technology to act as an active mediator of social perception, with regard to the perception of the self as well as the perception of social interactions, may benefit our society by enabling further methods for diagnosis, treatment, and training, as well as the inclusion of individuals with social disorders. In this regard, we discuss implications for our society and ethical aspects.
This thesis extends previous empirical work and further presents novel instruments, concepts, and implications to open up new perspectives for the development of virtual reality, mixed reality, and augmented reality applications.}, subject = {Virtuelle Realit{\"a}t}, language = {en} } @techreport{GrigorjewMetzgerHossfeldetal.2020a, author = {Grigorjew, Alexej and Metzger, Florian and Hoßfeld, Tobias and Specht, Johannes and G{\"o}tz, Franz-Josef and Schmitt, J{\"u}rgen and Chen, Feng}, title = {Technical Report on Bridge-Local Guaranteed Latency with Strict Priority Scheduling}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-198310}, year = {2020}, abstract = {Bridge-local latency computation is often regarded with caution, as earlier efforts with the Credit-Based Shaper (CBS) showed that CBS requires network-wide information for tight bounds. Recently, new shaping mechanisms and timed gates were applied to achieve such guarantees nonetheless, but they require support for these new mechanisms in the forwarding devices. This document presents a per-hop latency bound for individual streams in a class-based network that applies the IEEE 802.1Q strict priority transmission selection algorithm. It is based on self-pacing talkers and uses the accumulated latency fields during the reservation process to provide upper bounds with bridge-local information. The presented delay bound is proven mathematically and then evaluated with respect to its accuracy. It indicates the information that must be provided for admission control, e.g., as implemented by a resource reservation protocol such as IEEE 802.1Qdd. Further, it hints at potential improvements regarding new mechanisms and higher accuracy given more information.}, subject = {Echtzeit}, language = {en} }