@article{HolfelderMulanskySchleeetal.2021, author = {Holfelder, Marc and Mulansky, Lena and Schlee, Winfried and Baumeister, Harald and Schobel, Johannes and Greger, Helmut and Hoff, Andreas and Pryss, R{\"u}diger}, title = {Medical device regulation efforts for mHealth apps during the COVID-19 pandemic — an experience report of Corona Check and Corona Health}, series = {J — Multidisciplinary Scientific Journal}, volume = {4}, journal = {J — Multidisciplinary Scientific Journal}, number = {2}, issn = {2571-8800}, doi = {10.3390/j4020017}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-285434}, pages = {206 -- 222}, year = {2021}, abstract = {Within the healthcare environment, mobile health (mHealth) applications (apps) are becoming more and more important. The number of new mHealth apps has risen steadily in recent years. The COVID-19 pandemic in particular has led to an enormous number of app releases. In most countries, mHealth applications have to be compliant with several regulatory aspects to be declared a "medical app". However, the latest applicable medical device regulation (MDR) does not provide further details on the requirements for mHealth applications. When developing a medical app, it is essential that all contributors in an interdisciplinary team — especially software engineers — are aware of the specific regulatory requirements beforehand. The development process, however, should not be stalled due to integration of the MDR. Therefore, a development framework that includes these aspects is required to facilitate a reliable and quick development process. The paper at hand introduces the creation of such a framework on the basis of the Corona Health and Corona Check apps. The relevant regulatory guidelines are listed and summarized as guidance for medical app development during the pandemic and beyond. In particular, the important stages and challenges that emerged during the entire development process are highlighted.}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Markup overlap: Improving Fragmentation Method}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49084}, year = {2010}, abstract = {Overlapping is a common word used to describe documents whose structural dimensions cannot be adequately represented using a tree structure. For instance, a quotation may start in one verse and end in another. The problem of overlapping hierarchies is a recurring one, which has been addressed by a variety of approaches. There are XML-based solutions as well as non-XML ones. The XML-based solutions are multiple documents, empty elements, fragmentation, out-of-line markup, JITT, and BUVH. The non-XML approaches comprise CONCUR/XCONCUR, MECS, LMNL, etc. This paper briefly presents the state of the art in overlapping hierarchies and introduces two variations on the TEI fragmentation markup that have several advantages.}, subject = {XML}, language = {en} } @techreport{RieglerWernerKayal2022, type = {Working Paper}, author = {Riegler, Clemens and Werner, Lennart and Kayal, Hakan}, title = {MAPLE: Marsian Autorotation Probe Lander Experiment}, doi = {10.25972/OPUS-28239}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-282390}, pages = {7}, year = {2022}, abstract = {The first step towards aerial planetary exploration has been made. Ingenuity shows extremely promising results, and new missions are already underway. Rotorcraft are capable of flight.
This capability could be utilized to support the last stages of Entry, Descent, and Landing. Thus, mass and complexity could be scaled down. Autorotation is one method of descent. It describes unpowered descent and landing, typically performed by helicopters in the event of an engine failure. MAPLE is proposed to test these procedures and to further the understanding of autorotation on other planets. In this series of experiments, the Ingenuity helicopter is utilized. Ingenuity would autorotate to a "mid-air landing" before continuing with normal flight. Ultimately, the collected data shall help to understand autorotation on Mars and its utilization for interplanetary exploration.}, language = {en} } @phdthesis{Krenzer2023, author = {Krenzer, Adrian}, title = {Machine learning to support physicians in endoscopic examinations with a focus on automatic polyp detection in images and videos}, doi = {10.25972/OPUS-31911}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-319119}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Deep learning enables enormous progress in many computer vision-related tasks. Artificial Intelligence (AI) steadily yields new state-of-the-art results in the field of detection and classification. In doing so, AI performance equals or exceeds human performance. Those achievements impacted many domains, including medical applications. One particular field of medical applications is gastroenterology. In gastroenterology, machine learning algorithms are used to assist examiners during interventions. One of the most critical concerns for gastroenterologists is the development of Colorectal Cancer (CRC), which is one of the leading causes of cancer-related deaths worldwide. Detecting polyps in screening colonoscopies is the essential procedure to prevent CRC. During a colonoscopy, the gastroenterologist uses an endoscope to screen the whole colon for polyps. Polyps are mucosal growths that can vary in severity. This thesis supports gastroenterologists in their examinations with automated detection and classification systems for polyps. The main contribution is a real-time polyp detection system. This system is ready to be installed in any gastroenterology practice worldwide using open-source software. The system achieves state-of-the-art detection results and is currently evaluated in a clinical trial in four different centers in Germany. The thesis presents two additional key contributions: One is a polyp detection system with extended vision tested in an animal trial. Polyps often hide behind folds or in uninvestigated areas. Therefore, the polyp detection system with extended vision uses an endoscope assisted by two additional cameras to see behind those folds. If a polyp is detected, the endoscopist receives a visual signal. While the detection system handles the two additional camera inputs, the endoscopist focuses on the main camera as usual. The second is a pair of polyp classification models, one for classification based on shape (Paris) and the other based on surface and texture (NBI International Colorectal Endoscopic (NICE) classification). Both classifications help the endoscopist with the treatment of and the decisions about the detected polyp. The key algorithms of the thesis achieve state-of-the-art performance. Notably, the polyp detection system, tested on a highly demanding video data set, shows an F1 score of 90.25 \% while working in real time. The results exceed all real-time systems in the literature.
Furthermore, the first preliminary results of the clinical trial of the polyp detection system suggest a high Adenoma Detection Rate (ADR). In the preliminary study, all polyps were detected by the polyp detection system, and the system achieved a high usability score of 96.3 (max 100). The Paris classification model achieved an F1 score of 89.35 \%, which is state-of-the-art. The NICE classification model achieved an F1 score of 81.13 \%. Furthermore, a large data set for polyp detection and classification was created during this thesis. Therefore, a fast and robust annotation system called Fast Colonoscopy Annotation Tool (FastCAT) was developed. The system simplifies the annotation process for gastroenterologists: the gastroenterologists only annotate key parts of the endoscopic video. Afterward, those video parts are pre-labeled by a polyp detection AI to speed up the process. After the AI has pre-labeled the frames, non-experts correct and finish the annotation. This annotation process is fast and ensures high quality. FastCAT reduces the overall workload of the gastroenterologist on average by a factor of 20 compared to an open-source state-of-the-art annotation tool.}, subject = {Deep Learning}, language = {en} } @techreport{LohGeisslerHossfeld2022, type = {Working Paper}, author = {Loh, Frank and Geißler, Stefan and Hoßfeld, Tobias}, title = {LoRaWAN Network Planning in Smart Environments: Towards Reliability, Scalability, and Cost Reduction}, series = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, journal = {W{\"u}rzburg Workshop on Next-Generation Communication Networks (WueWoWas'22)}, doi = {10.25972/OPUS-28082}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-280829}, pages = {4}, year = {2022}, abstract = {The goal of this work is to present guidance for LoRaWAN planning in order to improve overall reliability of message transmissions and scalability. At the end, the cost component is discussed. To this end, a five-step approach is presented that helps to plan a LoRaWAN deployment step by step: Based on the device locations, an initial gateway placement is suggested, followed by in-depth frequency and channel access planning. After the initial planning phase, updates to the channel access and gateway planning are suggested; these should also be performed periodically during network operation. Since current gateway placement approaches have only been studied with random channel access, there is a lot of potential in the cell planning phase. Furthermore, the performance of different channel access approaches depends highly on network load, and thus on cell size and sensor density. Last, the influence of different cell planning ideas on expected costs is discussed.}, subject = {Datennetz}, language = {en} } @article{WienrichCarolusMarkusetal.2023, author = {Wienrich, Carolin and Carolus, Astrid and Markus, Andr{\´e} and Augustin, Yannik and Pfister, Jan and Hotho, Andreas}, title = {Long-term effects of perceived friendship with intelligent voice assistants on usage behavior, user experience, and social perceptions}, series = {Computers}, volume = {12}, journal = {Computers}, number = {4}, issn = {2073-431X}, doi = {10.3390/computers12040077}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313552}, year = {2023}, abstract = {Social patterns and roles can develop when users talk to intelligent voice assistants (IVAs) daily.
The current study investigates whether users assign different roles to devices and how this affects their usage behavior, user experience, and social perceptions. Since social roles take time to establish, we equipped 106 participants with Alexa or Google assistants and some smart home devices and observed their interactions for nine months. We analyzed diverse subjective (questionnaire) and objective (interaction) data. By combining social science and data science analyses, we identified two distinct clusters—users who assigned a friendship role to IVAs over time and users who did not. Interestingly, these clusters exhibited significant differences in their usage behavior, user experience, and social perceptions of the devices. For example, participants who assigned a friendship role to IVAs attributed more friendship to them, used them more frequently, reported more enjoyment during interactions, and perceived more empathy for IVAs. In addition, these users had distinct personal requirements; for example, they reported more loneliness. This study provides valuable insights into the role-specific effects and consequences of voice assistants. Recent developments in conversational language models such as ChatGPT suggest that the findings of this study could make an important contribution to the design of dialogic human-AI interactions.}, language = {en} } @article{KlemzRote2022, author = {Klemz, Boris and Rote, G{\"u}nter}, title = {Linear-Time Algorithms for Maximum-Weight Induced Matchings and Minimum Chain Covers in Convex Bipartite Graphs}, series = {Algorithmica}, volume = {84}, journal = {Algorithmica}, number = {4}, issn = {1432-0541}, doi = {10.1007/s00453-021-00904-w}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-267876}, pages = {1064-1080}, year = {2022}, abstract = {A bipartite graph G=(U,V,E) is convex if the vertices in V can be linearly ordered such that for each vertex u∈U, the neighbors of u are consecutive in the ordering of V. An induced matching H of G is a matching for which no edge of E connects endpoints of two different edges of H. We show that in a convex bipartite graph with n vertices and m weighted edges, an induced matching of maximum total weight can be computed in O(n+m) time. An unweighted convex bipartite graph has a representation of size O(n) that records for each vertex u∈U the first and last neighbor in the ordering of V. Given such a compact representation, we compute an induced matching of maximum cardinality in O(n) time. In convex bipartite graphs, maximum-cardinality induced matchings are dual to minimum chain covers. A chain cover is a covering of the edge set by chain subgraphs, that is, subgraphs that do not contain induced matchings of more than one edge. Given a compact representation, we compute a representation of a minimum chain cover in O(n) time. If no compact representation is given, the cover can be computed in O(n+m) time. All of our algorithms achieve optimal linear running time for the respective problem and model, and they improve and generalize the previous results in several ways: The best algorithms for the unweighted problem versions had a running time of O(n\(^{2}\)) (Brandst{\"a}dt et al. in Theor. Comput. Sci. 381(1-3):260-265, 2007. https://doi.org/10.1016/j.tcs.2007.04.006).
The weighted case has not been considered before.}, language = {en} } @article{FischerHarteltPuppe2023, author = {Fischer, Norbert and Hartelt, Alexander and Puppe, Frank}, title = {Line-level layout recognition of historical documents with background knowledge}, series = {Algorithms}, volume = {16}, journal = {Algorithms}, number = {3}, issn = {1999-4893}, doi = {10.3390/a16030136}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-310938}, year = {2023}, abstract = {Digitization and transcription of historical documents offer new research opportunities for humanists and are the topics of many edition projects. However, manual work is still required for the main phases of layout recognition and the subsequent optical character recognition (OCR) of early printed documents. This paper describes and evaluates how deep learning approaches recognize text lines and how they can be extended to layout recognition using background knowledge. The evaluation was performed on five corpora of early prints from the 15th and 16th centuries, representing a variety of layout features. While the main text with standard layouts could be recognized in the correct reading order with a precision and recall of up to 99.9\%, even complex layouts were recognized at rates as high as 90\% by using background knowledge, the full potential of which was revealed when many pages of the same source were transcribed.}, language = {en} } @phdthesis{Somody2023, author = {Somody, Joseph Christian Campbell}, title = {Leveraging deep learning for identification and structural determination of novel protein complexes from \(in\) \(situ\) electron cryotomography of \(Mycoplasma\) \(pneumoniae\)}, doi = {10.25972/OPUS-31344}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313447}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The holy grail of structural biology is to study a protein in situ, and this goal has been fast approaching since the resolution revolution and the achievement of atomic resolution. A cell's interior is not a dilute environment, and proteins have evolved to fold and function as needed in that environment; as such, an investigation of a cellular component should ideally include the full complexity of the cellular environment. Imaging whole cells in three dimensions using electron cryotomography is the best method to accomplish this goal, but it comes with a limitation on sample thickness and produces noisy data unamenable to direct analysis. This thesis establishes a novel workflow to systematically analyse whole-cell electron cryotomography data in three dimensions and to find and identify instances of protein complexes in the data, setting up the determination of their structure and identity for success. Mycoplasma pneumoniae is a very small parasitic bacterium with fewer than 700 protein-coding genes, is thin enough and small enough to be imaged in large quantities by electron cryotomography, and can grow directly on the grids used for imaging, making it ideal for exploratory studies in structural proteomics. As part of the workflow, a methodology for training deep-learning-based particle-picking models is established. As a proof of principle, a dataset of whole-cell Mycoplasma pneumoniae tomograms is used with this workflow to characterize a novel membrane-associated complex observed in the data. Ultimately, 25431 such particles are picked from 353 tomograms and refined to a density map with a resolution of 11 {\AA}.
Making good use of orthogonal datasets to filter the search space and verify results, structures were predicted for candidate proteins and checked for a suitable fit in the density map. In the end, with this approach, nine proteins were found to be part of the complex, which appears to be associated with chaperone activity and to interact with the translocon machinery. Visual proteomics refers to the ultimate potential of in situ electron cryotomography: the comprehensive interpretation of tomograms. The workflow presented here is demonstrated to help in reaching that potential.}, subject = {Kryoelektronenmikroskopie}, language = {en} } @phdthesis{Dang2012, author = {Dang, Nghia Duc}, title = {Konzeption und Evaluation eines hybriden, skalierbaren Werkzeugs zur mechatronischen Systemdiagnose am Beispiel eines Diagnosesystems f{\"u}r freie Kfz-Werkst{\"a}tten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70774}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {The development of a knowledge-based system, specifically of a diagnostic system, is a subdiscipline of artificial intelligence and applied computer science. Over the course of research in this field, various solution approaches have been developed, with varying success when applied to automotive diagnostics. Diagnostic systems in franchised workshops, i.e., workshops bound to a vehicle manufacturer, mainly apply case-based diagnostics. For one, the variety of vehicles there is limited, and in addition there is an obligation to report new cases that are not yet present in the system. Independent workshops do not have such a database, so the case-based approach is difficult to implement. In independent workshops, i.e., workshops not bound to a vehicle manufacturer, troubleshooting is mainly based on fault trees. Due to the growing complexity of vehicles, which is largely caused by the rapidly increasing number of functions realized by mechatronic systems, and due to the growing variety of vehicle types, guided troubleshooting in independent workshops is not always effective. To ensure that the personnel of independent workshops are supported in future troubleshooting, new generations of manufacturer-independent diagnostic tools are needed that solve the problems of variant diversity and complexity. This thesis presents a solution approach that combines a qualitative, model-based diagnostic approach with an approach based on heuristic diagnostic knowledge. In addition to the basis for knowledge elicitation, this thesis covers the theoretical foundations for mastering variant diversity as well as the tests for the created diagnostic models. The diagnosis is symptom-based, and the inference mechanisms for processing the diagnostic knowledge combine the propagation of deviating physical quantities through the model with the evaluation of the heuristic knowledge. Furthermore, this thesis presents various aspects of the realization of the developed theoretical foundations, for example the system architecture, the knowledge elicitation process, and the course of the diagnostic procedure in the workshops.
The evaluation of the developed solution during knowledge elicitation, in the form of model building and modeling workshops as well as field tests, serves not only to confirm the developed approach but also to generate ideas for integrating the developed tools into the existing IT infrastructure.}, subject = {Diagnosesystem}, language = {de} } @phdthesis{Atzmueller2006, author = {Atzm{\"u}ller, Martin}, title = {Knowledge-Intensive Subgroup Mining - Techniques for Automatic and Interactive Discovery}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-21004}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Data mining has proved its significance in various domains and applications. As an important subfield of the general data mining task, subgroup mining can be used, e.g., for marketing purposes in business domains, or for quality profiling and analysis in medical domains. The goal is to efficiently discover novel, potentially useful and ultimately interesting knowledge. However, in real-world situations these requirements often cannot be fulfilled, e.g., if the applied methods do not scale to large data sets, if too many results are presented to the user, or if many of the discovered patterns are already known to the user. This thesis proposes a combination of several techniques in order to cope with the sketched problems: We discuss automatic methods, including heuristic and exhaustive approaches, and especially present the novel SD-Map algorithm for exhaustive subgroup discovery that is fast and effective. For an interactive approach we describe techniques for subgroup introspection and analysis, and we present advanced visualization methods, e.g., the zoomtable that directly shows the most important parameters of a subgroup and that can be used for optimization and exploration. We also describe various visualizations for subgroup comparison and evaluation in order to support the user during these essential steps. Furthermore, we propose to include into the mining process any available background knowledge that is easy to formalize. We can utilize the knowledge in many ways: to focus the search process, to restrict the search space, and ultimately to increase the efficiency of the discovery method. We especially present background knowledge to be applied for filtering the elements of the problem domain, for constructing abstractions, for aggregating values of attributes, and for the post-processing of the discovered set of patterns. Finally, the techniques are combined into a knowledge-intensive process supporting both automatic and interactive methods for subgroup mining. The practical significance of the proposed approach strongly depends on the available tools. We introduce the VIKAMINE system as a highly-integrated environment for knowledge-intensive active subgroup mining. Also, we present an evaluation consisting of two parts: With respect to objective evaluation criteria, i.e., comparing the efficiency and the effectiveness of the subgroup discovery methods, we provide an experimental evaluation using generated data. For that task we present a novel data generator that allows a simple and intuitive specification of the data characteristics. The results of the experimental evaluation indicate that the novel SD-Map method outperforms the other described algorithms on data sets similar to the intended application in terms of efficiency, and, for the heuristic methods, also with respect to precision and recall.
Subjective evaluation criteria include user acceptance, the benefit of the approach, and the interestingness of the results. We present five case studies utilizing the presented techniques: the approach has been successfully implemented in medical and technical applications using real-world data sets. The method was very well accepted by the users, who were able to discover novel, useful, and interesting knowledge.}, subject = {Data Mining}, language = {en} } @article{OberdoerferLatoschik2019, author = {Oberd{\"o}rfer, Sebastian and Latoschik, Marc Erich}, title = {Knowledge encoding in game mechanics: transfer-oriented knowledge learning in desktop-3D and VR}, series = {International Journal of Computer Games Technology}, volume = {2019}, journal = {International Journal of Computer Games Technology}, doi = {10.1155/2019/7626349}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-201159}, pages = {7626349}, year = {2019}, abstract = {Affine Transformations (ATs) are complex and abstract learning content. Encoding the AT knowledge in Game Mechanics (GMs) achieves repetitive knowledge application and audiovisual demonstration. Playing a serious game providing these GMs leads to motivating and effective knowledge learning. Using immersive Virtual Reality (VR) has the potential to further increase the serious game's learning outcome and learning quality. This paper compares the effectiveness and efficiency of desktop-3D and VR with respect to the achieved learning outcome. Also, the present study analyzes the effectiveness of an enhanced audiovisual knowledge encoding and the provision of a debriefing system. The results validate the effectiveness of the knowledge encoding in GMs to achieve knowledge learning. The study also indicates that VR is beneficial for the overall learning quality and that an enhanced audiovisual encoding has only a limited effect on the learning outcome.}, language = {en} } @article{FathyDarwishAbdelhamidetal.2022, author = {Fathy, Moustafa and Darwish, Mostafa A. and Abdelhamid, Al-Shaimaa M. and Alrashedy, Gehad M. and Othman, Othman Ali and Naseem, Muhammad and Dandekar, Thomas and Othman, Eman M.}, title = {Kinetin ameliorates cisplatin-induced hepatotoxicity and lymphotoxicity via attenuating oxidative damage, cell apoptosis and inflammation in rats}, series = {Biomedicines}, volume = {10}, journal = {Biomedicines}, number = {7}, issn = {2227-9059}, doi = {10.3390/biomedicines10071620}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281686}, year = {2022}, abstract = {Though several previous studies reported the in vitro and in vivo antioxidant effects of kinetin (Kn), details on its action in cisplatin-induced toxicity are still scarce. In this study we evaluated, for the first time, the effects of kinetin on cisplatin (CP)-induced liver and lymphocyte toxicity in rats. Wistar male albino rats were divided into nine groups: (i) the control (C); (ii) groups 2, 3 and 4, which received 0.25, 0.5 and 1 mg/kg kinetin for 10 days; (iii) the cisplatin (CP) group, which received a single intraperitoneal injection of CP (7.0 mg/kg); and (iv) groups 6, 7, 8 and 9, which received, for 10 days, 0.25, 0.5 and 1 mg/kg kinetin or 200 mg/kg vitamin C, respectively, and CP on the fourth day. CP-injected rats showed a significant impairment in biochemical, oxidative stress and inflammatory parameters in hepatic tissue and lymphocytes. PCR showed a profound increase in caspase-3 and a significant decline in AKT gene expression.
Intriguingly, Kn treatment restored the biochemical, redox status, and inflammatory parameters. Hepatic AKT and caspase-3 expression as well as CD95 levels in lymphocytes were also restored. In conclusion, Kn mitigated oxidative imbalance, inflammation and apoptosis in CP-induced liver and lymphocyte toxicity; therefore, it can be considered a promising therapy.}, language = {en} } @article{KempfKrugPuppe2023, author = {Kempf, Sebastian and Krug, Markus and Puppe, Frank}, title = {KIETA: Key-insight extraction from scientific tables}, series = {Applied Intelligence}, volume = {53}, journal = {Applied Intelligence}, number = {8}, issn = {0924-669X}, doi = {10.1007/s10489-022-03957-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324180}, pages = {9513-9530}, year = {2023}, abstract = {An important but very time-consuming part of the research process is literature review. An already large and nevertheless growing set of publications, as well as a steadily increasing publication rate, continues to worsen the situation. Consequently, automating this task as far as possible is desirable. Experimental results of systems are key insights of high importance during literature review and are usually represented in the form of tables. Our pipeline KIETA exploits these tables to contribute to the endeavor of automation by extracting them and their contained knowledge from scientific publications. The pipeline is split into multiple steps to guarantee modularity as well as analyzability, and agnosticism regarding the specific scientific domain up until the knowledge extraction step, which is based upon an ontology. Additionally, a dataset of corresponding articles has been manually annotated with information regarding table and knowledge extraction. Experiments show promising results that signal the possibility of an automated system, while also indicating the limits of extracting knowledge from tables without any context.}, language = {en} } @phdthesis{Fehler2010, author = {Fehler, Manuel}, title = {Kalibrierung Agenten-basierter Simulationen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64762}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This thesis addresses the problem of calibrating agent-based simulations (ABS), i.e., the problem of setting the parameter values of an agent-based simulation model in such a way that valid simulation behavior is achieved. The calibration problem for simulations as such is not new; within classical simulation paradigms, such as macro-simulation, it is an established part of research. Compared to the calibration problems considered there, however, the calibration problem for ABS is characterized by a number of additional challenges that make the direct application of existing calibration methods within a limited time difficult or no longer reasonably possible. Solving these problems is the focus of this dissertation: the goal is to support the user in calibrating ABS on the basis of insufficient, potentially erroneous data and knowledge. Three main problems are to be solved: 1) Simplifying the calibration of large sets of agent parameters at the micro level in agent-based simulations by exploiting the specific structure of ABS (namely, their composition from a set of agent models).
2) Calibrating agent-based simulations so that valid simulation behavior is produced on all relevant observation levels (at least the micro and macro levels), with the aggravating constraint that the calibration must be carried out under the assumption of a macro-micro knowledge gap. 3) Calibrating agent-based simulations at the micro level under the condition that the data available for validating the behavior of individual agent models are insufficient and potentially distorted. To this end, this thesis develops the so-called macro-micro procedure for calibrating agent-based simulations. The procedure consists of a base procedure that is extended by several supplementary procedures over the course of the thesis. The macro-micro procedure and its extensions are intended to make model calibration feasible despite heavily noisy data and limited knowledge about the causal relationships in the original system, and to accelerate the calibration process: 1) Macro-micro calibration procedure: The macro-micro procedure developed in this thesis supports the user through a combined calibration on the micro and the macro observation level, which can be extended by intermediate levels if necessary. The basic idea of the procedure is to split the calibration problem into one problem on the aggregated behavior level and one on the level of micro agent behavior. On the macro level, valid ideal aggregated behavior models (IVM) of the agents are sought. On the micro level, the individual models of the agents are calibrated on the basis of the desired overall behavior and the determined IVM, such that the resulting simulation behavior is valid on both the micro and the macro level. 2) Extension 1: Robust calibration: To enable the handling of potentially noisy validation criteria (i.e., noisy data about the original system on which the simulation's validation criteria are based) and model parts during the calibration of ABS, a robust calibration technique is developed for use in the macro-micro procedure. 3) Extension 2: Calibration with heterogeneity search: As a second extension of the macro-micro procedure, a method is developed that addresses the problem of the unclear level of detail of ABS at the level of parameter values. In principle, every agent can use different parameter values, even though less heterogeneity would suffice to produce valid behavior. The developed extension attempts to determine a suitable heterogeneity configuration for the agents' parameter values during calibration, where a heterogeneity configuration is understood as a partition of the simulated agents into groups that each share the same parameter values. The heterogeneity search serves to find a compromise between the necessity of searching very large parameter spaces and the desire to keep the search space as small as possible.}, subject = {Computersimulation}, language = {de} } @article{SteinhaeusserOberdoerfervonMammenetal.2022, author = {Steinhaeusser, Sophia C. and Oberd{\"o}rfer, Sebastian and von Mammen, Sebastian and Latoschik, Marc Erich and Lugrin, Birgit}, title = {Joyful adventures and frightening places - designing emotion-inducing virtual environments}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.919163}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284831}, year = {2022}, abstract = {Virtual environments (VEs) can evoke and support emotions, as experienced when playing emotionally arousing games. We theoretically approach the design of fear- and joy-evoking VEs based on a literature review of empirical studies on virtual and real environments as well as video game reviews and content analyses. We define the design space and identify central design elements that evoke specific positive and negative emotions. Based on that, we derive and present guidelines for emotion-inducing VE design with respect to design themes, colors and textures, and lighting configurations. To validate our guidelines in two user studies, we 1) expose participants to 360° videos of VEs designed following the individual guidelines and 2) immerse them in neutral, positive, and negative emotion-inducing VEs combining all respective guidelines in Virtual Reality. The results support our theoretically derived guidelines by revealing significant differences in terms of fear and joy induction.}, language = {en} } @article{LandeckAlvarezIgarzabalUnruhetal.2022, author = {Landeck, Maximilian and Alvarez Igarz{\´a}bal, Federico and Unruh, Fabian and Habenicht, Hannah and Khoshnoud, Shiva and Wittmann, Marc and Lugrin, Jean-Luc and Latoschik, Marc Erich}, title = {Journey through a virtual tunnel: Simulated motion and its effects on the experience of time}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.1059971}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-301519}, year = {2022}, abstract = {This paper examines the relationship between time and motion perception in virtual environments. Previous work has shown that the perception of motion can affect the perception of time. We developed a virtual environment that simulates motion in a tunnel and measured its effects on the estimation of the duration of time, the speed at which perceived time passes, and the illusion of self-motion, also known as vection. When large areas of the visual field move in the same direction, vection can occur; observers often perceive this as self-motion rather than motion of the environment. To generate different levels of vection and investigate its effects on time perception, we developed an abstract procedural tunnel generator. The generator can simulate different speeds and densities of tunnel sections (visibly distinguishable sections that form the virtual tunnel), as well as the degree of embodiment of the user avatar (with or without virtual hands). We exposed participants to various tunnel simulations with different durations, speeds, and densities in a remote desktop study and a virtual reality (VR) laboratory study. Time passed subjectively faster under high-speed and high-density conditions in both studies. The experience of self-motion was also stronger under high-speed and high-density conditions. Both studies revealed a significant correlation between the perceived passage of time and perceived self-motion.
Subjects in the virtual reality study reported a stronger self-motion experience, a faster perceived passage of time, and shorter time estimates than subjects in the desktop study. Our results suggest that a virtual tunnel simulation can manipulate time perception in virtual reality. In future work, we will explore these results for the development of virtual reality applications for therapeutic approaches. This could be particularly useful in treating disorders like depression, autism, and schizophrenia, which are known to be associated with distortions in time perception. For example, the tunnel could be applied therapeutically to reset patients' time perception by exposing them to it under different conditions, such as increasing or decreasing the perceived passage of time.}, language = {en} } @article{KarlDandekar2013, author = {Karl, Stefan and Dandekar, Thomas}, title = {Jimena: Efficient computing and system state identification for genetic regulatory networks}, series = {BMC Bioinformatics}, volume = {14}, journal = {BMC Bioinformatics}, doi = {10.1186/1471-2105-14-306}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-128671}, year = {2013}, abstract = {Background: Boolean networks capture the switching behavior of many naturally occurring regulatory networks. For semi-quantitative modeling, interpolation between ON and OFF states is necessary. The high-degree polynomial interpolation of Boolean genetic regulatory networks (GRNs) in cellular processes such as apoptosis or proliferation allows for the modeling of a wider range of node interactions than continuous activator-inhibitor models, but suffers from scaling problems for networks which contain nodes with more than ~10 inputs. Many GRNs from the literature or from new gene expression experiments exceed those limitations, and a new approach was developed. Results: (i) As part of our new GRN simulation framework Jimena, we introduce and set up Boolean-tree-based data structures; (ii) corresponding algorithms greatly expedite the calculation of the polynomial interpolation in almost all cases, thereby expanding the range of networks which can be simulated by this model in reasonable time. (iii) Stable states for discrete models are efficiently counted and identified using binary decision diagrams. As an application example, we show how system states can now be sampled efficiently in small to large-scale hormone disease networks (Arabidopsis thaliana development and immunity, pathogen Pseudomonas syringae and modulation by cytokinins and plant hormones). Conclusions: Jimena simulates currently available GRNs about 10-100 times faster than the previous implementation of the polynomial interpolation model, and even greater gains are achieved for large scale-free networks. This speed-up also facilitates a much more thorough sampling of continuous state spaces, which may lead to the identification of new stable states. Mutants of large networks can be constructed and analyzed very quickly, enabling new insights into network robustness and behavior.}, language = {en} } @techreport{RauberBrechtelSchotten2023, type = {Working Paper}, author = {Rauber, Christof A. O. and Brechtel, Lukas and Schotten, Hans D.}, title = {JCAS-Enabled Sensing as a Service in 6th-Generation Mobile Communication Networks}, series = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, journal = {KuVS Fachgespr{\"a}ch - W{\"u}rzburg Workshop on Modeling, Analysis and Simulation of Next-Generation Communication Networks 2023 (WueWoWAS'23)}, doi = {10.25972/OPUS-32213}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322135}, pages = {4}, year = {2023}, abstract = {The introduction of new types of frequency spectrum in 6G technology facilitates the convergence of conventional mobile communications and radar functions. Thus, the mobile network itself becomes a versatile sensor system. This enables mobile network operators to offer a sensing service in addition to conventional data and telephony services. The potential benefits are expected to accrue to various stakeholders, including individuals, the environment, and society in general. The paper discusses the technological development, possible integration, and use cases, as well as future areas of development.}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Java Web Frameworks Which One to Choose?}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49407}, year = {2010}, abstract = {This article discusses web frameworks that are available to a software developer in the Java language. It introduces the MVC paradigm and some frameworks that implement it. The article presents an overview of the Struts, Spring MVC, and JSF frameworks, as well as guidelines for selecting one of them as a development environment.}, subject = {Java Frameworks}, language = {en} }