@article{KlemzRote2022, author = {Klemz, Boris and Rote, G{\"u}nter}, title = {Linear-Time Algorithms for Maximum-Weight Induced Matchings and Minimum Chain Covers in Convex Bipartite Graphs}, series = {Algorithmica}, volume = {84}, journal = {Algorithmica}, number = {4}, issn = {1432-0541}, doi = {10.1007/s00453-021-00904-w}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-267876}, pages = {1064-1080}, year = {2022}, abstract = {A bipartite graph G=(U,V,E) is convex if the vertices in V can be linearly ordered such that for each vertex u∈U, the neighbors of u are consecutive in the ordering of V. An induced matching H of G is a matching for which no edge of E connects endpoints of two different edges of H. We show that in a convex bipartite graph with n vertices and m weighted edges, an induced matching of maximum total weight can be computed in O(n+m) time. An unweighted convex bipartite graph has a representation of size O(n) that records for each vertex u∈U the first and last neighbor in the ordering of V. Given such a compact representation, we compute an induced matching of maximum cardinality in O(n) time. In convex bipartite graphs, maximum-cardinality induced matchings are dual to minimum chain covers. A chain cover is a covering of the edge set by chain subgraphs, that is, subgraphs that do not contain induced matchings of more than one edge. Given a compact representation, we compute a representation of a minimum chain cover in O(n) time. If no compact representation is given, the cover can be computed in O(n+m) time. All of our algorithms achieve optimal linear running time for the respective problem and model, and they improve and generalize the previous results in several ways: The best algorithms for the unweighted problem versions had a running time of O(n\(^{2}\)) (Brandst{\"a}dt et al. in Theor. Comput. Sci. 381(1-3):260-265, 2007. https://doi.org/10.1016/j.tcs.2007.04.006). The weighted case has not been considered before.}, language = {en} } @article{FischerHarteltPuppe2023, author = {Fischer, Norbert and Hartelt, Alexander and Puppe, Frank}, title = {Line-level layout recognition of historical documents with background knowledge}, series = {Algorithms}, volume = {16}, journal = {Algorithms}, number = {3}, issn = {1999-4893}, doi = {10.3390/a16030136}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-310938}, year = {2023}, abstract = {Digitization and transcription of historic documents offer new research opportunities for humanists and are the topics of many edition projects. However, manual work is still required for the main phases of layout recognition and the subsequent optical character recognition (OCR) of early printed documents. This paper describes and evaluates how deep learning approaches recognize text lines and can be extended to layout recognition using background knowledge. The evaluation was performed on five corpora of early prints from the 15th and 16th centuries, representing a variety of layout features.
While the main text with standard layouts could be recognized in the correct reading order with precision and recall of up to 99.9\%, complex layouts were also recognized at rates as high as 90\% by using background knowledge, whose full potential was revealed when many pages of the same source were transcribed.}, language = {en} } @phdthesis{Somody2023, author = {Somody, Joseph Christian Campbell}, title = {Leveraging deep learning for identification and structural determination of novel protein complexes from \(in\) \(situ\) electron cryotomography of \(Mycoplasma\) \(pneumoniae\)}, doi = {10.25972/OPUS-31344}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-313447}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The holy grail of structural biology is to study a protein in situ, and this goal has been fast approaching since the resolution revolution and the achievement of atomic resolution. A cell's interior is not a dilute environment, and proteins have evolved to fold and function as needed in that environment; as such, an investigation of a cellular component should ideally include the full complexity of the cellular environment. Imaging whole cells in three dimensions using electron cryotomography is the best method to accomplish this goal, but it comes with a limitation on sample thickness and produces noisy data not amenable to direct analysis. This thesis establishes a novel workflow to systematically analyse whole-cell electron cryotomography data in three dimensions and to find and identify instances of protein complexes in the data, setting up the determination of their structure and identity for success. Mycoplasma pneumoniae is a very small parasitic bacterium with fewer than 700 protein-coding genes, is thin enough and small enough to be imaged in large quantities by electron cryotomography, and can grow directly on the grids used for imaging, making it ideal for exploratory studies in structural proteomics. As part of the workflow, a methodology for training deep-learning-based particle-picking models is established. As a proof of principle, a dataset of whole-cell Mycoplasma pneumoniae tomograms is used with this workflow to characterize a novel membrane-associated complex observed in the data. Ultimately, 25431 such particles are picked from 353 tomograms and refined to a density map with a resolution of 11 {\AA}. Orthogonal datasets were used to filter the search space and to verify results; structures were predicted for candidate proteins and checked for a suitable fit in the density map. In the end, with this approach, nine proteins were found to be part of the complex, which appears to be associated with chaperone activity and to interact with translocon machinery. Visual proteomics refers to the ultimate potential of in situ electron cryotomography: the comprehensive interpretation of tomograms.
The workflow presented here is demonstrated to help in reaching that potential.}, subject = {Kryoelektronenmikroskopie}, language = {en} } @phdthesis{Dang2012, author = {Dang, Nghia Duc}, title = {Konzeption und Evaluation eines hybriden, skalierbaren Werkzeugs zur mechatronischen Systemdiagnose am Beispiel eines Diagnosesystems f{\"u}r freie Kfz-Werkst{\"a}tten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70774}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {The development of a knowledge-based system, in particular a diagnosis system, is a subdiscipline of artificial intelligence and applied computer science. In the course of research in this field, various solution approaches have been developed, with varying success in their application to automotive diagnosis. Diagnosis systems in franchised workshops, i.e., workshops tied to a vehicle manufacturer, mainly apply case-based diagnosis. On the one hand, the variety of vehicles there is limited; on the other hand, there is an obligation to report new cases that are not yet present in the system. Independent workshops do not have such a database, so the case-based approach is difficult to implement. In independent workshops - workshops not tied to a vehicle manufacturer - troubleshooting is mainly based on fault trees. Owing to the growing complexity of vehicles, which is largely driven by the rapidly increasing number of functions realized by mechatronic systems, and the growing variety of vehicle types, guided troubleshooting in independent workshops is not always effective. To support the personnel of independent workshops in future troubleshooting, new generations of manufacturer-independent diagnosis tools are needed that solve the problems of variant diversity and complexity. This thesis presents a solution approach that unites a qualitative, model-based diagnosis approach with an approach based on heuristic diagnostic knowledge. Besides the foundations of knowledge elicitation, the thesis covers the theoretical basis for mastering variant diversity as well as the tests for the created diagnosis models. The diagnosis is symptom-based, and the inference mechanisms for processing the diagnostic knowledge combine the propagation of deviating physical quantities through the model with the evaluation of the heuristic knowledge. Furthermore, the thesis presents various aspects of the realization of the developed theoretical foundations, for example the system architecture, the knowledge elicitation process, and the course of the diagnostic procedure in the workshops. The evaluation of the developed solution during knowledge elicitation, in the form of model building and modeling workshops as well as field tests, serves not only to confirm the developed approach but also to generate ideas for integrating the developed tools into the existing IT infrastructure.}, subject = {Diagnosesystem}, language = {de} } @phdthesis{Atzmueller2006, author = {Atzm{\"u}ller, Martin}, title = {Knowledge-Intensive Subgroup Mining - Techniques for Automatic and Interactive Discovery}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-21004}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Data mining has proved its significance in various domains and applications. As an important subfield of the general data mining task, subgroup mining can be used, e.g., for marketing purposes in business domains, or for quality profiling and analysis in medical domains. The goal is to efficiently discover novel, potentially useful and ultimately interesting knowledge. However, in real-world situations these requirements often cannot be fulfilled, e.g., if the applied methods do not scale for large data sets, if too many results are presented to the user, or if many of the discovered patterns are already known to the user. This thesis proposes a combination of several techniques in order to cope with the sketched problems: We discuss automatic methods, including heuristic and exhaustive approaches, and especially present the novel SD-Map algorithm for exhaustive subgroup discovery that is fast and effective. For an interactive approach we describe techniques for subgroup introspection and analysis, and we present advanced visualization methods, e.g., the zoomtable that directly shows the most important parameters of a subgroup and that can be used for optimization and exploration. We also describe various visualizations for subgroup comparison and evaluation in order to support the user during these essential steps. Furthermore, we propose to include into the mining process background knowledge that is available and easy to formalize. We can utilize the knowledge in many ways: To focus the search process, to restrict the search space, and ultimately to increase the efficiency of the discovery method. We especially present background knowledge to be applied for filtering the elements of the problem domain, for constructing abstractions, for aggregating values of attributes, and for the post-processing of the discovered set of patterns. Finally, the techniques are combined into a knowledge-intensive process supporting both automatic and interactive methods for subgroup mining. The practical significance of the proposed approach strongly depends on the available tools. We introduce the VIKAMINE system as a highly-integrated environment for knowledge-intensive active subgroup mining. Also, we present an evaluation consisting of two parts: With respect to objective evaluation criteria, i.e., comparing the efficiency and the effectiveness of the subgroup discovery methods, we provide an experimental evaluation using generated data. For that task we present a novel data generator that allows a simple and intuitive specification of the data characteristics. The results of the experimental evaluation indicate that, on data sets similar to the intended application, the novel SD-Map method outperforms the other described algorithms in terms of efficiency, and also with respect to precision and recall when compared to the heuristic methods.
Subjective evaluation criteria include user acceptance, the benefit of the approach, and the interestingness of the results. We present five case studies utilizing the presented techniques: The approach has been successfully implemented in medical and technical applications using real-world data sets. The method was very well accepted by the users, who were able to discover novel, useful, and interesting knowledge.}, subject = {Data Mining}, language = {en} } @article{OberdoerferLatoschik2019, author = {Oberd{\"o}rfer, Sebastian and Latoschik, Marc Erich}, title = {Knowledge encoding in game mechanics: transfer-oriented knowledge learning in desktop-3D and VR}, series = {International Journal of Computer Games Technology}, volume = {2019}, journal = {International Journal of Computer Games Technology}, doi = {10.1155/2019/7626349}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-201159}, pages = {7626349}, year = {2019}, abstract = {Affine Transformations (ATs) are a complex and abstract learning content. Encoding the AT knowledge in Game Mechanics (GMs) achieves a repetitive knowledge application and audiovisual demonstration. Playing a serious game providing these GMs leads to motivating and effective knowledge learning. Using immersive Virtual Reality (VR) has the potential to even further increase the serious game's learning outcome and learning quality. This paper compares the effectiveness and efficiency of desktop-3D and VR with respect to the achieved learning outcome. Also, the present study analyzes the effectiveness of an enhanced audiovisual knowledge encoding and the provision of a debriefing system. The results validate the effectiveness of the knowledge encoding in GMs to achieve knowledge learning. The study also indicates that VR is beneficial for the overall learning quality and that an enhanced audiovisual encoding has only a limited effect on the learning outcome.}, language = {en} } @article{FathyDarwishAbdelhamidetal.2022, author = {Fathy, Moustafa and Darwish, Mostafa A. and Abdelhamid, Al-Shaimaa M. and Alrashedy, Gehad M. and Othman, Othman Ali and Naseem, Muhammad and Dandekar, Thomas and Othman, Eman M.}, title = {Kinetin ameliorates cisplatin-induced hepatotoxicity and lymphotoxicity via attenuating oxidative damage, cell apoptosis and inflammation in rats}, series = {Biomedicines}, volume = {10}, journal = {Biomedicines}, number = {7}, issn = {2227-9059}, doi = {10.3390/biomedicines10071620}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281686}, year = {2022}, abstract = {Though several previous studies reported the in vitro and in vivo antioxidant effect of kinetin (Kn), details on its action in cisplatin-induced toxicity are still scarce. In this study we evaluated, for the first time, the effects of kinetin in cisplatin (CP)-induced liver and lymphocyte toxicity in rats. Wistar male albino rats were divided into nine groups: (i) the control (C), (ii) groups 2, 3 and 4, which received 0.25, 0.5 and 1 mg/kg kinetin for 10 days; (iii) the cisplatin (CP) group, which received a single intraperitoneal injection of CP (7.0 mg/kg); and (iv) groups 6, 7, 8 and 9, which received, for 10 days, 0.25, 0.5 and 1 mg/kg kinetin or 200 mg/kg vitamin C, respectively, and CP on the fourth day. CP-injected rats showed a significant impairment in biochemical, oxidative stress and inflammatory parameters in hepatic tissue and lymphocytes. PCR showed a profound increase in caspase-3, and a significant decline in AKT gene expression.
Intriguingly, Kn treatment restored the biochemical, redox, and inflammatory parameters. Hepatic AKT and caspase-3 expression as well as CD95 levels in lymphocytes were also restored. In conclusion, Kn mitigated oxidative imbalance, inflammation and apoptosis in CP-induced liver and lymphocyte toxicity; therefore, it can be considered a promising therapy.}, language = {en} } @article{KempfKrugPuppe2023, author = {Kempf, Sebastian and Krug, Markus and Puppe, Frank}, title = {KIETA: Key-insight extraction from scientific tables}, series = {Applied Intelligence}, volume = {53}, journal = {Applied Intelligence}, number = {8}, issn = {0924-669X}, doi = {10.1007/s10489-022-03957-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-324180}, pages = {9513-9530}, year = {2023}, abstract = {An important but very time-consuming part of the research process is literature review. An already large and nevertheless growing body of publications, as well as a steadily increasing publication rate, continues to worsen the situation. Consequently, automating this task as far as possible is desirable. Experimental results of systems are key insights of high importance during literature review and are usually represented in the form of tables. Our pipeline KIETA exploits these tables to contribute to the endeavor of automation by extracting them and their contained knowledge from scientific publications. The pipeline is split into multiple steps to guarantee modularity as well as analyzability, and agnosticism regarding the specific scientific domain up until the knowledge extraction step, which is based upon an ontology. Additionally, a dataset of corresponding articles has been manually annotated with information regarding table and knowledge extraction. Experiments show promising results that signal the possibility of an automated system, while also indicating limits of extracting knowledge from tables without any context.}, language = {en} } @phdthesis{Fehler2010, author = {Fehler, Manuel}, title = {Kalibrierung Agenten-basierter Simulationen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64762}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This thesis addresses the problem of calibrating agent-based simulations (ABS), i.e., the problem of setting the parameter values of an agent-based simulation model such that valid simulation behavior is achieved. The calibration problem for simulations as such is not new and is an established part of research within classical simulation paradigms such as macro-simulation. Compared to the calibration problems considered there, however, the calibration problem for ABS is characterized by a number of additional challenges that make the direct application of existing calibration methods within limited time difficult or no longer reasonably feasible. Solving these problems is the focus of this dissertation: the goal is to support the user in calibrating ABS on the basis of insufficient, potentially erroneous data and knowledge. Three main problems are to be solved: 1) Simplifying the calibration of large sets of agent parameters at the micro level in agent-based simulations by exploiting the specific structure of ABS (namely, their composition from a set of agent models).
2) Calibrating agent-based simulations such that valid simulation behavior is produced at all relevant observation levels (at least the micro and macro levels), with the aggravating constraint that the calibration must be performed under the assumption of a macro-micro knowledge gap. 3) Calibrating agent-based simulations at the micro level under the assumption that the data available for validating the behavior of individual agent models are insufficient and potentially distorted. To this end, this thesis develops the so-called macro-micro method for calibrating agent-based simulations. The method consists of a base procedure that is extended by several supplementary procedures in the course of the thesis. The macro-micro method and its extensions are intended to make model calibration feasible despite heavily noisy data and limited knowledge about the causal relationships in the original system, while accelerating the calibration process: 1) Macro-micro calibration method: The macro-micro method developed in this thesis supports the user through a combined calibration at the micro and macro observation levels, which can be extended by intermediate levels if necessary. The basic idea of the method is to split the calibration problem into one problem at the level of aggregated behavior and one at the level of micro agent behavior. At the macro level, valid ideal aggregated behavior models (IVM) of the agents are sought. At the micro level, the individual models of the agents are calibrated, based on the desired overall behavior and the determined IVM, such that the resulting simulation behavior is valid at both the micro and the macro level. 2) Extension 1: Robust calibration: To enable the handling of potentially noisy validation criteria (i.e., noisy data about an original system on which the simulation's validation criteria are based) and model parts during the calibration of ABS, a robust calibration technique is developed for use in the macro-micro method. 3) Extension 2: Calibration with heterogeneity search: As a second extension of the macro-micro method, a procedure is developed that addresses the problem of the unclear level of detail of ABS at the level of parameter values. In principle, every agent may use different parameter values, even though less heterogeneity would suffice to produce valid behavior. The developed extension attempts to determine a suitable degree of heterogeneity for the agents' parameter values during calibration. A degree of heterogeneity is understood here as a partition of the simulated agents into groups whose members share the same parameter values. The heterogeneity search serves to find a compromise between the necessity of searching very large parameter spaces and the desire to keep the search space as small as possible.}, subject = {Computersimulation}, language = {de} } @article{SteinhaeusserOberdoerfervonMammenetal.2022, author = {Steinhaeusser, Sophia C.
and Oberd{\"o}rfer, Sebastian and von Mammen, Sebastian and Latoschik, Marc Erich and Lugrin, Birgit}, title = {Joyful adventures and frightening places - designing emotion-inducing virtual environments}, series = {Frontiers in Virtual Reality}, volume = {3}, journal = {Frontiers in Virtual Reality}, issn = {2673-4192}, doi = {10.3389/frvir.2022.919163}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-284831}, year = {2022}, abstract = {Virtual environments (VEs) can evoke and support emotions, as experienced when playing emotionally arousing games. We theoretically approach the design of fear and joy evoking VEs based on a literature review of empirical studies on virtual and real environments as well as video games' reviews and content analyses. We define the design space and identify central design elements that evoke specific positive and negative emotions. Based on that, we derive and present guidelines for emotion-inducing VE design with respect to design themes, colors and textures, and lighting configurations. To validate our guidelines in two user studies, we 1) expose participants to 360° videos of VEs designed following the individual guidelines and 2) immerse them in a neutral, positive and negative emotion-inducing VEs combining all respective guidelines in Virtual Reality. The results support our theoretically derived guidelines by revealing significant differences in terms of fear and joy induction.}, language = {en} }