@unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Using Machine Learning Algorithms for Categorizing Quranic Chapters by Major Phases of Prophet Mohammad's Messengership}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66862}, year = {2011}, abstract = {This paper discusses the categorization of Quranic chapters by major phases of Prophet Mohammad's messengership using machine learning algorithms. First, the chapters were categorized by places of revelation using Support Vector Machine and na{\"i}ve Bayesian classifiers separately, and their results were compared to each other, as well as to the existing traditional Islamic and western orientalists' classifications. The chapters were categorized into Meccan (revealed in Mecca) and Medinan (revealed in Medina). After that, the chapters of each category were clustered using a kind of fuzzy single-linkage clustering approach, in order to correspond to the major phases of Prophet Mohammad's life. The major phases of the Prophet's life were manually derived from the Quranic text, as well as from the secondary Islamic literature, e.g. hadiths and exegesis. Previous studies on computing the places of revelation of Quranic chapters relied heavily on features extracted from existing background knowledge of the chapters. For instance, it is known that Meccan chapters contain mostly verses about faith and related problems, while Medinan ones encompass verses dealing with social issues, battles, etc. These features are by themselves insufficient as a basis for assigning the chapters to their respective places of revelation. In fact, there are exceptions, since some chapters do contain both Meccan and Medinan features. In this study, features of each category were automatically created from very few chapters, whose places of revelation have been determined through identification of historical facts and events such as battles, the migration to Medina, etc. Chapters with unanimously agreed places of revelation were used as the initial training set, while the remaining chapters formed the testing set. The classification process was made recursive by regularly augmenting the training set with correctly classified chapters, in order to classify the whole testing set. Each chapter was preprocessed by removing unimportant words, stemming, and representation with the vector space model. The results of this study show that the two classifiers produced usable results, with the Support Vector Machine classifier outperforming the na{\"i}ve Bayesian one. This study indicates that the proposed methodology yields encouraging results for arranging Quranic chapters by phases of Prophet Mohammad's messengership.}, subject = {Koran}, language = {en} }
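The recursive training-set augmentation described in the abstract above can be illustrated in a few lines. The following is a minimal sketch, not the paper's code: scikit-learn, the confidence threshold, and the toy stand-in texts are all assumptions made for the example.

```python
# Illustrative sketch (not from the paper): recursive training-set augmentation
# for Meccan/Medinan chapter classification, assuming scikit-learn is available.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

# Toy stand-ins for chapters with unanimously agreed places of revelation.
train_texts = ["faith resurrection warning", "battle treaty community rules"]
train_labels = ["meccan", "medinan"]
unlabeled = ["warning resurrection faith patience", "rules inheritance community"]

vec = TfidfVectorizer()
clf = MultinomialNB()
THRESHOLD = 0.8  # invented confidence needed before a chapter joins the training set

while unlabeled:
    X = vec.fit_transform(train_texts)
    clf.fit(X, train_labels)
    probs = clf.predict_proba(vec.transform(unlabeled))
    confident = [(i, p.argmax()) for i, p in enumerate(probs) if p.max() >= THRESHOLD]
    if not confident:
        break  # nothing classified confidently enough; stop augmenting
    for i, cls in sorted(confident, reverse=True):  # pop from the back first
        train_texts.append(unlabeled.pop(i))
        train_labels.append(clf.classes_[cls])
print(train_labels)
```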
@phdthesis{Schlosser2011, author = {Schlosser, Daniel}, title = {Quality of Experience Management in Virtual Future Networks}, doi = {10.25972/OPUS-5719}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69986}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Currently, we are observing a drastic proliferation of services and applications that use the Internet for data transport, and the requirements these services place on the network differ considerably. This diversity of services significantly complicates network management, since a data transport provider can hardly distinguish the different connections without analyzing the content of the transported data. Network virtualization is a promising solution to this problem, as it makes it possible to run different virtual networks for different services on the same physical substrate. This separation of services allows each individual network to be controlled in an application-specific way. The goal of such network control is to optimize both the quality of service experienced by the user and the cost efficiency of the data transport. Moreover, network virtualization makes it possible to abstract the physical network to such an extent that the currently tightly interlocked roles of network owner and network operator can be decoupled. Furthermore, network virtualization ensures that different data networks operated simultaneously on the same physical network can neither influence nor disturb each other. This thesis addresses selected aspects of this problem area and focuses on operating and controlling a virtual network with the best possible service quality for the user. To this end, a top-down approach is chosen, starting from the use cases, a possible network virtualization architecture, and current hardware virtualization capabilities. The thesis then focuses on determining and optimizing the user-perceived service quality (QoE) on the application layer and discusses options for measuring and monitoring essential network parameters in virtualized networks.}, subject = {Netzwerkmanagement}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Computing Generic Causes of Revelation of the Quranic Verses Using Machine Learning Techniques}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66083}, year = {2011}, abstract = {Because many verses of the holy Quran are similar, there is a high probability that similar verses addressing the same issues share the same generic causes of revelation. In this study, machine learning techniques have been employed to automatically derive the causes of revelation of Quranic verses. The derivation of the causes of revelation is viewed as a classification problem. Initially, the categories are based on the verses with known causes of revelation, and the testing set consists of the remaining verses. Based on a computed threshold value, a na{\"i}ve Bayesian classifier is used to categorize some verses. After that, using a decision tree classifier, the remaining uncategorized verses are separated into verses that contain indicators (resultative connectors, causative expressions, etc.) and those that do not. Each verse with indicators is segmented into its constituent clauses by identification of the linking indicators. A dominant clause is then extracted and either considered as the cause of revelation or post-processed by adding or subtracting some terms to form a causal clause that constitutes the cause of revelation. Concerning the remaining unclassified verses without indicators, a na{\"i}ve Bayesian classifier is again used to assign each of them to one of the existing classes based on feature and topic similarity. Verses that could not be classified in this way were classified manually, with each such verse considered as a category of its own. The results obtained in this study are encouraging and show that automatic derivation of the generic causes of revelation of Quranic verses is achievable and reasonably reliable for understanding and implementing the teachings of the Quran.}, subject = {Text Mining}, language = {en} }
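The clause segmentation at linking indicators described above can be sketched as follows. This is not the study's implementation: the English-gloss indicator list, the function name, and the sample verse are invented for illustration (the study operates on Arabic connectors).

```python
# Illustrative sketch (not the paper's implementation): segmenting a verse into
# clauses at causal/resultative indicators and extracting a candidate causal clause.
import re

# Hypothetical English-gloss indicators; the study works on Arabic connectors.
INDICATORS = r"\b(because|so that|therefore|in order that|for)\b"

def causal_clause(verse: str) -> str | None:
    parts = re.split(INDICATORS, verse, flags=re.IGNORECASE)
    if len(parts) < 3:
        return None  # no indicator found; verse goes to the Bayesian classifier
    # Take the clause following the first indicator as the dominant candidate.
    return parts[2].strip(" .,")

print(causal_clause("They were punished because they broke the covenant"))
```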
@unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Design and Implementation of a Model-driven XML-based Integrated System Architecture for Assisting Analysis, Understanding, and Retention of Religious Texts: The Case of The Quran}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65737}, year = {2011}, abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. Once deeper understanding is achieved, one would like to organize and manage one's knowledge in such a way that it can be easily remembered and efficiently transmitted to others. This paper discusses modeling religious texts using semantic XML markup based on frame-based knowledge representation, with the purpose of assisting understanding, retention, and sharing of the knowledge they contain. In this study, books organized into chapters made up of verses are considered as the source of knowledge to model. Some metadata representing the multiple perspectives of knowledge modeling are assigned to each chapter and verse. Chapters and verses with their metadata form a meta-model, which is represented using frames and published on a web mashup. An XML-based annotation and visualization system has been developed, equipped with user interfaces for creating static and dynamic metadata, annotating chapters' contents according to user-selected semantics, and templates for publishing the generated knowledge on the Internet. The system has been applied to the Quran, and the results obtained show that multiple perspectives of information modeling can be successfully applied to religious texts in order to support analysis, understanding, and retention of the texts.}, subject = {Wissensrepr{\"a}sentation}, language = {en} } @unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Computer-based Textual Documents Collation System for Reconstructing the Original Text from Automatically Identified Base Text and Ranked Witnesses}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65749}, year = {2011}, abstract = {Given a collection of diverging documents about some lost original text, any person interested in the text would try to reconstruct it from the diverging documents. Whether one follows eclecticism, stemmatics, or the copy-text method, one is expected to explicitly or implicitly select one of the documents as a starting point or base text, which can be emended through comparison with the remaining documents, so that a text that can be designated as the original document is generated. Unfortunately, the process of giving priority to one of the documents, also known as witnesses, is a subjective one. In fact, even cladistics, which can be considered a computer-based approach to implementing stemmatics, does not recommend that users select a certain witness as a starting point for reconstructing the original document. In this study, a computational method using a rule-based Bayesian classifier is presented to assist text scholars in their attempts at reconstructing a non-existing document from the available witnesses. The method developed in this study consists of selecting each document in turn as a base text and collating it with the remaining documents. Each completed collation cycle stores the selected base text and its closest witness, along with a weighted score of their similarities and differences. At the end of the collation process, the witness selected most often by the majority of base texts is considered the probable base text of the collection. Witnesses' scores are weighted using a weighting system based on the effects that the various types of textual modification have on the process of reconstructing original documents. Users can choose between baseless and base-text collation. If a base text is selected, the task is reduced to ranking the witnesses with respect to the base text; otherwise, a base text as well as a ranking of the witnesses with respect to it are computed and displayed in a bar diagram. Additionally, this study includes a recursive algorithm for automatically reconstructing the original text from the identified base text and the ranked witnesses.}, subject = {Textvergleich}, language = {en} }
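The majority vote over collation cycles described above can be illustrated with a toy example. This is a sketch only: the similarity measure (difflib's ratio) and the three miniature witnesses are stand-ins, not the thesis' weighting system.

```python
# Illustrative sketch (not the thesis software): choosing a probable base text by
# letting each document vote for its closest witness via pairwise similarity.
from collections import Counter
from difflib import SequenceMatcher

witnesses = {
    "A": "the quick brown fox jumps over the lazy dog",
    "B": "the quick brown fox jumped over a lazy dog",
    "C": "a quick brown fox jumps over the dog",
}

votes = Counter()
for name, text in witnesses.items():
    closest = max(
        (other for other in witnesses if other != name),
        key=lambda o: SequenceMatcher(None, text, witnesses[o]).ratio(),
    )
    votes[closest] += 1  # this collation cycle votes for its closest witness

base_text, _ = votes.most_common(1)[0]
print("probable base text:", base_text)
```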
@unpublished{Nassourou2011, author = {Nassourou, Mohamadou}, title = {Philosophical and Computational Approaches for Estimating and Visualizing Months of Revelations of Quranic Chapters}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65784}, year = {2011}, abstract = {The question of why the structure of the Quran does not follow its chronology of revelation is a recurring one. Some Islamic scholars such as [1] have answered the question using hadiths, as well as other philosophical reasons based on internal evidence of the Quran itself. Unfortunately, the issue still puzzles many to this day. Muslims believe that the Quran is a summary and a copy of the content of a preserved tablet called the Lawhul-Mahfuz, located in heaven. Logically speaking, this suggests that the arrangement of the verses and chapters is expected to be similar to that of the Lawhul-Mahfuz. As for the arrangement of the verses within each chapter, there is unanimity that it was carried out by the Prophet himself under the guidance of the Angel Gabriel with the recommendation of God. But concerning the ordering of the chapters, there are reports of divergences [3] among the Prophet's companions as to which chapter should precede which. This paper argues that the Quranic chapters might have been arranged according to the months and seasons of their revelation. In fact, based on some verses of the Quran, it is defensible that the Lawhul-Mahfuz itself is understood to have been structured in terms of the months of the year. In this study, philosophical and mathematical arguments for computing the chapters' months of revelation are discussed, and the result is displayed on an interactive scatter plot.}, subject = {Text Mining}, language = {en} } @phdthesis{Selbach2011, author = {Selbach, Stefan}, title = {Hybride bitparallele Volltextsuche}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66476}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The great advantage of a q-gram index is that it allows searching for arbitrary character strings in a document collection. A drawback, however, is that with growing data volumes this index tends to become very large, which is associated with a significant drop in performance. This thesis presents a novel technique that increases the performance of a q-gram index by means of additional M-matrices for each q-gram and by combining it with an inverted index. An M-matrix is a bit matrix containing information about the positions of a q-gram. When two or more q-grams are combined, these M-matrices also provide information about the positions of the combination. This can be used to reduce the complexity of merging the q-gram hit lists for a given search query and improves the performance of the n-gram inverted index. The combination with a term-based inverted index further speeds up the average search time and unites the advantages of both index formats. Redundant information in the q-gram index is reduced, and further functionality is added, such as ranking hits by relevance, the ability to search for concepts, and creating index partitions according to the importance of the contained terms.}, subject = {Information Retrieval}, language = {de} }
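The positional bit-matrix idea behind the M-matrices can be demonstrated with plain integers as bit masks. The following sketch is an assumption-laden simplification (one document, one integer per q-gram) rather than the thesis' data structure.

```python
# Illustrative sketch of the position bit-matrix idea: one integer per q-gram,
# bit i set iff the q-gram starts at position i. Intersecting a shifted mask
# yields positions where two q-grams occur in sequence.
Q = 2

def qgram_masks(text: str) -> dict[str, int]:
    masks: dict[str, int] = {}
    for i in range(len(text) - Q + 1):
        g = text[i:i + Q]
        masks[g] = masks.get(g, 0) | (1 << i)
    return masks

text = "abracadabra"
masks = qgram_masks(text)
# Positions where "ab" is immediately followed by "ra" (i.e. the string "abra"):
combined = masks.get("ab", 0) & (masks.get("ra", 0) >> Q)
positions = [i for i in range(len(text)) if combined >> i & 1]
print(positions)  # [0, 7]
```

The bitwise AND replaces an explicit merge of two hit lists, which is the complexity reduction the abstract refers to.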
@article{MandelHoernleinIflandetal.2011, author = {Mandel, Alexander and H{\"o}rnlein, Alexander and Ifland, Marianus and L{\"u}neburg, Edeltraud and Deckert, J{\"u}rgen and Puppe, Frank}, title = {Aufwandsanalyse f{\"u}r computerunterst{\"u}tzte Multiple-Choice Papierklausuren}, series = {GMS Journal for Medical Education}, volume = {28}, journal = {GMS Journal for Medical Education}, number = {4}, doi = {10.3205/zma000767}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-134386}, pages = {1-15, Doc55}, year = {2011}, abstract = {Introduction: Multiple-choice examinations are still fundamental for assessment in medical degree programs. In addition to content-related research, the optimization of the technical procedure is an important question. Medical examiners face three options: paper-based examinations with or without computer support, or completely electronic examinations. Critical aspects are the effort for formatting, the logistic effort during the actual examination, the quality, promptness, and effort of the correction, the time for making the documents available for inspection by the students, and the statistical analysis of the examination results. Methods: For three semesters, a computer program for the input and formatting of MC questions in medical and other paper-based examinations has been used and continuously improved at Wuerzburg University. In the winter semester (WS) 2009/10 eleven, in the summer semester (SS) 2010 twelve, and in WS 2010/11 thirteen medical examinations were conducted with the program and automatically evaluated. For the last two semesters, the remaining manual workload was recorded. Results: The effort for formatting and subsequent analysis, including adjustments of the analysis, of an average examination with about 140 participants and about 35 questions was 5-7 hours for exams without complications in WS 2009/10, about 2 hours in SS 2010, and about 1.5 hours in WS 2010/11. Including exams with complications, the average time was about 3 hours per exam in SS 2010 and 2.67 hours in WS 2010/11. Discussion: For conventional multiple-choice exams, the computer-based formatting and evaluation of paper-based exams offers a significant time saving for lecturers in comparison with the manual correction of paper-based exams; compared to purely electronically conducted exams, it requires a much simpler technological infrastructure and less staff during the exam.}, language = {de} } @article{Zeeshan2010, author = {Zeeshan, Ahmed}, title = {Towards Performance Measurement and Metrics Based Analysis of PLA Applications}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-68188}, year = {2010}, abstract = {This article presents a measurement- and metrics-based analysis approach to help software practitioners manage the additional levels of complexity and variability in software product line applications. The architecture of the proposed approach, ZAC, is designed and implemented to perform preprocessed source code analysis, calculate traditional and product line metrics, and visualize the results in two- and three-dimensional diagrams. Experiments using real data sets were performed, concluding that ZAC can be very helpful for software practitioners in understanding the overall structure and complexity of product line applications. Moreover, the obtained results show a strong positive correlation between the calculated traditional and product line measures.}, subject = {Programmierbare logische Anordnung}, language = {en} }
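The correlation claim in the ZAC abstract above can be made concrete with a Pearson coefficient over per-module metric pairs. The metric values below are invented placeholders, not data from the article.

```python
# Illustrative sketch: Pearson correlation between a traditional metric and a
# product-line metric across modules, as a stand-in for the ZAC-style analysis.
from math import sqrt

def pearson(xs: list[float], ys: list[float]) -> float:
    n = len(xs)
    mx, my = sum(xs) / n, sum(ys) / n
    cov = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
    sx = sqrt(sum((x - mx) ** 2 for x in xs))
    sy = sqrt(sum((y - my) ** 2 for y in ys))
    return cov / (sx * sy)

cyclomatic = [3, 7, 4, 12, 9]   # hypothetical traditional measure per module
variability = [1, 4, 2, 8, 5]   # hypothetical product-line measure per module
print(round(pearson(cyclomatic, variability), 3))  # close to 1.0 => strong positive
```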
@phdthesis{Oechsner2010, author = {Oechsner, Simon}, title = {Performance Challenges and Optimization Potential of Peer-to-Peer Overlay Technologies}, doi = {10.25972/OPUS-4159}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50015}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {In today's Internet, building overlay structures to provide a service is becoming more and more common. This approach allows for the utilization of client resources, thus being more scalable than a client-server model in this respect. However, in these architectures the quality of the provided service depends on the clients and is therefore more complex to manage. Resource utilization, both at the clients themselves and in the underlying network, determines the efficiency of the overlay application. Here, a trade-off exists between the resource providers and the end users that can be tuned via overlay mechanisms. Thus, resource management and traffic management are always quality-of-service management as well. In this monograph, the three currently most significant and most widely used overlay types in the Internet are considered. These overlays are implemented in popular applications which have only recently gained importance. Thus, these overlay networks still face real-world technical challenges which are of high practical relevance. We identify the specific issues for each of the considered overlays and show how their optimization affects the trade-offs between resource efficiency and service quality. Thus, we supply new insights and system knowledge that are not provided by previous work.}, subject = {Overlay-Netz}, language = {en} }
@phdthesis{Henjes2010, author = {Henjes, Robert}, title = {Performance Evaluation of Publish/Subscribe Middleware Architectures}, doi = {10.25972/OPUS-4536}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-53388}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {While developing modern applications, it is necessary to ensure efficient and performant communication between different applications. In current environments, middleware is used which supports the publish/subscribe communication pattern. Using this communication pattern, a publisher sends information encapsulated in messages to the middleware, and a subscriber registers its interests at the middleware. This monograph describes three steps to determine the performance of such a system. In a first step, the message throughput of a publish/subscribe system in different scenarios is measured using a Java Message Service (JMS) based implementation. In the second step, the maximum achievable message throughput is described by adapted models depending on the filter complexity and the replication grade. Using these models, the performance characteristics of a specific system in a given scenario can be determined. These numbers are used in the queuing model described in the third part of the thesis, which supports the dimensioning of a system in realistic scenarios. Additionally, we introduce a method to approximate an M/G/1 system numerically in an efficient way, which can be used for real-time analysis to predict the expected performance in a certain scenario. Finally, the analytical model is used to investigate different possibilities to ensure the scalability of the maximum achievable message throughput of the overall system.}, subject = {Middleware}, language = {en} }
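As a numerical illustration of the M/G/1 model family used for dimensioning in the Henjes thesis above, the standard Pollaczek-Khinchine formula for the mean waiting time can be evaluated directly. The arrival rate and service-time values below are invented, not measurements from the thesis.

```python
# Numerical illustration of an M/G/1 queue, the model family named above for
# dimensioning publish/subscribe middleware (values here are made up).
def mg1_mean_waiting_time(lam: float, es: float, es2: float) -> float:
    """Pollaczek-Khinchine: E[W] = lambda * E[S^2] / (2 * (1 - rho))."""
    rho = lam * es
    assert rho < 1, "system must be stable"
    return lam * es2 / (2 * (1 - rho))

lam = 800.0          # messages per second
es = 1e-3            # mean service time E[S] in seconds
es2 = 2 * es ** 2    # second moment E[S^2]; equals 2*E[S]^2 for exponential S
print(f"mean waiting time: {mg1_mean_waiting_time(lam, es, es2) * 1e3:.2f} ms")
```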
@inproceedings{OPUS4-4233, title = {9. Fachgespr{\"a}ch Sensornetze der GI/ITG Fachgruppe Kommunikation und Verteilte Systeme}, editor = {Kolla, Reiner}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51106}, year = {2010}, abstract = {Annual expert meeting on sensor networks of the GI/ITG specialist group Kommunikation und Verteilte Systeme (Communication and Distributed Systems), 16-17 September 2010, Universit{\"a}t W{\"u}rzburg.}, subject = {Drahtloses Sensorsystem}, language = {mul} } @phdthesis{Zeiger2010, author = {Zeiger, Florian}, title = {Internet Protocol based networking of mobile robots}, isbn = {978-3-923959-59-4}, doi = {10.25972/OPUS-4661}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-54776}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {This work is composed of three main parts: remote control of mobile systems via the Internet, ad-hoc networks of mobile robots, and remote control of mobile robots via 3G telecommunication technologies. The first part gives a detailed state of the art and a discussion of the problems to be solved in order to teleoperate mobile robots via the Internet. The focus of the application to be realized is a distributed tele-laboratory with remote experiments on mobile robots which can be accessed worldwide via the Internet. Analyses of the communication link are therefore used to realize a robust system. The developed and implemented architecture of this distributed tele-laboratory allows smooth access even with variable or low link quality. The second part covers the application of ad-hoc networks for mobile robots. Networking mobile robots via mobile ad-hoc networks is a very promising approach for realizing integrated telematic systems without relying on a preexisting communication infrastructure. Relevant civilian application scenarios lie, for example, in the area of search and rescue operations, where first responders are supported by multi-robot systems. Here, mobile robots, humans, and existing stationary sensors can be connected very quickly and efficiently. Therefore, this work investigates and analyzes the performance of different ad-hoc routing protocols for IEEE 802.11 based wireless networks in relevant scenarios. The analysis of the different protocols allows an optimization of the parameter settings so that these ad-hoc routing protocols can be used for mobile robot teleoperation. Guidelines for the realization of such telematic systems are also given, and traffic shaping mechanisms on the application layer are presented which allow a more efficient use of the communication link. An additional application scenario, the integration of a small-size helicopter into an IP-based ad-hoc network, is presented. The teleoperation of mobile robots via 3G telecommunication technologies is addressed in the third part of this work. The high availability, high mobility, and high bandwidth provide a very interesting opportunity to realize scenarios for the teleoperation of mobile robots or industrial remote maintenance. This work analyzes important parameters of the UMTS communication link and investigates the characteristics of different data streams. These analyses are used to derive guidelines which are necessary for the realization of industrial remote maintenance or mobile robot teleoperation scenarios. All the results and guidelines for the design of telematic systems in this work were derived from analyses and experiments with real hardware.}, subject = {Robotik}, language = {en} }
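Application-layer traffic shaping, as mentioned in the Zeiger abstract above, is commonly realized with a token bucket. The following sketch shows the general mechanism; the rate, burst size, and class design are invented for the example, not taken from the thesis.

```python
# Illustrative sketch of application-layer traffic shaping with a token bucket,
# in the spirit of the mechanisms mentioned above (parameters invented).
class TokenBucket:
    def __init__(self, rate: float, burst: float):
        self.rate = rate          # tokens (bytes) added per second
        self.capacity = burst     # maximum bucket size in bytes
        self.tokens = burst
        self.last = 0.0

    def allow(self, now: float, packet_bytes: int) -> bool:
        # Refill proportionally to elapsed time, capped at the burst size.
        self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if self.tokens >= packet_bytes:
            self.tokens -= packet_bytes
            return True
        return False  # caller should defer or drop the packet

bucket = TokenBucket(rate=125_000, burst=10_000)   # ~1 Mbit/s, 10 kB burst
print([bucket.allow(t * 0.01, 1500) for t in range(5)])
```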
@phdthesis{Sauer2010, author = {Sauer, Markus}, title = {Mixed-Reality for Enhanced Robot Teleoperation}, isbn = {978-3-923959-67-9}, doi = {10.25972/OPUS-4666}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55083}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {In recent years, robotics research has advanced to the point where the human-machine interface is increasingly becoming the most critical component for achieving high overall performance in systems for robot navigation and coordination. This dissertation investigates how mixed-reality technologies can be used in user interfaces to increase this overall performance. To this end, concepts and technologies are developed that, through evaluation in user tests, enable an optimized and user-centered design of mixed-reality user interfaces. Both the technical requirements and the human factors are thus taken into account for a consistent system design. After a detailed problem analysis and the creation of a system model that includes the human as a key component, the application of the novel 3D time-of-flight camera is first analyzed and optimized for robot navigation as well as for use in mixed-reality interfaces. Furthermore, it is shown how the network traffic of the video stream, the most important information element of most user interfaces for the navigation task, can be optimized on the application layer in typical multi-robot networks with dynamic topologies and load situations. This makes it possible to preserve the video stream and stabilize the frame rate in otherwise typical failure scenarios. These advanced technologies are then also employed in the developed concept of a generic 3D mixed-reality interface. This concept enables an integrated 3D presentation of the available information, so that spatial relationships between pieces of information are maintained and the number of mental transformations required of the human operator is reduced. At the same time, this approach also supports immersive stereo display technologies, which further promote spatial understanding of the remote situation. The approaches presented and evaluated in this dissertation also exploit the fact that local robot autonomy can nowadays be realized very robustly. This is used, for example, to realize an assistance system with variable autonomy, in which the remote operator receives, via force feedback combined with an integrated augmented-reality interface, an impression of the situation in the remote workspace as well as of the robot's current navigation intention. The conducted user tests demonstrate the significant increase in navigation performance achieved by the developed approach. The robust local autonomy also enables the predictive mixed-reality interface introduced in this dissertation. The control loop over the human, decoupled by this approach, makes it possible to significantly reduce the visibility of unavoidable system delays. In addition, this approach allows both viewpoints that are helpful for navigation to be combined in one 3D user interface: the exocentric viewpoint and the egocentric viewpoint as an augmented-reality view.}, subject = {Mobiler Roboter}, language = {en} }
@unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Markup overlap: Improving Fragmentation Method}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49084}, year = {2010}, abstract = {Overlap is a common term used to describe documents whose structural dimensions cannot be adequately represented using a tree structure, for instance a quotation that starts in one verse and ends in another. The problem of overlapping hierarchies is a recurring one, which has been addressed by a variety of approaches. There are XML-based solutions as well as non-XML ones. The XML-based solutions are multiple documents, empty elements, fragmentation, out-of-line markup, JITT, and BUVH; the non-XML approaches comprise CONCUR/XCONCUR, MECS, LMNL, etc. This paper briefly presents the state of the art in overlapping hierarchies and introduces two variations on the TEI fragmentation markup that have several advantages.}, subject = {XML}, language = {en} }
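The fragmentation technique named above splits an overlapping element into sibling fragments that can later be rejoined. The following sketch assumes TEI-style part="I"/"F" attributes on invented sample markup; it is a demonstration of the general idea, not of the paper's two variations.

```python
# Illustrative sketch: reassembling an element fragmented TEI-style with
# part="I"/"F" attributes, so that an overlapping quotation can be round-tripped.
# The sample markup is invented for demonstration.
import xml.etree.ElementTree as ET

doc = ET.fromstring(
    '<text>'
    '<verse n="1">He said: <q part="I">Truly, the promise</q></verse>'
    '<verse n="2"><q part="F">of God is near.</q></verse>'
    '</text>'
)

# Join the text of consecutive fragments of the same virtual <q> element.
fragments = [q.text or "" for q in doc.iter("q")]
print(" ".join(fragments))  # "Truly, the promise of God is near."
```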
@phdthesis{Klein2010, author = {Klein, Alexander}, title = {Performance Issues of MAC and Routing Protocols in Wireless Sensor Networks}, doi = {10.25972/OPUS-4465}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-52870}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {The focus of this work lies on the communication issues of Medium Access Control (MAC) and routing protocols in the context of WSNs. The communication challenges in these networks mainly result from high node density, low bandwidth, tight energy constraints, and the hardware limitations of low-power transceivers in terms of memory, computational power, and sensing capabilities. For this reason, the structure of WSNs is always kept as simple as possible to minimize the impact of communication issues. Thus, the majority of WSNs apply a simple one-hop star topology, since multi-hop communication places high demands on the routing protocol and increases the bandwidth requirements of the network. Moreover, medium access becomes a challenging problem due to the fact that low-power transceivers are very limited in their sensing capabilities. The first contribution is the Backoff Preamble-based MAC Protocol with Sequential Contention Resolution (BPS-MAC), which is designed to overcome the limitations of low-power transceivers. Two communication issues, namely the Clear Channel Assessment (CCA) delay and the turnaround time, are directly addressed by the protocol. The CCA delay is the period of time which the transceiver requires to detect a busy radio channel, while the turnaround time is the period of time which is required to switch between receive and transmit mode. Standard Carrier Sense Multiple Access (CSMA) protocols do not achieve high performance in terms of packet loss if the traffic is highly correlated, because the transceiver is not able to sense the medium during the switching phase. A node may therefore start to transmit data while another node is already transmitting, having sensed an idle medium right before it started to switch its transceiver from receive to transmit mode. The BPS-MAC protocol uses a new sequential preamble-based medium access strategy which can be adapted to the hardware capabilities of the transceivers. The protocol achieves a very low packet loss rate even in wireless networks with high node density and event-driven traffic, without the need for synchronization. This makes the protocol attractive for applications such as structural health monitoring, where event suppression is not an option. Moreover, acknowledgments and complex retransmission strategies become almost unnecessary, since the sequential preamble-based contention resolution mechanism minimizes the collision probability. However, packets can still be lost as a consequence of interference or other issues which affect signal propagation. The second contribution is a new routing protocol which is able to quickly detect topology changes without generating a large amount of overhead. The key characteristics of the Statistic-Based Routing (SBR) protocol are high end-to-end reliability (in fixed and mobile networks), load balancing capabilities, a smooth continuous routing metric, quick adaptation to changing network conditions, low processing and memory requirements, low overhead, support for unidirectional links, and simplicity. The protocol can establish routes in a hybrid or a proactive mode and uses an adaptive continuous routing metric which makes it very flexible in terms of scalability while maintaining stable routes. The hybrid mode is optimized for low-power WSNs, since routes are only established on demand. The difference between the hybrid mode and reactive routing strategies is that routing messages are periodically transmitted to maintain already established routes. However, the protocol stops transmitting routing messages if no data packets are transmitted for a certain time period, in order to minimize the routing overhead and the energy consumption. The proactive mode is designed for high data rate networks with fewer energy constraints. In this mode, the protocol periodically transmits routing messages to establish routes proactively, even in the absence of data traffic. Thus, nodes in the network can transmit data immediately, since the route to the destination has already been established in advance. In addition, a new delay-based routing message forwarding strategy is introduced. The forwarding strategy is part of SBR but can also be applied to many other routing protocols in order to modify the established topology. The strategy can be used, e.g. in mobile networks, to decrease packet loss by deferring routing messages with respect to the neighbor change rate. Thus, nodes with a stable neighborhood forward messages faster than nodes within a fast-changing neighborhood. As a result, routes are established through nodes with correlated movement, which results in fewer topology changes due to longer link durations.}, subject = {Routing}, language = {en} }
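The delay-based forwarding idea from the SBR abstract above reduces to a simple rule: defer routing messages in proportion to the local neighbor change rate. The sketch below shows that rule in isolation; the constants and the normalization horizon are invented, not the thesis' parameters.

```python
# Illustrative sketch of delay-based forwarding: routing messages are deferred
# in proportion to the local neighbor change rate, so stable nodes forward
# first and routes prefer nodes with correlated movement.
MAX_DELAY = 0.5  # seconds (invented upper bound)

def forwarding_delay(neighbor_changes: int, horizon: int = 10) -> float:
    """More neighbor-set changes in the observation window => longer deferral."""
    change_rate = min(neighbor_changes / horizon, 1.0)
    return MAX_DELAY * change_rate

# A stable node (0 changes) forwards immediately; a highly mobile one waits.
print(forwarding_delay(0), forwarding_delay(8))  # 0.0 0.4
```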
@unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Understanding the Vex Rendering Engine}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51333}, year = {2010}, abstract = {The Visual Editor for XML (Vex) [1] used by TextGrid [2] and other applications has a rendering engine and a layout engine. The layout engine is well documented, but the rendering engine is not. This lack of documentation has made refactoring and extending the editor hard and tedious. For instance, many CSS 2.1 and upcoming CSS 3 properties have not been implemented. Software developers in projects such as TextGrid that use Vex would like to update its CSS rendering engine in order to provide advanced user interfaces and support different document types. In order to minimize the effort of extending Vex's functionality, I found it beneficial to write basic documentation about the Vex software architecture in general and its CSS rendering engine in particular. The documentation is mainly based on the idea of architectural layered diagrams. Layered diagrams can help developers understand a software's source code faster and more easily in order to alter it and fix errors. This paper is written to provide direct support for exploration in the comprehension process of the Vex source code. It discusses the Vex software architecture. The organization of the packages that make up the software, the architecture of its CSS rendering engine, and an algorithm explaining the working principle of the rendering engine are described.}, subject = {Cascading Style Sheets}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Reference Architecture, Design of Cascading Style Sheets Processing Model}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51328}, year = {2010}, abstract = {The technique of using Cascading Style Sheets (CSS) to format and present structured data is called a CSS processing model. For instance, a CSS processing model for XML documents describes the steps involved in formatting and presenting XML documents on screen or on paper. Many software applications, such as browsers and XML editors, have their own CSS processing models, which are part of their rendering engines. Each browser renders CSS layout differently, based on its own CSS processing model; as a result, inconsistencies in the support of CSS features arise. Some browsers support more CSS features than others, and the rendering itself varies. Moreover, the W3C standards are not even adhered to by some browsers, such as Internet Explorer. Test suites and other hacks and filters cannot definitively solve these problems, because such solutions are temporary and fragile. To mitigate this inconsistency and the browser compatibility issues with respect to CSS, a reference CSS processing model is needed. By extension, it could even allow interoperability across CSS rendering engines. A reference architecture would provide a common software architecture and common interfaces, and facilitate refactoring, reuse, and automated unit testing. In [2] a reference architecture for browsers has been proposed. However, this reference architecture is a macro reference model which does not separately consider the individual components of rendering and layout engines. In this paper, an attempt to develop a reference architecture for CSS processing models is discussed. In addition, the rendering and layout engines of the Vex editor [3], as well as an extended version of the editor used in the TextGrid project [5], are presented in order to validate the proposed reference architecture.}, subject = {Cascading Style Sheets}, language = {en} }
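One concrete step inside any CSS processing model is the cascade, which ranks competing rules by selector specificity. The sketch below computes the standard (id, class, type) specificity tuple for a simplified selector grammar; it is an illustration of the concept, not code from the Vex engine.

```python
# Illustrative sketch: computing CSS selector specificity (id, class, type) the
# way a rendering engine's cascade stage would; simplified selector grammar.
import re

def specificity(selector: str) -> tuple[int, int, int]:
    ids = len(re.findall(r"#[\w-]+", selector))
    # Classes, attribute selectors, and pseudo-classes all count at class level.
    classes = len(re.findall(r"\.[\w-]+|\[[^\]]*\]|:(?!:)[\w-]+", selector))
    cleaned = re.sub(r"#[\w-]+|\.[\w-]+|\[[^\]]*\]|::?[\w-]+", " ", selector)
    types = len(re.findall(r"[a-zA-Z][\w-]*", cleaned))
    return (ids, classes, types)

for rule in ["p", "div p.note", "#toc .entry", "ul li:first-child"]:
    print(rule, specificity(rule))
# The highest tuple wins the cascade: (1, 1, 0) beats (0, 1, 2), etc.
```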
@unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Empirical Study on Screen Scraping Web Service Creation: Case of Rhein-Main-Verkehrsverbund (RMV)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49396}, year = {2010}, abstract = {The Internet is the biggest database that science and technology have ever produced. The World Wide Web is a large repository of information that cannot be used for automation by many applications due to its limited target audience. One of the solutions to the automation problem is to develop wrappers. Wrapping is a process whereby unstructured extracted information is transformed into a more structured format, such as XML, which can be provided as a web service to other applications. A web service is a web page whose content is well structured so that a computer program can consume it automatically. This paper describes the steps involved in constructing wrappers manually in order to automatically generate web services.}, subject = {HTML}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Java Web Frameworks: Which One to Choose?}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49407}, year = {2010}, abstract = {This article discusses web frameworks that are available to a software developer in the Java language. It introduces the MVC paradigm and some frameworks that implement it. The article presents an overview of the Struts, Spring MVC, and JSF frameworks, as well as guidelines for selecting one of them as a development environment.}, subject = {Java Frameworks}, language = {en} } @unpublished{Nassourou2010, author = {Nassourou, Mohamadou}, title = {Doing Webservices Composition by Content-based Mashup: Example of a Web-based Simulator for Itinerary Planning}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50036}, year = {2010}, abstract = {Web service composition is traditionally carried out using composition technologies such as the Business Process Execution Language (BPEL) [1] and the Web Service Choreography Interface (WSCI) [2]. The composition technology involves the process of web service discovery, invocation, and composition. However, these technologies are not easy and flexible enough, because they are mainly developer-centric. Moreover, the majority of websites have not yet embarked into the world of web services, although they have very important and useful information to offer. Is it because they have not understood the usefulness of web services, or is it because of the costs? Whatever the answers to these questions might be, time and money are definitely required in order to create and offer web services. To avoid these expenditures, wrappers [7] that automatically generate web services from websites would be a cheaper and easier solution. Mashups offer a different way of doing web service composition. In the web environment, a mashup is a web application that brings together data from several sources using web services, APIs, wrappers, and so on, in order to create an entirely new application that was not provided before. This paper first presents an overview of mashups and the process of web service invocation and composition based on mashups, and then describes an example of a web-based simulator for a navigation system in Germany.}, subject = {Mashup }, language = {en} }
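The wrapper construction described in the two screen-scraping entries above boils down to parsing semi-structured HTML and emitting structured XML. The sketch below uses only the standard library; the timetable HTML fragment is a hypothetical stand-in, not the actual RMV page structure.

```python
# Illustrative sketch of a hand-built wrapper: parse a (hypothetical) timetable
# HTML fragment with the standard-library parser and emit structured XML.
from html.parser import HTMLParser
import xml.etree.ElementTree as ET

SAMPLE = ("<table><tr><td>09:15</td><td>Frankfurt Hbf</td></tr>"
          "<tr><td>09:42</td><td>Mainz Hbf</td></tr></table>")

class TimetableWrapper(HTMLParser):
    def __init__(self):
        super().__init__()
        self.rows, self._row, self._in_td = [], [], False
    def handle_starttag(self, tag, attrs):
        if tag == "tr": self._row = []
        if tag == "td": self._in_td = True
    def handle_endtag(self, tag):
        if tag == "td": self._in_td = False
        if tag == "tr": self.rows.append(self._row)
    def handle_data(self, data):
        if self._in_td: self._row.append(data.strip())

wrapper = TimetableWrapper()
wrapper.feed(SAMPLE)
root = ET.Element("connections")
for time, station in wrapper.rows:
    stop = ET.SubElement(root, "stop", time=time)
    stop.text = station
print(ET.tostring(root, encoding="unicode"))
```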
@article{VainshteinSanchezBrazmaetal.2010, author = {Vainshtein, Yevhen and Sanchez, Mayka and Brazma, Alvis and Hentze, Matthias W. and Dandekar, Thomas and Muckenthaler, Martina U.}, title = {The IronChip evaluation package: a package of perl modules for robust analysis of custom microarrays}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-67869}, year = {2010}, abstract = {Background: Gene expression studies greatly contribute to our understanding of complex relationships in gene regulatory networks. However, the complexity of array design, production, and manipulation are limiting factors affecting data quality. The use of customized DNA microarrays improves overall data quality in many situations, however, only if analysis tools are available for these specifically designed microarrays. Results: The IronChip Evaluation Package (ICEP) is a collection of Perl utilities and an easy-to-use data evaluation pipeline for the analysis of microarray data with a focus on the data quality of custom-designed microarrays. The package has been developed for the statistical and bioinformatical analysis of the custom cDNA microarray IronChip, but can be easily adapted for other cDNA or oligonucleotide-based custom microarray platforms. ICEP uses decision-tree-based algorithms to assign quality flags and performs robust analysis based on chip design properties regarding multiple repetitions, ratio cut-off, background, and negative controls. Conclusions: ICEP is a stand-alone Windows application for obtaining optimal data quality from custom-designed microarrays and is freely available here (see "Additional Files" section) and at: http://www.alice-dsl.net/evgeniy.vainshtein/ICEP/}, subject = {Microarray}, language = {en} } @article{SchmidSchindelinCardonaetal.2010, author = {Schmid, Benjamin and Schindelin, Johannes and Cardona, Albert and Longair, Martin and Heisenberg, Martin}, title = {A high-level 3D visualization API for Java and ImageJ}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-67851}, year = {2010}, abstract = {Background: Current imaging methods such as Magnetic Resonance Imaging (MRI), Confocal microscopy, Electron Microscopy (EM) or Selective Plane Illumination Microscopy (SPIM) yield three-dimensional (3D) data sets in need of appropriate computational methods for their analysis. The reconstruction, segmentation and registration are best approached from the 3D representation of the data set. Results: Here we present a platform-independent framework based on Java and Java 3D for accelerated rendering of biological images. Our framework is seamlessly integrated into ImageJ, a free image processing package with a vast collection of community-developed biological image analysis tools. Our framework enriches the ImageJ software libraries with methods that greatly reduce the complexity of developing image analysis tools in an interactive 3D visualization environment. In particular, we provide high-level access to volume rendering, volume editing, surface extraction, and image annotation. The ability to rely on a library that removes the low-level details enables concentrating software development efforts on the algorithm implementation parts. Conclusions: Our framework enables biomedical image software development to be built with 3D visualization capabilities with very little effort. We offer the source code and convenient binary packages along with extensive documentation at http://3dviewer.neurofly.de.}, subject = {Visualisierung}, language = {en} }
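The decision-rule quality flagging described in the ICEP entry above can be sketched for a single spot measurement. The rules loosely follow the criteria named there (ratio cut-off, background, replicate agreement), but the thresholds and flag names are invented, not ICEP's.

```python
# Illustrative sketch of decision-rule quality flagging for a spot measurement,
# loosely following the criteria named above; thresholds are invented.
def quality_flag(signal: float, background: float, replicate_cv: float) -> str:
    if signal <= 2 * background:
        return "LOW_SIGNAL"       # spot barely above local background
    if replicate_cv > 0.3:
        return "INCONSISTENT"     # replicates disagree too much
    return "OK"

spots = [(1500.0, 200.0, 0.10), (350.0, 200.0, 0.05), (1200.0, 100.0, 0.45)]
print([quality_flag(*s) for s in spots])  # ['OK', 'LOW_SIGNAL', 'INCONSISTENT']
```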
Die L{\"o}sung dieser Probleme steht im Zentrum dieser Dissertation: Das Ziel besteht darin, den Nutzer bei der Kalibrierung von ABS auf der Basis von unzureichenden, potentiell fehlerhaften Daten und Wissen zu unterst{\"u}tzen. Dabei sollen drei Hauptprobleme gel{\"o}st werden: 1)Vereinfachung der Kalibrierung großer Agenten-Parametermengen auf der Mikro- Ebene in Agenten-basierten Simulationen durch Ausnutzung der spezifischen Struktur von ABS (n{\"a}mlich dem Aufbau aus einer Menge von Agentenmodellen). 2)Kalibrierung Agenten-basierter Simulationen, so dass auf allen relevanten Beobachtungsebenen valides Simulationsverhalten erzeugt wird (mindestens Mikro und Makro-Ebene). Als erschwerende Randbedingung muss die Kalibrierung unter der Voraussetzung einer Makro-Mikro-Wissensl{\"u}cke durchgef{\"u}hrt werden. 3)Kalibrierung Agenten-basierter Simulationen auf der Mikro-Ebene unter der Voraussetzung, dass zur Kalibrierung einzelner Agentenmodelle nicht ausreichend und potentiell verf{\"a}lschte Daten zur Verhaltensvalidierung zur Verf{\"u}gung stehen. Hierzu wird in dieser Arbeit das sogenannte Makro-Mikro-Verfahren zur Kalibrierung von Agenten-basierten Simulationen entwickelt. Das Verfahren besteht aus einem Basisverfahren, das im Verlauf der Arbeit um verschiedene Zusatzverfahren erweitert wird. Das Makro-Mikro-Verfahren und seine Erweiterungen sollen dazu dienen, die Modellkalibrierung trotz stark verrauschter Daten und eingeschr{\"a}nktem Wissen {\"u}ber die Wirkungszusammenh{\"a}nge im Originalsystem geeignet zu erm{\"o}glichen und dabei den Kalibrierungsprozess zu beschleunigen: 1) Makro-Mikro-Kalibrierungsverfahren: Das in dieser Arbeit entwickelte Makro- Mikro-Verfahren unterst{\"u}tzt den Nutzer durch eine kombinierte Kalibrierung auf der Mikro- und der Makro-Beobachtungsebene, die gegebenenfalls durch Zwischenebenen erweitert werden kann. Der Grundgedanke des Verfahrens besteht darin, das Kalibrierungsproblem in eines auf aggregierter Verhaltensebene und eines auf der Ebene des Mikro-Agentenverhaltens aufzuteilen. Auf der Makro-Ebene wird nach validen idealen aggregierten Verhaltensmodellen (IVM) der Agenten gesucht. Auf der Mikro-Ebene wird versucht die individuellen Modelle der Agenten auf Basis des erw{\"u}nschten Gesamtverhaltens und der ermittelten IVM so zu kalibrieren, das insgesamt Simulationsverhalten entsteht, das sowohl auf Mikro- als auch auf Makro-Ebene valide ist. 2) Erweiterung 1: Robuste Kalibrierung: Um den Umgang mit potentiell verrauschten Validierungskriterien (d.h. mit verrauschten Daten {\"u}ber ein Originalsystem, auf denen die Validierungskriterien der Simulation beruhen) und Modellteilen w{\"a}hrend der Kalibrierung von ABS zu erm{\"o}glichen, wird eine robuste Kalibrierungstechnik zur Anwendung im Makro-Mikro-Verfahren entwickelt. 3) Erweiterung 2: Kalibrierung mit Heterogenit{\"a}tssuche: Als zweite Erweiterung des Makro-Mikro-Verfahrens wird ein Verfahren entwickelt, das das Problem des unklaren Detaillierungsgrades von ABS auf der Ebene der Parameterwerte adressiert. Prinzipiell kann zwar jeder Agent unterschiedliche Parameterwerte verwenden, obwohl eine geringere Heterogenit{\"a}t zur Erzeugung validen Verhaltens ausreichend w{\"a}re. Die entwickelte Erweiterung versucht, w{\"a}hrend der Kalibrierung, eine geeignete Heterogenit{\"a}tsauspr{\"a}gung f{\"u}r die Parameterwerte der Agenten zu ermitteln. 
Unter einer Heterogenit{\"a}tsauspr{\"a}gung wird dabei eine Einteilung der simulierten Agenten in Gruppen mit jeweils gleichen Parameterwerten verstanden. Die Heterogenit{\"a}tssuche dient dazu, einen Kompromiss zu finden zwischen der Notwendigkeit, sehr große Parametersuchr{\"a}ume durchsuchen zu m{\"u}ssen und gleichzeitig den Suchraum so klein wie m{\"o}glich halten zu wollen.}, subject = {Computersimulation}, language = {de} } @phdthesis{Pries2010, author = {Pries, Jan Rastin}, title = {Performance Optimization of Wireless Infrastructure and Mesh Networks}, doi = {10.25972/OPUS-3723}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46097}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2010}, abstract = {Future broadband wireless networks should be able to support not only best effort traffic but also real-time traffic with strict Quality of Service (QoS) constraints. In addition, their available resources are scare and limit the number of users. To facilitate QoS guarantees and increase the maximum number of concurrent users, wireless networks require careful planning and optimization. In this monograph, we studied three aspects of performance optimization in wireless networks: resource optimization in WLAN infrastructure networks, quality of experience control in wireless mesh networks, and planning and optimization of wireless mesh networks. An adaptive resource management system is required to effectively utilize the limited resources on the air interface and to guarantee QoS for real-time applications. Thereby, both WLAN infrastructure and WLAN mesh networks have to be considered. An a-priori setting of the access parameters is not meaningful due to the contention-based medium access and the high dynamics of the system. Thus, a management system is required which dynamically adjusts the channel access parameters based on the network load. While this is sufficient for wireless infrastructure networks, interferences on neighboring paths and self-interferences have to be considered for wireless mesh networks. In addition, a careful channel allocation and route assignment is needed. Due to the large parameter space, standard optimization techniques fail for optimizing large wireless mesh networks. In this monograph, we reveal that biology-inspired optimization techniques, namely genetic algorithms, are well-suitable for the planning and optimization of wireless mesh networks. Although genetic algorithms generally do not always find the optimal solution, we show that with a good parameter set for the genetic algorithm, the overall throughput of the wireless mesh network can be significantly improved while still sharing the resources fairly among the users.}, subject = {IEEE 802.11}, language = {en} } @inproceedings{SchlosserJarschelDuellietal.2010, author = {Schlosser, Daniel and Jarschel, Michael and Duelli, Michael and Hoßfeld, Tobias and Hoffmann, Klaus and Hoffmann, Marco and Morper, Hans Jochen and Jurca, Dan and Khan, Ashiq}, title = {A Use Case Driven Approach to Network Virtualization}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55611}, year = {2010}, abstract = {In today's Internet, services are very different in their requirements on the underlying transport network. In the future, this diversity will increase and it will be more difficult to accommodate all services in a single network. 
@phdthesis{Spoerhase2009, author = {Spoerhase, Joachim}, title = {Competitive and Voting Location}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-52978}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {We consider competitive location problems where two competing providers place their facilities sequentially and users can decide between the competitors. We assume that both competitors act non-cooperatively and aim at maximizing their own benefits. We investigate the complexity and approximability of such problems on graphs, in particular on simple graph classes such as trees and paths. We also develop fast algorithms for single competitive location problems, where each provider places a single facility. Voting location, in contrast, aims at identifying locations that meet social criteria. The provider wants to satisfy the users (customers) of the facility to be opened. In general, there is no location that is favored by all users; therefore, a satisfactory compromise has to be found. To this end, criteria arising from voting theory are considered. The solution of the location problem is understood as the winner of a virtual election among the users of the facilities, in which the potential locations play the role of the candidates and the users represent the voters. Competitive and voting location problems turn out to be closely related.}, subject = {Standortproblem}, language = {en} } @phdthesis{Saska2009, author = {Saska, Martin}, title = {Trajectory planning and optimal control for formations of autonomous robots}, isbn = {978-3-923959-56-3}, doi = {10.25972/OPUS-4622}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-53175}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {In this thesis, we present novel approaches for the formation driving of nonholonomic robots and optimal trajectory planning to reach a target region. The methods consider a static known map of the environment as well as unknown and dynamic obstacles detected by the sensors of the formation. The algorithms are based on leader-following techniques, where a formation of car-like robots is maintained in a shape determined by curvilinear coordinates. Beyond this, the general formation driving methods are specialized and extended for an application of airport snow shoveling. Detailed descriptions of the algorithms, complemented by relevant stability and convergence studies, are provided in the following chapters. Furthermore, the applicability of the approaches is verified by various simulations in existing robotic environments and by a hardware experiment.}, subject = {Autonomer Roboter}, language = {en} }
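The leader-following shape maintenance named in the Saska abstract above reduces, locally, to mapping an offset given in the leader's frame into world coordinates. The sketch below approximates the curvilinear frame by the leader's instantaneous heading; the function and values are invented for illustration.

```python
# Illustrative sketch: a follower's target pose from the leader's pose plus an
# offset given in the leader's (curvilinear) frame; here the frame is locally
# approximated by the leader's heading. All values are made up.
from math import cos, sin, pi

def follower_target(leader_x, leader_y, leader_theta, offset_along, offset_across):
    """Rotate the (along, across) offset into world coordinates."""
    x = leader_x + offset_along * cos(leader_theta) - offset_across * sin(leader_theta)
    y = leader_y + offset_along * sin(leader_theta) + offset_across * cos(leader_theta)
    return x, y, leader_theta  # follower keeps the leader's heading

# Follower 2 m behind and 1 m to the left of a leader heading "north".
print(follower_target(0.0, 0.0, pi / 2, -2.0, 1.0))  # approx (-1.0, -2.0, 1.571)
```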
Furthermore, the applicability of the approaches is verified by various simulations in existing robotic environments as well as by a hardware experiment.}, subject = {Autonomer Roboter}, language = {en} } @phdthesis{Hossfeld2009, author = {Hoßfeld, Tobias}, title = {Performance Evaluation of Future Internet Applications and Emerging User Behavior}, doi = {10.25972/OPUS-3067}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-37570}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {In future telecommunication systems, we observe an increasing diversity of access networks. The separation of transport services and applications or services leads to multi-network services, i.e., a future service has to work transparently to the underlying network infrastructure. Multi-network services with edge-based intelligence, like P2P file sharing or the Skype VoIP service, impose new traffic control paradigms on the future Internet. Such services adapt the amount of consumed bandwidth to reach different goals. A selfish behavior tries to keep the QoE of a single user above a certain level. Skype, for instance, repeats voice samples depending on the perceived end-to-end loss. From the viewpoint of a single user, the replication of voice data overcomes the degradation caused by packet loss and makes it possible to maintain a certain QoE. The cost for this achievement is a higher amount of consumed bandwidth. However, if the packet loss is caused by congestion in the network, this additionally required bandwidth even worsens the network situation. Altruistic behavior, on the other hand, would reduce the bandwidth consumption in such a way that the pressure on the network is released and thus the overall network performance is improved. In this monograph, we analyzed the impact of the overlay, P2P, and QoE paradigms in future Internet applications and their interactions with the emerging user behavior. The shift of intelligence toward the edge is accompanied by a change in the emerging user behavior and traffic profile, as well as a change from multi-service networks to multi-network services. In addition, edge-based intelligence may lead to higher dynamics in the network topology, since the applications are often controlled by an overlay network, which can rapidly change in size and structure as new nodes can leave or join the overlay network in an entirely distributed manner. As a result, we found that the performance evaluation of such services provides new challenges, since novel key performance factors have to be identified first, like pollution of P2P systems, and appropriate models of the emerging user behavior are required, e.g. taking into account user impatience. As a common denominator of the presented studies in this work, we focus on a user-centric view when evaluating the performance of future Internet applications. For a subscriber of a certain application or service, the perceived quality expressed as QoE will be the major criterion of the user's satisfaction with the network and service providers. We selected three different case studies and characterized the application's performance from the end user's point of view. Those are (1) cooperation in mobile P2P file sharing networks, (2) modeling of online TV recording services, and (3) QoE of edge-based VoIP applications. The user-centric approach facilitates the development of new mechanisms to overcome problems arising from the changing user behavior.
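One behavioral factor the Hoßfeld entry names explicitly, user impatience, can be conveyed by a toy model; the exponential patience distribution and all rates below are assumptions for illustration, not taken from the thesis. A mechanism the thesis itself proposes for such settings is described next.

```python
# Toy impatience model: a user aborts once the offered waiting time exceeds
# an exponentially distributed patience threshold (both distributions and
# all parameter values are illustrative assumptions).
import random

def abort_ratio(n_users=10_000, mean_wait=8.0, mean_patience=10.0):
    aborted = 0
    for _ in range(n_users):
        wait = random.expovariate(1.0 / mean_wait)          # offered waiting time
        patience = random.expovariate(1.0 / mean_patience)  # user's threshold
        aborted += wait > patience
    return aborted / n_users

print(f"simulated abort ratio: {abort_ratio():.3f}")
```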
An example is the proposed CycPriM cooperation strategy, which copes with selfish user behavior in mobile P2P file sharing systems. An adequate mechanism has also been shown to be efficient in a heterogeneous B3G network with mobile users conducting vertical handovers between different wireless access technologies. The consideration of the user behavior and the user-perceived quality leads to an appropriate modeling of future Internet applications. In the case of the online TV recording service, this enables the comparison between different technical realizations of the system, e.g. using server clusters or P2P technology, to properly dimension the installed network elements and to assess the costs for service providers. Technologies like P2P help to overcome phenomena like flash crowds and improve scalability compared to server clusters, which may get overloaded in such situations. Nevertheless, P2P technology poses additional challenges and leads to user behavior different from that seen in traditional client/server systems. Besides the willingness to share files and the churn of users, peers may be malicious and offer fake content to disturb the data dissemination. Finally, the understanding and the quantification of QoE with respect to QoS degradations permit designing sophisticated edge-based applications. To this end, we identified and formulated the IQX hypothesis as an exponential interdependency between QoE and QoS parameters, which we validated for different examples. The appropriate modeling of the emerging user behavior taking into account the user's perceived quality and its interactions with the overlay and P2P paradigm will finally help to design future Internet applications.}, subject = {Leistungsbewertung}, language = {en} } @phdthesis{Hess2009, author = {Hess, Martin}, title = {Motion coordination and control in systems of nonholonomic autonomous vehicles}, isbn = {978-3-923959-55-6}, doi = {10.25972/OPUS-3794}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46442}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {This work focuses on coordination methods and the control of motion in groups of nonholonomic wheeled mobile robots, in particular of the car-like type. These kinds of vehicles are particularly restricted in their mobility. In the main part of this work the two problems of formation motion coordination and of rendezvous in distributed multi-vehicle systems are considered. We introduce several enhancements to an existing motion planning approach for formations of nonholonomic mobile robots. Compared to the original method, the extended approach is able to handle time-varying reference speeds as well as adjustments of the formation's shape during reference trajectory segments with continuously differentiable curvature. Additionally, undesired discontinuities in the speed and steering profiles of the vehicles are avoided. Further, the scenario of snow shoveling on an airfield by utilizing multiple formations of autonomous snowplows is discussed. We propose solutions to the subproblems of motion planning for the formations and tracking control for the individual vehicles. While all situations that might occur have been tested in a simulation environment, we also verified the developed tracking controller in real robot hardware experiments. The task of the rendezvous problem in groups of car-like robots is to drive all vehicles to a common position by means of decentralized control laws.
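The rendezvous task just introduced in the Hess entry can be conveyed by a generic consensus-style sketch. Two deliberate simplifications: the agents here are holonomic points rather than car-like vehicles, and the plain neighbor-averaging law below is a textbook scheme, not the thesis's Lyapunov-analyzed controllers.

```python
# Generic rendezvous sketch: each agent repeatedly steps toward the centroid
# of the neighbors it can sense (simplified stand-in for the thesis's laws).
def rendezvous_step(positions, neighbors, gain=0.2):
    new_positions = {}
    for i, (x, y) in positions.items():
        nbrs = [positions[j] for j in neighbors[i]]
        if nbrs:
            cx = sum(p[0] for p in nbrs) / len(nbrs)
            cy = sum(p[1] for p in nbrs) / len(nbrs)
            x, y = x + gain * (cx - x), y + gain * (cy - y)
        new_positions[i] = (x, y)
    return new_positions

positions = {1: (0.0, 0.0), 2: (4.0, 0.0), 3: (2.0, 3.0)}
neighbors = {1: [2], 2: [1, 3], 3: [2]}   # not every pair interacts directly
for _ in range(50):
    positions = rendezvous_step(positions, neighbors)
print(positions)   # all three agents end up close together
```

As long as the interaction graph stays connected, such averaging drives the agents to a common point, which is exactly the property the abstract's next sentences address.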
Typically, there exists no direct interaction link between all of the vehicles. In this work we present decentralized rendezvous control laws for vehicles with free and with bounded steering. The convergence properties of the approaches are analyzed by utilizing Lyapunov-based techniques. Furthermore, they are evaluated within various simulation experiments, while the bounded steering case is also verified within laboratory hardware experiments. Finally, we introduce a modification to the bounded steering system that increases the convergence speed at the expense of a higher traveled distance of the vehicles.}, subject = {Robotik}, language = {en} } @misc{Kaempgen2009, type = {Master Thesis}, author = {Kaempgen, Benedikt}, title = {Deskriptives Data-Mining f{\"u}r Entscheidungstr{\"a}ger: Eine Mehrfachfallstudie}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-46343}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {Das Potenzial der Wissensentdeckung in Daten wird h{\"a}ufig nicht ausgenutzt, was haupts{\"a}chlich auf Barrieren zwischen dem Entwicklerteam und dem Endnutzer des Data-Mining zur{\"u}ckzuf{\"u}hren ist. In dieser Arbeit wird ein transparenter Ansatz zum Beschreiben und Erkl{\"a}ren von Daten f{\"u}r Entscheidungstr{\"a}ger vorgestellt. In Entscheidungstr{\"a}ger-zentrierten Aufgaben werden die Projektanforderungen definiert und die Ergebnisse zu einer Geschichte zusammengestellt. Eine Anforderung besteht dabei aus einem tabellarischen Bericht und ggf. Mustern in seinem Inhalt, jeweils verst{\"a}ndlich f{\"u}r einen Entscheidungstr{\"a}ger. Die technischen Aufgaben bestehen aus einer Datenpr{\"u}fung, der Integration der Daten in einem Data-Warehouse sowie dem Generieren von Berichten und dem Entdecken von Mustern wie in den Anforderungen beschrieben. Mehrere Data-Mining-Projekte k{\"o}nnen durch Wissensmanagement sowie eine geeignete Infrastruktur voneinander profitieren. Der Ansatz wurde in zwei Projekten unter Verwendung von ausschließlich Open-Source-Software angewendet.}, subject = {Data Mining}, language = {de} } @phdthesis{Hopfner2008, author = {Hopfner, Marbod}, title = {Source Code Analysis, Management, and Visualization for PROLOG}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-36300}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {This thesis deals with the management and analysis of source code, which is represented in XML. Using the elementary methods of the XML repository, the XML source code representation is accessed, changed, updated, and saved. We reason about the source code, refactor source code, and visualize dependency graphs for call analysis. The visualized dependencies between files, modules, or packages are used to structure the source code in order to get a system which is easy to comprehend, to modify, and to complete. Sophisticated methods have been developed to slice the source code in order to obtain a working package of a large system, containing only a specific functionality. The basic methods on which the visualizations and analyses are built can be exchanged like plug-ins. The visualization methods can be reused in order to handle arbitrary source code representations, e.g., JAML, PHPML, PROLOGML. Dependencies in other contexts can be visualized, too, e.g., ER diagrams or website references.
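A small sketch can illustrate the call-dependency idea from the Hopfner entry: calls are read from an XML representation of the source code and collected into a graph. The XML layout shown is invented for illustration; the thesis's PROLOGML schema will differ.

```python
# Sketch: build a call-dependency graph from an XML source representation.
import xml.etree.ElementTree as ET
from collections import defaultdict

SOURCE = """
<module name="m1">
  <predicate name="p"><call target="q"/><call target="r"/></predicate>
  <predicate name="q"><call target="r"/></predicate>
  <predicate name="r"/>
</module>
"""

def call_graph(xml_text):
    graph = defaultdict(set)
    for pred in ET.fromstring(xml_text).iter("predicate"):
        for call in pred.iter("call"):
            graph[pred.get("name")].add(call.get("target"))
    return graph

for caller, callees in sorted(call_graph(SOURCE).items()):
    print(caller, "->", ", ".join(sorted(callees)))
```

From such a graph, structuring and slicing follow naturally, e.g. the reachable set from one predicate delimits the "working package" the entry mentions.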
The tool SCAV supports source code visualization and analysis methods.}, subject = {Refactoring}, language = {en} } @misc{Feineis2008, type = {Master Thesis}, author = {Feineis, Markus}, title = {Wortgenaue Annotation digitalisierter mittelalterlicher Handschriften}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-30448}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {No abstract available}, subject = {Annotation}, language = {de} } @phdthesis{Maeder2008, author = {M{\"a}der, Andreas}, title = {Performance Models for UMTS 3.5G Mobile Wireless Systems}, doi = {10.25972/OPUS-2766}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-32525}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Mobile telecommunication systems of the 3.5th generation (3.5G) constitute a first step towards the requirements of an all-IP world. As the denotation suggests, 3.5G systems are not designed from scratch. Instead, they evolved from existing 3G systems like UMTS or cdma2000. 3.5G systems are primarily designed and optimized for packet-switched best-effort traffic, but they are also intended to increase system capacity by exploiting available radio resources more efficiently. Systems based on cdma2000 are enhanced with 1xEV-DO (EV-DO: evolution, data-optimized). In the UMTS domain, the 3G partnership project (3GPP) specified the High Speed Packet Access (HSPA) family, consisting of High Speed Downlink Packet Access (HSDPA) and its counterpart High Speed Uplink Packet Access (HSUPA) or Enhanced Uplink. The focus of this monograph is on HSPA systems, although the operation principles of other 3.5G systems are similar. Among the main contributions of our work are performance models which allow a holistic view of the system. The models consider user traffic on flow level, such that a recalculation of parameters like bandwidth is necessary only upon significant changes of the system state. The impact of lower layers is captured by stochastic models. This approach combines accurate modeling and the ability to cope with computational complexity. Adapting this approach to HSDPA, we develop a new physical layer abstraction model that takes radio resources, scheduling discipline, radio propagation and mobile device capabilities into account. Together with models for the calculation of network-wide interference and transmit powers, a discrete-event simulation and an analytical model based on a queuing-theoretical approach are proposed. For the Enhanced Uplink, we develop analytical models considering independent and correlated other-cell interference.}, subject = {Mobilfunk}, language = {en} } @phdthesis{Tischler2008, author = {Tischler, German}, title = {Theory and Applications of Parametric Weighted Finite Automata}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-28145}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Parametric weighted finite automata (PWFA) are a multi-dimensional generalization of weighted finite automata. The expressiveness of PWFA contains the expressiveness of weighted finite automata as well as the expressiveness of affine iterated function systems. The thesis discusses theory and applications of PWFA. The properties of PWFA-definable sets are studied and it is shown that some fractal generator systems can be simulated using PWFA and that various real and complex functions can be represented by PWFA.
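The core computation behind weighted finite automata, which the Tischler entry generalizes to the parametric case, is a product of transition matrices. A minimal sketch follows; it shows a plain WFA, not a PWFA, and the binary-fraction encoding is a standard textbook example rather than anything taken from the thesis.

```python
# Sketch: evaluating a weighted finite automaton. The word's value is
# initial-vector . (product of per-symbol matrices) . final-vector.
# This particular WFA computes value(w) = 0.w interpreted in base 2.
import numpy as np

initial = np.array([1.0, 0.0])       # initial weight vector
final = np.array([0.0, 1.0])         # final weight vector
delta = {                            # one transition matrix per symbol
    "0": np.array([[0.5, 0.0], [0.0, 1.0]]),
    "1": np.array([[0.5, 0.5], [0.0, 1.0]]),
}

def wfa_value(word):
    v = initial
    for symbol in word:
        v = v @ delta[symbol]
    return float(v @ final)

print(wfa_value("11"))   # 0.11 in binary = 0.75
```

PWFA replace the scalar weights by parameterized, multi-dimensional ones, which is what lets them trace sets such as fractals and function graphs rather than single values.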
Furthermore, the decoding of PWFA and the interpretation of PWFA-definable sets are discussed.}, subject = {Automat}, language = {en} } @phdthesis{Martin2008, author = {Martin, R{\"u}diger}, title = {Resilience, Provisioning, and Control for the Network of the Future}, doi = {10.25972/OPUS-2504}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-28497}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {The Internet sees an ongoing transformation process from a single best-effort service network into a multi-service network. In addition to traditional applications like e-mail, WWW traffic, or file transfer, future generation networks (FGNs) will carry services with real-time constraints and stringent availability and reliability requirements like Voice over IP (VoIP), video conferencing, virtual private networks (VPNs) for finance, other real-time business applications, tele-medicine, or tele-robotics. Hence, quality of service (QoS) guarantees and resilience to failures are crucial characteristics of an FGN architecture. At the same time, network operations must be efficient. This necessitates sophisticated mechanisms for the provisioning and the control of future communication infrastructures. In this work we investigate such mechanisms for resilient FGNs. There are many aspects of the provisioning and control of resilient FGNs such as traffic matrix estimation, traffic characterization, traffic forecasting, mechanisms for QoS enforcement also during failure cases, resilient routing, or scalability concerns for future routing and addressing mechanisms. In this work we focus on three important aspects for which performance analysis can deliver substantial insights: load balancing for multipath Internet routing, fast resilience concepts, and advanced dimensioning techniques for resilient networks. Routing in modern communication networks is often based on multipath structures, e.g., equal-cost multipath routing (ECMP) in IP networks, to facilitate traffic engineering and resiliency. When multipath routing is applied, load balancing algorithms distribute the traffic over available paths towards the destination according to pre-configured distribution values. State-of-the-art load balancing algorithms operate either on the packet or the flow level. Packet level mechanisms achieve highly accurate traffic distributions, but are known to have negative effects on the performance of transport protocols and should not be applied. Flow level mechanisms avoid performance degradations, but at the expense of reduced accuracy. These inaccuracies may have unpredictable effects on link capacity requirements and complicate resource management. Thus, it is important to exactly understand the accuracy and dynamics of load balancing algorithms in order to be able to exercise better network control. Knowing about their weaknesses, it is also important to look for alternatives and to assess their applicability in different networking scenarios. This is the first aspect of this work. Component failures are inevitable during the operation of communication networks and lead to routing disruptions if no special precautions are taken. In case of a failure, the robust shortest-path routing of the Internet reconverges after some time to a state where all nodes are again reachable - provided physical connectivity still exists. But stringent availability and reliability criteria of new services make a fast reaction to failures obligatory for resilient FGNs.
This led to the development of fast reroute (FRR) concepts for MPLS and IP routing. The operations of MPLS-FRR have already been standardized. Still, the standards leave some degrees of freedom for the resilient path layout and it is important to understand the tradeoffs between different options for the path layout to efficiently provision resilient FGNs. In contrast, the standardization for IP-FRR is an ongoing process. The applicability and possible combinations of different concepts are still open issues. IP-FRR also facilitates a comprehensive resilience framework for IP routing covering all steps of the failure recovery cycle. These points constitute another aspect of this work. Finally, communication networks are usually over-provisioned, i.e., they have much more capacity installed than actually required during normal operation. This is a precaution for various challenges such as network element failures. An alternative to this capacity overprovisioning (CO) approach is admission control (AC). AC blocks new flows in case of imminent overload due to unanticipated events to protect the QoS for already admitted flows. On the one hand, CO is generally viewed as a simple mechanism, AC as a more complex mechanism that complicates the network control plane and raises interoperability issues. On the other hand, AC appears more cost-efficient than CO. To obtain advanced provisioning methods for resilient FGNs, it is important to find suitable models for irregular events, such as failures and different sources of overload, and to incorporate them into capacity dimensioning methods. This allows for a fair comparison between CO and AC in various situations and yields a better understanding of the strengths and weaknesses of both concepts. Such an advanced capacity dimensioning method for resilient FGNs represents the third aspect of this work.}, subject = {Backbone-Netz}, language = {en} } @phdthesis{Driewer2008, author = {Driewer, Frauke}, title = {Teleoperation Interfaces in Human-Robot Teams}, isbn = {978-3-923959-57-0}, doi = {10.25972/OPUS-2955}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-36351}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2008}, abstract = {Diese Arbeit besch{\"a}ftigt sich mit der Verbesserung von Mensch-Roboter Interaktion in Mensch-Roboter Teams f{\"u}r Teleoperation Szenarien, wie z.B. robotergest{\"u}tzte Feuerwehreins{\"a}tze. Hierbei werden ein Konzept und eine Architektur f{\"u}r ein System zur Unterst{\"u}tzung von Teleoperation von Mensch-Roboter Teams vorgestellt. Die Anforderungen an Informationsaustausch und -verarbeitung, insbesondere f{\"u}r die Anwendung Rettungseinsatz, werden ausgearbeitet. Weiterhin wird das Design der Benutzerschnittstellen f{\"u}r Mensch-Roboter Teams dargestellt und Prinzipien f{\"u}r Teleoperation-Systeme und Benutzerschnittstellen erarbeitet. Alle Studien und Ans{\"a}tze werden in einem Prototypen-System implementiert und in verschiedenen Benutzertests abgesichert. Erweiterungsm{\"o}glichkeiten zum Einbinden von 3D Sensordaten und die Darstellung auf Stereovisualisierungssystemen werden gezeigt.}, subject = {Robotik}, language = {en} } @phdthesis{Binzenhoefer2007, author = {Binzenh{\"o}fer, Andreas}, title = {Performance Analysis of Structured Overlay Networks}, doi = {10.25972/OPUS-2250}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26291}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Overlay networks establish logical connections between users on top of the physical network.
While randomly connected overlay networks provide only a best effort service, a new generation of structured overlay systems based on Distributed Hash Tables (DHTs) was proposed by the research community. However, there is still a lack of understanding of the performance of such DHTs. Additionally, those architectures are highly distributed and therefore appear as a black box to the operator. Yet an operator does not want to lose control over his system and needs to be able to continuously observe and examine its current state at runtime. This work addresses both problems and shows how the solutions can be combined into a more self-organizing overlay concept. First, we evaluate the performance of structured overlay networks under different aspects and thereby illuminate to what extent such architectures are able to support carrier-grade applications. Secondly, to enable operators to monitor and understand their deployed system in more detail, we introduce both active and passive methods to gather information about the current state of the overlay network.}, subject = {Overlay-Netz}, language = {en} } @phdthesis{Herrler2007, author = {Herrler, Rainer}, title = {Agentenbasierte Simulation zur Ablaufoptimierung in Krankenh{\"a}usern und anderen verteilten, dynamischen Umgebungen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-24483}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Verteilte dynamische Systeme unter lokalen und globalen Gesichtspunkten zu optimieren ist eine schwierige Aufgabe. Zwar sind grunds{\"a}tzliche Auswirkungen einzelner Maßnahmen h{\"a}ufig bekannt, doch durch widerstrebende Ziele, Wechselwirkungen zwischen Prozessen und Nebenwirkungen von Maßnahmen ist ein analytisches Vorgehen bei der Optimierung nicht m{\"o}glich. Besonders schwierig wird es, wenn lokale Einheiten einerseits ihre Ziele und Autonomie behalten sollen, aber durch zentrale Vorgaben bzw. Anreize so gesteuert werden sollen, dass ein {\"u}bergeordnetes Ziel erreicht wird. Ein praktisches Beispiel dieses allgemeinen Optimierungsproblems findet sich im Gesundheitswesen. Das Management von modernen Kliniken ist stets mit dem Problem konfrontiert, die Qualit{\"a}t der Pflege zu gew{\"a}hrleisten und gleichzeitig kosteneffizient zu arbeiten. Hier gilt es, unter gegebenen Rahmenbedingungen und bei Respektierung der Autonomie der Funktionseinheiten Optimierungsmaßnahmen zu finden und durchzuf{\"u}hren. Vorhandene Werkzeuge zur Simulation und Modellierung bieten f{\"u}r diese Aufgabe keine ausreichend guten Vorgehensmodelle und Modellierungsmechanismen. Die agentenbasierte Simulation erm{\"o}glicht die Abbildung solcher Systeme und die Durchf{\"u}hrung von Simulationsexperimenten zur Bewertung einzelner Maßnahmen. Es werden L{\"o}sungswege und Werkzeuge vorgestellt und evaluiert, die den Benutzer bei der Formalisierung des Wissens und der Modellierung solch komplexer Szenarien unterst{\"u}tzen und ein systematisches Vorgehen zur Optimierung erm{\"o}glichen.}, subject = {Simulation}, language = {de} } @phdthesis{Milbrandt2007, author = {Milbrandt, Jens}, title = {Performance Evaluation of Efficient Resource Management Concepts for Next Generation IP Networks}, doi = {10.25972/OPUS-1991}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-23332}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Next generation networks (NGNs) must integrate the services of current circuit-switched telephone networks and packet-switched data networks.
This convergence towards a unified communication infrastructure is motivated by the high capital expenditures (CAPEX) and operational expenditures (OPEX) caused by the coexistence of separate networks for voice and data. In the end, NGNs must offer the same services as these legacy networks and, therefore, they must provide a low-cost packet-switched solution with real-time transport capabilities for telephony and multimedia applications. In addition, NGNs must be fault-tolerant to guarantee user satisfaction and to support business-critical processes also in case of network failures. A key technology for the operation of NGNs is the Internet Protocol (IP), which evolved into a common and well-accepted standard for networking in the Internet during the last 25 years. There are two basically different approaches to achieve QoS in IP networks. With capacity overprovisioning (CO), an IP network is equipped with sufficient bandwidth such that network congestion becomes very unlikely and QoS is maintained most of the time. The second option to achieve QoS in IP networks is admission control (AC). AC represents a network-inherent intelligence that admits real-time traffic flows to a single link or an entire network only if enough resources are available such that the requirements on packet loss and delay can be met. Otherwise, the request of a new flow is blocked. This work focuses on resource management and control mechanisms for NGNs, in particular on AC and associated bandwidth allocation methods. The first contribution consists of a new link-oriented AC method called experience-based admission control (EBAC), which is a hybrid approach dealing with the problems inherent to conventional AC mechanisms like parameter-based or measurement-based AC (PBAC/MBAC). PBAC provides good QoS but suffers from poor resource utilization and, vice versa, MBAC uses resources efficiently but is susceptible to QoS violations. Hence, EBAC aims at increasing the resource efficiency while maintaining the QoS, which increases the revenues of ISPs and postpones their CAPEX for infrastructure upgrades. To show the advantages of EBAC, we first review today's AC approaches and then develop the concept of EBAC. EBAC is a simple mechanism that safely overbooks the capacity of a single link to increase its resource utilization. We evaluate the performance of EBAC by simulation under various traffic conditions. The second contribution concerns dynamic resource allocation in transport networks which implement a specific network admission control (NAC) architecture. In general, the performance of different NAC systems may be evaluated by conventional methods such as call blocking analysis, which has often been applied in the context of multi-service asynchronous transfer mode (ATM) networks. However, to yield more practical results than abstract blocking probabilities, we propose a new method to compare different AC approaches by their respective bandwidth requirements. To present our new method for comparing different AC systems, we first give an overview of network resource management (NRM) in general. Then we present the concept of adaptive bandwidth allocation (ABA) in capacity tunnels and illustrate the analytical performance evaluation framework to compare different AC systems by their capacity requirements. Different network characteristics influence the performance of ABA. Therefore, the impact of various traffic demand models and tunnel implementations, and the influence of resilience requirements, are investigated.
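A condensed sketch may help with the EBAC idea described above: admit a new flow only if the peak-rate sum of admitted flows, discounted by an experience-based overbooking factor, still fits the link capacity. The sliding-window derivation of the factor below (average ratio of reserved to actually measured bandwidth) is a simplification for illustration, not the thesis's exact mechanism.

```python
# Sketch of experience-based admission control (EBAC) on a single link.
class EBACLink:
    def __init__(self, capacity):
        self.capacity = capacity     # link capacity, e.g. in Mbit/s
        self.reserved = 0.0          # sum of peak rates of admitted flows
        self.history = []            # past (reserved, measured_load) samples

    def overbooking_factor(self):
        usable = [(r, m) for r, m in self.history if r > 0 and m > 0]
        if not usable:
            return 1.0               # no experience yet: no overbooking
        # experience: how much the real load stays below the reservations
        return max(1.0, sum(r / m for r, m in usable) / len(usable))

    def observe(self, measured_load):
        self.history = (self.history + [(self.reserved, measured_load)])[-100:]

    def admit(self, peak_rate):
        phi = self.overbooking_factor()
        if (self.reserved + peak_rate) / phi <= self.capacity:
            self.reserved += peak_rate
            return True
        return False

link = EBACLink(capacity=100.0)
print(link.admit(peak_rate=60.0))   # True: the link is empty
link.observe(measured_load=20.0)    # admitted flows use far less than peak
print(link.admit(peak_rate=60.0))   # True: overbooking factor is now 3.0
```

The "safe" part of the overbooking lies in the factor being derived from observed history and capped, so reservations can exceed capacity only to the extent that past traffic stayed below its declared peaks.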
In conclusion, the resources in NGNs must be exclusively dedicated to admitted traffic to guarantee QoS. For that purpose, robust and efficient concepts for NRM are required to control the requested bandwidth with regard to the available transmission capacity. Sophisticated AC will be a key function for NRM in NGNs and, therefore, efficient resource management concepts like experience-based admission control and adaptive bandwidth allocation for admission-controlled capacity tunnels, as presented in this work, are appealing for NGN solutions.}, subject = {Ressourcenmanagement}, language = {en} } @phdthesis{Travers2007, author = {Travers, Stephen}, title = {Structural Properties of NP-Hard Sets and Uniform Characterisations of Complexity Classes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-27124}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {This thesis is devoted to the study of computational complexity theory, a branch of theoretical computer science. Computational complexity theory investigates the inherent difficulty in designing efficient algorithms for computational problems. By doing so, it analyses the scalability of computational problems and algorithms and places practical limits on what computers can actually accomplish. Computational problems are categorised into complexity classes. Among the most important complexity classes are the class NP and the subclass of NP-complete problems, which comprises many important optimisation problems in the field of operations research. Moreover, with the P-NP-problem, the class NP represents the most important unsolved question in computer science. The first part of this thesis is devoted to the study of NP-complete and, more generally, NP-hard problems. It aims at improving our understanding of this important complexity class by systematically studying how altering NP-hard sets affects their NP-hardness. This research is related to longstanding open questions concerning the complexity of unions of disjoint NP-complete sets, and the existence of sparse NP-hard sets. The second part of the thesis is also dedicated to complexity classes but takes a different perspective: In a sense, after investigating the interior of complexity classes in the first part, the focus shifts to the description of complexity classes and thereby to the exterior in the second part. It deals with the description of complexity classes through leaf languages, a uniform framework which allows us to characterise a great variety of important complexity classes. The known concepts are complemented by a new leaf-language model. To a certain extent, this new approach combines the advantages of the known models. The presented results give evidence that the connection between the theory of formal languages and computational complexity theory might be closer than formerly known.}, subject = {Berechnungskomplexit{\"a}t}, language = {en} } @phdthesis{Binder2006, author = {Binder, Andreas}, title = {Die stochastische Wissenschaft und zwei Teilsysteme eines Web-basierten Informations- und Anwendungssystems zu ihrer Etablierung}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-26146}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Das stochastische Denken, die Bernoullische Stochastik und deren informationstechnologische Umsetzung namens Stochastikon stellen die Grundlage f{\"u}r das Verst{\"a}ndnis und die erfolgreiche Nutzung einer stochastischen Wissenschaft dar.
Im Rahmen dieser Arbeit erfolgt eine Kl{\"a}rung des Begriffs des stochastischen Denkens, eine anschauliche Darstellung der von Elart von Collani entwickelten Bernoullischen Stochastik und eine Beschreibung von Stochastikon. Dabei werden das Gesamtkonzept von Stochastikon sowie die Ziele, Aufgaben und die Realisierung der beiden Teilsysteme namens Mentor und Encyclopedia vorgestellt. Das stochastische Denken erlaubt eine realit{\"a}tsnahe Sichtweise der Dinge, d.h. eine Sichtweise, die mit den menschlichen Beobachtungen und Erfahrungen im Einklang steht und somit die Unsicherheit {\"u}ber zuk{\"u}nftige Entwicklungen ber{\"u}cksichtigt. Der in diesem Kontext verwendete Begriff der Unsicherheit bezieht sich ausschließlich auf zuk{\"u}nftige Entwicklungen und {\"a}ußert sich in Variabilit{\"a}t. Quellen der Unsicherheit sind einerseits die menschliche Ignoranz und andererseits der Zufall. Unter Ignoranz wird hierbei die Unwissenheit des Menschen {\"u}ber die unbekannten, aber feststehenden Fakten verstanden, die die Anfangsbedingungen der zuk{\"u}nftigen Entwicklung repr{\"a}sentieren. Die Bernoullische Stochastik liefert ein Regelwerk und erm{\"o}glicht die Entwicklung eines quantitativen Modells zur Beschreibung der Unsicherheit unter expliziter Einbeziehung der beiden Quellen Ignoranz und Zufall. Das Modell tr{\"a}gt den Namen Bernoulli-Raum und bildet die Grundlage f{\"u}r die Herleitung quantitativer Verfahren, um zuverl{\"a}ssige und genaue Aussagen sowohl {\"u}ber die nicht-existente zuf{\"a}llige Zukunft (Vorhersageverfahren) als auch {\"u}ber die unbekannte feststehende Vergangenheit (Messverfahren) zu treffen. Das Softwaresystem Stochastikon implementiert die Bernoullische Stochastik in Form einer Reihe autarker, miteinander kommunizierender Teilsysteme. Ziel des Teilsystems Encyclopedia ist die Bereitstellung und Bewertung stochastischen Wissens. Das Teilsystem Mentor dient der Unterst{\"u}tzung des Anwenders bei der Probleml{\"o}sungsfindung durch Identifikation eines richtigen Modells bzw. eines korrekten Bernoulli-Raums. Der L{\"o}sungsfindungsprozess selbst enth{\"a}lt keinerlei Unsicherheit. Die ganze Unsicherheit steckt in der L{\"o}sung, d.h. im Bernoulli-Raum, der die vorhandene Unwissenheit (Ignoranz) und den vorliegenden Zufall explizit abdeckt.}, subject = {Stochastik}, language = {de} } @misc{Hoehn2006, type = {Master Thesis}, author = {H{\"o}hn, Winfried}, title = {Mustererkennung in Fr{\"u}hdrucken}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-30429}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {No abstract available}, subject = {Mustererkennung}, language = {de} } @phdthesis{Atzmueller2006, author = {Atzm{\"u}ller, Martin}, title = {Knowledge-Intensive Subgroup Mining - Techniques for Automatic and Interactive Discovery}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-21004}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {Data mining has proved its significance in various domains and applications. As an important subfield of the general data mining task, subgroup mining can be used, e.g., for marketing purposes in business domains, or for quality profiling and analysis in medical domains. The goal is to efficiently discover novel, potentially useful and ultimately interesting knowledge.
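One way to make "interesting" operational in subgroup mining is a quality function, and a toy scan conveys the scoring idea at the heart of the Atzm{\"u}ller entry. The sketch scores single-attribute subgroups with the Piatetsky-Shapiro quality q = n(p - p0); SD-Map itself performs an exhaustive FP-growth-style search, which is not reproduced here, and the toy data are invented.

```python
# Toy subgroup scoring: q = n * (p - p0), where n is the subgroup size,
# p its target share, and p0 the overall target share.
data = [({"sex": "m", "smoker": "y"}, True),
        ({"sex": "m", "smoker": "n"}, False),
        ({"sex": "f", "smoker": "y"}, True),
        ({"sex": "f", "smoker": "n"}, True),
        ({"sex": "m", "smoker": "y"}, True)]

p0 = sum(target for _, target in data) / len(data)   # overall target share

def quality(attr, value):
    members = [t for record, t in data if record.get(attr) == value]
    if not members:
        return 0.0
    n, p = len(members), sum(members) / len(members)
    return n * (p - p0)

candidates = {(a, v) for record, _ in data for a, v in record.items()}
for a, v in sorted(candidates, key=lambda s: -quality(*s))[:3]:
    print(f"{a}={v}: q={quality(a, v):.2f}")
```

Real subgroup discovery searches conjunctions of such selectors, which is where exhaustive yet efficient algorithms and the background knowledge discussed below become essential.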
However, in real-world situations these requirements often cannot be fulfilled, e.g., if the applied methods do not scale for large data sets, if too many results are presented to the user, or if many of the discovered patterns are already known to the user. This thesis proposes a combination of several techniques in order to cope with the sketched problems: We discuss automatic methods, including heuristic and exhaustive approaches, and especially present the novel SD-Map algorithm for exhaustive subgroup discovery that is fast and effective. For an interactive approach we describe techniques for subgroup introspection and analysis, and we present advanced visualization methods, e.g., the zoomtable that directly shows the most important parameters of a subgroup and that can be used for optimization and exploration. We also describe various visualizations for subgroup comparison and evaluation in order to support the user during these essential steps. Furthermore, we propose to include into the mining process possibly available background knowledge that is easy to formalize. We can utilize the knowledge in many ways: to focus the search process, to restrict the search space, and ultimately to increase the efficiency of the discovery method. We especially present background knowledge to be applied for filtering the elements of the problem domain, for constructing abstractions, for aggregating values of attributes, and for the post-processing of the discovered set of patterns. Finally, the techniques are combined into a knowledge-intensive process supporting both automatic and interactive methods for subgroup mining. The practical significance of the proposed approach strongly depends on the available tools. We introduce the VIKAMINE system as a highly integrated environment for knowledge-intensive active subgroup mining. Also, we present an evaluation consisting of two parts: With respect to objective evaluation criteria, i.e., comparing the efficiency and the effectiveness of the subgroup discovery methods, we provide an experimental evaluation using generated data. For that task we present a novel data generator that allows a simple and intuitive specification of the data characteristics. The results of the experimental evaluation indicate that the novel SD-Map method outperforms the other described algorithms on data sets similar to the intended application, concerning both the efficiency and, for the heuristic methods, precision and recall. Subjective evaluation criteria include the user acceptance, the benefit of the approach, and the interestingness of the results. We present five case studies utilizing the presented techniques: The approach has been successfully implemented in medical and technical applications using real-world data sets. The method was very well accepted by the users, who were able to discover novel, useful, and interesting knowledge.}, subject = {Data Mining}, language = {en} } @phdthesis{Heck2005, author = {Heck, Klaus}, title = {Wireless LAN performance studies in the context of 4G networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-14896}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {Wireless communication is nothing new. The first data transmissions based on electromagnetic waves were successfully performed at the end of the 19th century. However, it took almost another century until the technology was ripe for the mass market.
The first mobile communication systems based on the transmission of digital data were introduced in the late 1980s. Within just a couple of years, they caused a revolution in the way people communicate. The number of cellular phones started to outnumber the fixed telephone lines in many countries and is still rising. New technologies in 3G systems, such as UMTS, allow higher data rates and support various kinds of multimedia services. Nevertheless, the end of the road in wireless communication is far from being reached. In the near future, the Internet and cellular phone systems are expected to be integrated into a new form of wireless system. Bandwidth requirements for a rich set of wireless services, e.g.\ video telephony, video streaming, online gaming, will be easily met. The transmission of voice data will just be another IP-based service. On the other hand, building such a system is by far not an easy task. The problems in the development of the UMTS system showed the high complexity of wireless systems with support for bandwidth-hungry, IP-based services. But the technological challenges are just one difficulty. Telecommunication systems are planned on a world-wide basis, such that standards bodies, governments, institutions, hardware vendors, and service providers have to find agreements and compromises on a number of different topics. In this work, we provide the reader with a discussion of many of the topics involved in the planning of a Wireless LAN system that is capable of being integrated into the 4th generation mobile networks (4G) that are being discussed nowadays. Therefore, it has to be able to cope with interactive voice and video traffic while still offering high data rates for best effort traffic. Let us assume a scenario where a huge office complex is completely covered with Wireless LAN access points. Different antenna systems are applied in order to reduce the number of access points that are needed on the one hand, while optimizing the coverage on the other. No additional infrastructure is implemented. Our goal is to evaluate whether the Wireless LAN technology is capable of dealing with the various demands of such a scenario. First, each single access point has to be capable of simultaneously supporting best-effort applications and applications demanding Quality of Service (QoS). The IT infrastructure in our scenario consists solely of Wireless LAN, such that it has to allow some users to surf the Web while others are involved in voice calls or video conferences. Then, there is the problem of overlapping cells. Users attached to one access point produce interference for others. However, the QoS support has to be maintained, which is not an easy task. Finally, there are nomadic users, who roam from one Wireless LAN cell to another even during a voice call. There are mechanisms in the standard that allow for mobility, but their capabilities for QoS support are yet to be studied. This shows the large number of unresolved issues when it comes to Wireless LAN in the context of 4G networks. In this work we want to tackle some of the problems.}, subject = {Drahtloses lokales Netz}, language = {en} } @phdthesis{Eichelberger2005, author = {Eichelberger, Holger}, title = {Aesthetics and automatic layout of UML class diagrams}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-14831}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {In the last years, visual methods have been introduced in industrial software production and teaching of software engineering.
In particular, the international standardization of a graphical software engineering language, the Unified Modeling Language (UML), was a reason for this tendency. Unfortunately, various problems exist in concrete realizations of tools, e.g. due to missing compliance with the standard. One problem is the automatic layout, which is required for a consistent automatic software design. The thesis derives reasons and criteria for an automatic layout method, which produces drawings of UML class diagrams according to the UML specification and issues of human-computer interaction, e.g. readability. A unique set of aesthetic criteria is combined from four different disciplines involved in this topic. Based on these aesthetic rules, a hierarchical layout algorithm is developed, analyzed, measured by specialized measuring techniques and compared to related work. Then, the realization of the algorithm as a Java framework is given as an architectural description. Finally, adaptations to anticipated future changes of the UML, improvements of the framework and example drawings of the implementation are given.}, subject = {UML}, language = {en} } @phdthesis{Boehler2005, author = {B{\"o}hler, Elmar}, title = {Algebraic closures in complexity theory}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-16106}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {We use algebraic closures and structures which are derived from these in complexity theory. We classify problems with Boolean circuits and Boolean constraints according to their complexity. We transfer algebraic structures to structural complexity. We use the generation problem to classify important complexity classes.}, subject = {Komplexit{\"a}tstheorie}, language = {en} } @phdthesis{Betz2005, author = {Betz, Christian}, title = {Scalable authoring of diagnostic case based training systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-17885}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {Diagnostic Case Based Training Systems (D-CBT) provide learners with a means to learn and exercise knowledge in a realistic context. In medical education, D-CBT Systems present virtual patients to the learners who are asked to examine, diagnose and state therapies for these patients. Due to a number of conflicting and changing requirements, e.g. time for learning or authoring effort, several systems have been developed so far. These systems range from simple, easy-to-use presentation systems to highly complex knowledge-based systems supporting explorative learning. This thesis presents an approach and tools to create D-CBT systems from existing sources (documents, e.g. dismissal records) using existing tools (word processors): Authors annotate and extend the documents to model the knowledge. A scalable knowledge representation is able to capture the content on multiple levels, from simple to highly structured knowledge. Thus, authoring of D-CBT systems requires fewer prerequisites and less pre-knowledge and is faster than approaches using specialized authoring environments. Also, authors can iteratively add and structure more knowledge to adapt training cases to their learners' needs.
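The scalable knowledge representation just described in the Betz entry can be sketched in a few lines: the same training case is captured at increasing levels of structure, from plain source text to annotated findings linked to formal concepts. All class and field names below are invented for illustration.

```python
# Sketch: one training case, multiple levels of structure.
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class Finding:
    text: str                          # annotated span from the document
    concept: Optional[str] = None      # optional link to a formal concept
    diagnosis_hint: Optional[str] = None

@dataclass
class TrainingCase:
    source_text: str                   # level 1: the unmodified record
    findings: list = field(default_factory=list)  # levels 2 and 3

case = TrainingCase("Patient reports chest pain radiating to the left arm.")
case.findings.append(Finding("chest pain", concept="pain:thoracic",
                             diagnosis_hint="acute coronary syndrome"))
print(len(case.findings), case.findings[0].concept)
```

Because a case remains valid with empty annotation lists, authors can start from the bare document and enrich it iteratively, which is the point of the scalable representation.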
The thesis also discusses the application of the same approach to other domains, especially to knowledge acquisition for the Semantic Web.}, subject = {Computerunterst{\"u}tztes Lernen}, language = {en} } @phdthesis{Kluge2004, author = {Kluge, Boris}, title = {Motion coordination for a mobile robot in dynamic environments}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-15508}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {Generating coordinated motion for a mobile robot operating in natural, continuously changing environments among moving obstacles such as humans is a complex task which requires the solution of various subproblems. In this thesis, we will cover the topics of perception and navigation in dynamic environments, as well as reasoning about the motion of the obstacles and of the robot itself. Perception is mainly considered for a laser range finder, and a corresponding method for obstacle detection and tracking is proposed. Network optimization algorithms are used for data association in the tracking step, resulting in considerable robustness with respect to clutter by small objects. Navigation in general is accomplished using an adaptation of the velocity obstacle approach to the given vehicle kinematics, and cooperative motion coordination between the robot and a human guide is achieved using an appropriate selection rule for collision-free velocities. Next, the robot is enabled to compare its path to the path of a human guide using one of a collection of presented distance measures, which permits the detection of exceptional conditions. Furthermore, a taxonomy for the assessment of situations concerning the robot is presented, and following a summary of existing approaches to more intelligent and comprehensive perception, we propose a method for obstruction detection. Finally, a new approach to reflective navigation behaviors is described where the robot reasons about intelligent moving obstacles in its environment, which allows adjusting the character of the robot motion from considerate and defensive to more self-confident and aggressive behaviors.}, subject = {Bewegungsablauf}, language = {de} } @phdthesis{Menth2004, author = {Menth, Michael}, title = {Efficient admission control and routing for resilient communication networks}, doi = {10.25972/OPUS-846}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9949}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {This work is subdivided into two main areas: resilient admission control and resilient routing. The work gives an overview of the state of the art of quality of service mechanisms in communication networks and proposes a categorization of admission control (AC) methods. These approaches are investigated regarding performance, more precisely, regarding the potential resource utilization by dimensioning the capacity for a network with a given topology, traffic matrix, and a required flow blocking probability. In case of a failure, the affected traffic is rerouted over backup paths, which increases the traffic rate on the respective links. To guarantee the effectiveness of admission control also in failure scenarios, the increased traffic rate must be taken into account for capacity dimensioning and leads to resilient AC. Capacity dimensioning is not feasible for existing networks with already given link capacities.
For the application of resilient network admission control (NAC) in this case, the size of distributed AC budgets must be adapted according to the traffic matrix in such a way that the maximum blocking probability for all flows is minimized and that the capacity of all links is not exceeded by the admissible traffic rate in any failure scenario. Several algorithms for the solution of that problem are presented and compared regarding their efficiency and fairness. A prototype for resilient AC was implemented in the laboratories of Siemens AG in Munich within the scope of the project KING. Resilience requires additional capacity on the backup paths for failure scenarios. The amount of this backup capacity depends on the routing and can be minimized by routing optimization. New protection switching mechanisms are presented that quickly divert the traffic around outage locations. They are simple and can be implemented, e.g., by MPLS technology. The Self-Protecting Multi-Path (SPM) is a multi-path consisting of disjoint partial paths. The traffic is distributed over all faultless partial paths according to an optimized load balancing function both in the working case and in failure scenarios. Performance studies show that the network topology and the traffic matrix also influence the amount of required backup capacity significantly. The example of the COST-239 network illustrates that conventional shortest path routing may need 50\% more capacity than the optimized SPM if all single link and node failures are protected.}, subject = {Kommunikation}, language = {en} }
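The Self-Protecting Multi-Path closing the Menth entry lends itself to a short final sketch: traffic is split over disjoint partial paths by a load-balancing function and redistributed over the surviving paths after a failure. The proportional redistribution shown here is an assumption for illustration; the thesis optimizes the failure-case distribution functions explicitly.

```python
# Sketch: SPM-style load distribution over disjoint partial paths.
def spm_distribution(weights, failed=frozenset()):
    """weights: optimized traffic share per partial path, failure-free case."""
    alive = {path: w for path, w in weights.items() if path not in failed}
    if not alive:
        raise RuntimeError("all partial paths failed")
    total = sum(alive.values())
    return {path: w / total for path, w in alive.items()}

weights = {"path_a": 0.5, "path_b": 0.3, "path_c": 0.2}
print(spm_distribution(weights))                     # failure-free case
print(spm_distribution(weights, failed={"path_b"}))  # path_b outage
```

Because every partial path already carries traffic in the working case, no signaling is needed to activate a backup after a failure; only the distribution function changes, which is what makes the scheme fast and simple to implement, e.g. with MPLS.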