@phdthesis{Fehler2010,
  author = {Fehler, Manuel},
  title = {Kalibrierung Agenten-basierter Simulationen},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64762},
  school = {Universit{\"a}t W{\"u}rzburg},
  year = {2010},
  abstract = {This thesis addresses the problem of calibrating agent-based simulations (ABS), i.e., the problem of adjusting the parameter values of an agent-based simulation model so that valid simulation behavior is achieved. The calibration problem for simulations as such is not new and is an established part of research in classical simulation paradigms such as macro-simulation. Compared to the calibration problems considered there, however, calibrating an ABS poses a number of additional challenges that make the direct application of existing calibration methods within a limited time difficult or even infeasible. Solving these problems is the focus of this dissertation: the goal is to support the user in calibrating an ABS on the basis of insufficient, potentially erroneous data and knowledge. Three main problems are to be solved: 1) simplifying the calibration of large sets of agent parameters on the micro level of an ABS by exploiting the specific structure of ABS (namely, their composition from a set of agent models); 2) calibrating an ABS so that valid simulation behavior is produced on all relevant observation levels (at least the micro and the macro level), where, as an aggravating constraint, the calibration must be performed in the presence of a macro-micro knowledge gap; 3) calibrating an ABS on the micro level when the data available for validating the behavior of individual agent models are insufficient and potentially distorted. To this end, this thesis develops the so-called macro-micro method for calibrating agent-based simulations. The method consists of a base procedure that is extended by several supplementary techniques in the course of the thesis. The macro-micro method and its extensions are intended to make model calibration possible despite heavily noisy data and limited knowledge about the causal relationships in the original system, and to accelerate the calibration process: 1) Macro-micro calibration method: the macro-micro method developed in this thesis supports the user through a combined calibration on the micro and the macro observation level, which can be extended by intermediate levels if necessary. The basic idea of the method is to split the calibration problem into one on the aggregated behavior level and one on the level of micro agent behavior. On the macro level, valid ideal aggregated behavior models (IVM) of the agents are sought. On the micro level, the individual agent models are calibrated, based on the desired overall behavior and the determined IVM, so that the resulting simulation behavior is valid on both the micro and the macro level. 2) Extension 1: robust calibration: to enable the handling of potentially noisy validation criteria (i.e., noisy data about the original system on which the simulation's validation criteria are based) and noisy model parts during the calibration of an ABS, a robust calibration technique is developed for use in the macro-micro method. 3) Extension 2: calibration with heterogeneity search: as a second extension of the macro-micro method, a technique is developed that addresses the problem of the unclear level of detail of an ABS on the level of parameter values. In principle, every agent may use different parameter values, even though less heterogeneity would suffice to produce valid behavior. The developed extension therefore tries to determine a suitable degree of heterogeneity for the agents' parameter values during calibration. A heterogeneity configuration is understood here as a partition of the simulated agents into groups whose members share the same parameter values. The heterogeneity search serves to find a compromise between the necessity of searching very large parameter spaces and the desire to keep the search space as small as possible.},
  subject = {Computersimulation},
  language = {de}
}
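Since the abstract above describes the macro-micro procedure only in prose, here is a minimal, hypothetical Python sketch of the two-level idea: accept only parameter candidates that are valid on the aggregate (macro) level, then pick among those the candidate closest to an ideal aggregated behavior model (IVM) on the agent (micro) level. The toy agent model, the grid search, the threshold, and all names are illustrative assumptions, not the procedure developed in the thesis.

```python
# Minimal, hypothetical sketch of the two-level idea: accept parameter
# candidates that are valid on the macro level, then pick the one closest to
# the ideal aggregated behavior model (IVM) on the micro level. The toy agent
# model, grid search, and threshold are invented, not the thesis' procedure.
import random

def simulate(rate, n_agents=100, steps=50, seed=1):
    """Toy ABS: each agent acts with probability `rate` per step. Returns
    per-agent activity counts (micro view) and their mean (macro view)."""
    rng = random.Random(seed)
    counts = [sum(rng.random() < rate for _ in range(steps)) for _ in range(n_agents)]
    return counts, sum(counts) / len(counts)

def macro_micro_calibrate(macro_target, ivm_per_agent, tol=1.0):
    candidates = []
    for rate in (r / 100 for r in range(1, 100)):
        micro, macro = simulate(rate)
        if abs(macro - macro_target) <= tol:               # macro validity check
            err = sum(abs(c - ivm_per_agent) for c in micro) / len(micro)
            candidates.append((err, rate))                 # micro deviation from IVM
    return min(candidates)[1]                              # best micro fit

print(macro_micro_calibrate(macro_target=10.0, ivm_per_agent=10.0))
```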
@phdthesis{Henjes2010,
  author = {Henjes, Robert},
  title = {Performance Evaluation of Publish/Subscribe Middleware Architectures},
  doi = {10.25972/OPUS-4536},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-53388},
  school = {Universit{\"a}t W{\"u}rzburg},
  year = {2010},
  abstract = {When developing modern applications, it is necessary to ensure efficient, high-performance communication between applications. In current environments, middleware that supports the publish/subscribe communication pattern is used. With this pattern, a publisher sends information encapsulated in messages to the middleware, while a subscriber registers its interests with the middleware and receives the messages that match them. This monograph describes three steps to determine the performance of such a system. In a first step, the message throughput of a publish/subscribe system is measured in different scenarios using a Java Message Service (JMS) based implementation. In the second step, the maximum achievable message throughput is described by models adapted to the measurements, depending on the filter complexity and the replication grade. Using these models, the performance characteristics of a specific system in a given scenario can be determined. These numbers are then used in the queuing model described in the third part of the thesis, which supports the dimensioning of a system in realistic scenarios. Additionally, we introduce a method to approximate an M/G/1 system numerically in an efficient way, which can be used for real-time analysis to predict the expected performance in a given scenario. Finally, the analytical model is used to investigate different options for ensuring the scalability of the maximum achievable message throughput of the overall system.},
  subject = {Middleware},
  language = {en}
}
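The abstract mentions an efficient numerical M/G/1 approximation without stating it. As background only, the sketch below evaluates the classical Pollaczek-Khinchine formula for the mean waiting time in an M/G/1 queue, the kind of model such dimensioning builds on; the example figures are invented.

```python
# Background sketch only: the classical Pollaczek-Khinchine formula for the
# mean waiting time in an M/G/1 queue. This is the textbook formula, not the
# thesis' own numerical approximation method.

def mg1_mean_waiting_time(arrival_rate, service_mean, service_scv):
    """E[W] = lambda * E[S^2] / (2 * (1 - rho)), with rho = lambda * E[S]
    and E[S^2] = (scv + 1) * E[S]^2 (scv: squared coefficient of variation)."""
    rho = arrival_rate * service_mean
    if rho >= 1.0:
        raise ValueError("unstable system: utilization must be < 1")
    second_moment = (service_scv + 1.0) * service_mean ** 2
    return arrival_rate * second_moment / (2.0 * (1.0 - rho))

# Invented example: 800 msgs/s arrive, filtering takes 1 ms on average, scv = 2.
print(mg1_mean_waiting_time(800.0, 0.001, 2.0))  # ~0.006 s mean waiting time
```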
@phdthesis{Klein2010,
  author = {Klein, Alexander},
  title = {Performance Issues of MAC and Routing Protocols in Wireless Sensor Networks},
  doi = {10.25972/OPUS-4465},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-52870},
  school = {Universit{\"a}t W{\"u}rzburg},
  year = {2010},
  abstract = {The focus of this work lies on the communication issues of Medium Access Control (MAC) and routing protocols in the context of Wireless Sensor Networks (WSNs). The communication challenges in these networks mainly result from high node density, low bandwidth, tight energy constraints, and the hardware limitations of low-power transceivers in terms of memory, computational power, and sensing capabilities. For this reason, the structure of a WSN is kept as simple as possible to minimize the impact of these issues. Most WSNs therefore apply a simple one-hop star topology, since multi-hop communication places high demands on the routing protocol and increases the bandwidth requirements of the network. Moreover, medium access becomes a challenging problem because low-power transceivers are very limited in their sensing capabilities. The first contribution is the Backoff Preamble-based MAC Protocol with Sequential Contention Resolution (BPS-MAC), which is designed to overcome the limitations of low-power transceivers. Two communication issues, namely the Clear Channel Assessment (CCA) delay and the turnaround time, are directly addressed by the protocol. The CCA delay is the time the transceiver requires to detect a busy radio channel, while the turnaround time is the time required to switch between receive and transmit mode. Standard Carrier Sense Multiple Access (CSMA) protocols do not achieve high performance in terms of packet loss if the traffic is highly correlated, because the transceiver cannot sense the medium during the switching phase. A node may therefore start to transmit data while another node is already transmitting, since it sensed an idle medium just before switching its transceiver from receive to transmit mode. The BPS-MAC protocol uses a new sequential preamble-based medium access strategy which can be adapted to the hardware capabilities of the transceivers. The protocol achieves a very low packet loss rate, even in wireless networks with high node density and event-driven traffic, without the need for synchronization. This makes the protocol attractive for applications such as structural health monitoring, where event suppression is not an option. Moreover, acknowledgments and complex retransmission strategies become almost unnecessary, since the sequential preamble-based contention resolution mechanism minimizes the collision probability. However, packets can still be lost as a consequence of interference or other effects on signal propagation. The second contribution is a new routing protocol which is able to quickly detect topology changes without generating a large amount of overhead. The key characteristics of the Statistic-Based Routing (SBR) protocol are high end-to-end reliability (in fixed and mobile networks), load-balancing capabilities, a smooth continuous routing metric, quick adaptation to changing network conditions, low processing and memory requirements, low overhead, support for unidirectional links, and simplicity. The protocol can establish routes in a hybrid or a proactive mode and uses an adaptive continuous routing metric, which makes it very flexible in terms of scalability while maintaining stable routes. The hybrid mode is optimized for low-power WSNs, since routes are only established on demand. It differs from reactive routing strategies in that routing messages are transmitted periodically to maintain already established routes.
However, the protocol stops transmitting routing messages if no data packets are sent for a certain period, in order to minimize the routing overhead and the energy consumption. The proactive mode is designed for high-data-rate networks with fewer energy constraints. In this mode, the protocol periodically transmits routing messages to establish routes proactively, even in the absence of data traffic; nodes can thus transmit data immediately, since the route to the destination is already established in advance. In addition, a new delay-based forwarding strategy for routing messages is introduced. The forwarding strategy is part of SBR but can also be applied to many other routing protocols in order to shape the established topology. The strategy can be used, e.g., in mobile networks, to decrease packet loss by deferring routing messages according to the neighbor change rate: nodes with a stable neighborhood forward messages faster than nodes within a quickly changing neighborhood. As a result, routes are established through nodes with correlated movement, which leads to fewer topology changes due to longer link durations.},
  subject = {Routing},
  language = {en}
}
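The delay-based forwarding strategy of SBR is described above only qualitatively. The following sketch shows one plausible realization; the linear mapping from neighbor change rate to forwarding delay and its constants are assumptions for illustration, not the exact function used by SBR.

```python
# Hypothetical realization of the delay-based forwarding idea: defer routing
# messages in proportion to the local neighbor change rate so that nodes with
# stable neighborhoods forward first. The linear mapping and the constants are
# assumptions for illustration, not the exact function used by SBR.

def forwarding_delay(neighbor_change_rate, base_delay=0.01, max_delay=0.5):
    """Map a neighbor change rate (changes per second) to a delay in seconds."""
    scaled = min(neighbor_change_rate, 1.0)   # clamp; 1 change/s treated as "fast"
    return base_delay + scaled * (max_delay - base_delay)

for rate in (0.0, 0.2, 1.0):
    print(f"change rate {rate:.1f}/s -> forward after {forwarding_delay(rate) * 1000:.0f} ms")
```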
@unpublished{Nassourou2010a,
  author = {Nassourou, Mohamadou},
  title = {Markup overlap: Improving Fragmentation Method},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49084},
  year = {2010},
  abstract = {Overlapping is the common term for documents whose structural dimensions cannot be adequately represented using a tree structure, for instance a quotation that starts in one verse and ends in another. The problem of overlapping hierarchies is a recurring one and has been addressed by a variety of approaches, both XML-based and non-XML. The XML-based solutions include multiple documents, empty elements, fragmentation, out-of-line markup, JITT, and BUVH; the non-XML approaches comprise CONCUR/XCONCUR, MECS, LMNL, and others. This paper briefly surveys the state of the art in overlapping hierarchies and introduces two variations on the TEI fragmentation markup that offer several advantages.},
  subject = {XML},
  language = {en}
}

@unpublished{Nassourou2010b,
  author = {Nassourou, Mohamadou},
  title = {Understanding the Vex Rendering Engine},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51333},
  year = {2010},
  abstract = {The Visual Editor for XML (Vex) [1], used by TextGrid [2] and other applications, has a rendering and a layout engine. The layout engine is well documented, but the rendering engine is not. This lack of documentation has made refactoring and extending the editor hard and tedious; for instance, many CSS 2.1 and upcoming CSS3 properties have not been implemented. Software developers in projects such as TextGrid that use Vex would like to update its CSS rendering engine in order to provide advanced user interfaces and to support different document types. To minimize the effort of extending Vex's functionality, I found it beneficial to write basic documentation about the Vex software architecture in general and its CSS rendering engine in particular. The documentation is mainly based on architectural layered diagrams, which can help developers understand a system's source code faster and more easily in order to alter it and fix errors. This paper is written to directly support exploration and comprehension of the Vex source code. It discusses the Vex software architecture, describing the organization of the packages that make up the software, the architecture of the CSS rendering engine, and an algorithm explaining the engine's working principle.},
  subject = {Cascading Style Sheets},
  language = {en}
}

@unpublished{Nassourou2010c,
  author = {Nassourou, Mohamadou},
  title = {Reference Architecture, Design of Cascading Style Sheets Processing Model},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-51328},
  year = {2010},
  abstract = {The technique of using Cascading Style Sheets (CSS) to format and present structured data is called a CSS processing model. For instance, a CSS processing model for XML documents describes the steps involved in formatting and presenting XML documents on screen or on paper. Many software applications, such as browsers and XML editors, have their own CSS processing models as part of their rendering engines. Each browser renders CSS layout according to its own processing model, and as a result inconsistencies in the support of CSS features arise: some browsers support more CSS features than others, and the rendering itself varies. Moreover, the W3C standards are not fully adhered to by some browsers, such as Internet Explorer. Test suites and other hacks and filters cannot definitively solve these problems, because such solutions are temporary and fragile. To mitigate these inconsistencies and browser compatibility issues with respect to CSS, a reference CSS processing model is needed; by extension, it could even enable interoperability across CSS rendering engines. A reference architecture would provide a common software architecture and common interfaces, and would facilitate refactoring, reuse, and automated unit testing. In [2], a reference architecture for browsers has been proposed. However, it is a macro-level reference model that does not consider the individual components of rendering and layout engines separately. This paper discusses an attempt to develop a reference architecture for CSS processing models. In addition, the rendering and layout engines of the Vex editor [3], as well as an extended version of the editor used in the TextGrid project [5], are presented in order to validate the proposed reference architecture.},
  subject = {Cascading Style Sheets},
  language = {en}
}
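As a concrete taste of what a reference CSS processing model has to pin down, the sketch below implements one small stage of the cascade: selector specificity for simple selectors per CSS 2.1. It is an illustrative fragment, not part of the proposed reference architecture; combinators, attribute selectors, and pseudo-classes are deliberately omitted.

```python
# Illustrative fragment, not part of the proposed reference architecture:
# selector specificity for simple selectors per CSS 2.1, one small stage that
# any CSS processing model must define precisely.

def specificity(selector: str):
    """Return (id, class, type) counts; higher tuples win the cascade."""
    ids = classes = types = 0
    for token in selector.replace(">", " ").split():
        # split compound tokens like "div.note#x" into "div", ".note", "#x"
        for part in token.replace("#", " #").replace(".", " .").split():
            if part.startswith("#"):
                ids += 1
            elif part.startswith("."):
                classes += 1
            elif part != "*":          # the universal selector adds nothing
                types += 1
    return (ids, classes, types)

rules = ["p", "div p", ".note", "#toc .note p"]
print(max(rules, key=specificity))     # "#toc .note p" wins
```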
@unpublished{Nassourou2010d,
  author = {Nassourou, Mohamadou},
  title = {Empirical Study on Screen Scraping Web Service Creation: Case of Rhein-Main-Verkehrsverbund (RMV)},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49396},
  year = {2010},
  abstract = {The Internet is the biggest database that science and technology have ever produced. The World Wide Web is a large repository of information that many applications cannot use for automation, because its content is aimed at human readers. One solution to this automation problem is to develop wrappers. Wrapping is a process whereby extracted unstructured information is transformed into a more structured format such as XML, which can then be provided as a web service to other applications. A web service is a web page whose content is well structured so that a computer program can consume it automatically. This paper describes the steps involved in manually constructing wrappers in order to generate web services automatically.},
  subject = {HTML},
  language = {en}
}

@unpublished{Nassourou2010e,
  author = {Nassourou, Mohamadou},
  title = {Java Web Frameworks Which One to Choose?},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-49407},
  year = {2010},
  abstract = {This article discusses the web frameworks that are available to a software developer in the Java language. It introduces the MVC paradigm and some frameworks that implement it. The article presents an overview of the Struts, Spring MVC, and JSF frameworks, as well as guidelines for selecting one of them as a development environment.},
  subject = {Java Frameworks},
  language = {en}
}

@unpublished{Nassourou2010f,
  author = {Nassourou, Mohamadou},
  title = {Doing Webservices Composition by Content-based Mashup: Example of a Web-based Simulator for Itinerary Planning},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50036},
  year = {2010},
  abstract = {Web services composition is traditionally carried out using composition technologies such as the Business Process Execution Language (BPEL) [1] and the Web Service Choreography Interface (WSCI) [2]. Composition involves the process of web service discovery, invocation, and composition. However, these technologies are neither easy nor flexible enough, because they are mainly developer-centric. Moreover, the majority of websites have not yet entered the world of web services, although they have very important and useful information to offer. Is it because they have not understood the usefulness of web services, or is it because of the costs? Whatever the answers to these questions, time and money are definitely required in order to create and offer web services. To avoid these expenditures, wrappers [7] that automatically generate web services from websites would be a cheaper and easier solution. Mashups offer a different way of composing web services. In the web environment, a mashup is a web application that brings together data from several sources using web services, APIs, wrappers, and so on, in order to create an entirely new application that was not provided before. This paper first presents an overview of mashups and the mashup-based process of web service invocation and composition, and then describes an example of a web-based simulator for a navigation system in Germany.},
  subject = {Mashup},
  language = {en}
}
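The two wrapper-related entries above (screen scraping and mashup composition) both revolve around turning human-oriented HTML into structured, machine-consumable data. The sketch below shows that wrapping step in miniature; the HTML fragment, element names, and regular expression are invented for illustration and do not reflect RMV's actual markup.

```python
# Miniature version of the wrapping step: turn a human-oriented HTML fragment
# into structured XML that could back a web service. The HTML shape, element
# names, and regular expression are invented and do not reflect RMV's markup.
import re
import xml.etree.ElementTree as ET

html = """
<tr><td>Frankfurt Hbf</td><td>14:32</td></tr>
<tr><td>Mainz Hbf</td><td>15:01</td></tr>
"""

root = ET.Element("departures")
for station, time in re.findall(r"<td>(.*?)</td><td>(\d\d:\d\d)</td>", html):
    entry = ET.SubElement(root, "departure")
    ET.SubElement(entry, "station").text = station
    ET.SubElement(entry, "time").text = time

print(ET.tostring(root, encoding="unicode"))
```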
@phdthesis{Oechsner2010,
  author = {Oechsner, Simon},
  title = {Performance Challenges and Optimization Potential of Peer-to-Peer Overlay Technologies},
  doi = {10.25972/OPUS-4159},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-50015},
  school = {Universit{\"a}t W{\"u}rzburg},
  year = {2010},
  abstract = {In today's Internet, building overlay structures to provide a service is becoming more and more common. This approach allows client resources to be utilized and is therefore more scalable than a client-server model in this respect. However, in these architectures the quality of the provided service depends on the clients and is therefore more complex to manage. Resource utilization, both at the clients themselves and in the underlying network, determines the efficiency of the overlay application. Here, a trade-off exists between the resource providers and the end users that can be tuned via overlay mechanisms; resource management and traffic management are thus always quality-of-service management as well. In this monograph, the three currently most significant and most widely used overlay types in the Internet are considered. These overlays are implemented in popular applications which have only recently gained importance, so these overlay networks still face real-world technical challenges of high practical relevance. We identify the specific issues for each of the considered overlays and show how their optimization affects the trade-offs between resource efficiency and service quality. In doing so, we supply new insights and system knowledge that previous work does not provide.},
  subject = {Overlay-Netz},
  language = {en}
}