@unpublished{Kiesler1992,
  author = {Kiesler, Reinhard},
  title = {Por uma fon{\'e}tica ar{\'a}bigo-portuguesa},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-83519},
  year = {1992},
  abstract = {The article deals with the phonological adaptation of Arabic loanwords in Portuguese. It covers stress, vocalism, and consonantism, as well as various context-dependent sound-change phenomena.},
  subject = {Phonetik},
  language = {pt}
}

@unpublished{Buhr2012,
  author = {Buhr, Christian},
  title = {Arthurische Vermittlung: "Tristan" von den R{\"a}ndern her gelesen},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78765},
  year = {2012},
  abstract = {Although the narrative syntagma of the Tristan legend can unfold without further additions, the love story of Tristan and Isolde was already closely interwoven with the legends of King Arthur and his knights by the middle of the twelfth century. The question of how the Celtic Tristan material relates to the Arthurian romance has, however, so far been discussed in medieval studies only tentatively and mostly with little controversy. Yet it is precisely the contingent fusion of the two literary worlds into an Arthurianized Tristan romance in Eilhart and B{\'e}roul that makes it possible to examine the central hinge points of two narrative traditions and to ask about the possibilities and limits of narrating love around 1200.},
  subject = {Artus},
  language = {de}
}

@unpublished{GeiselhartGielenLazaretal.2013,
  author = {Geiselhart, Roman and Gielen, Rob H. and Lazar, Mircea and Wirth, Fabian R.},
  title = {An Alternative Converse Lyapunov Theorem for Discrete-Time Systems},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78512},
  year = {2013},
  abstract = {This paper presents an alternative approach for obtaining a converse Lyapunov theorem for discrete-time systems. The proposed approach is constructive, as it provides an explicit Lyapunov function. The developed converse theorem establishes the existence of global Lyapunov functions for globally exponentially stable (GES) systems and of semi-global practical Lyapunov functions for globally asymptotically stable systems. Furthermore, for specific classes of systems, the developed converse theorem can be used to establish the non-conservatism of a particular type of Lyapunov function. Most notably, a proof that conewise linear Lyapunov functions are non-conservative for GES conewise linear systems is given and, as a by-product, a tractable construction of polyhedral Lyapunov functions for linear systems is attained.},
  subject = {Ljapunov-Funktion},
  language = {en}
}

@unpublished{VolkmannBockSeibtetal.2012,
  author = {Volkmann, Armin and Bock, Sina and Seibt, Daniela and K{\"u}mmet, Sonja and Weiß, Michael and Dietz, Elisabeth and Huss, Patrick and Heer, Anna and El Hassan, Naitelqadi},
  title = {Geisteswissenschaft und Geografische Informationssysteme (GIS): Erstellung von Kartierungen mit kommerzieller und Open Source Software im Vergleich},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-74470},
  year = {2012},
  abstract = {The use of geographic information systems (GIS) offers the humanities numerous approaches for generating new knowledge. GIS software packages differ, however, in their suitability for humanities research questions.
Two commercial and four open source GIS programs were therefore tested: MapInfo, ArcGIS, Quantum GIS, gvSIG, DIVA-GIS, and SAGA. MapInfo stands out for its great user-friendliness, especially for GIS beginners, but its purchase costs are quite high. ArcGIS offers the widest range of functions, yet allows little or no "intuitive" use; in addition, its running costs are particularly high owing to elaborate subscription licensing. Quantum GIS is free software that is user-friendly and gives even beginners an easy start; hundreds of extensions make it very powerful and universally applicable. gvSIG is not entirely easy to use, especially since its documentation is only fragmentary, but its large range of functions makes it a fully fledged GIS, even if some complementary functions are missing. DIVA-GIS allows a quick start thanks to its good documentation, but its restricted functionality soon reaches its limits. SAGA, by contrast, met all the requirements posed here, so that, despite its smaller number of extensions, it represents together with Quantum GIS a genuine open source alternative to commercial GIS programs.},
  subject = {Geoinformationssystem},
  language = {de}
}

@unpublished{Volkmann2012a,
  author = {Volkmann, Armin},
  title = {Stempelverzierte Keramikfunde der V{\"o}lkerwanderungszeit im Barbaricum - Neue Funde vom fr{\"u}hmittelalterlichen Burgwall bei Kopchin (Lkr. Bautzen)},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-74432},
  year = {2012},
  abstract = {A systematic review of the finds from the early medieval hillfort of Kopchin in Upper Lusatia identified several pottery sherds that are probably older than previously assumed and date to the Migration Period. This is of particular relevance because a settlement gap in the 5th-7th century AD has traditionally been postulated for north-eastern Germany. This hiatus is evidently due in part to the difficulty of securely dating the often rather unspecific pottery types. As knowledge of these ceramics has grown in recent years, several Migration Period sites have been located, especially in northern Brandenburg and in German-Polish Pomerania. In north-eastern Saxony, however, the singular finds of the 5th-6th century AD presented here are so far without secure parallels, even though several Migration Period sites have meanwhile been recognized in the region.},
  subject = {Germanen},
  language = {de}
}

@unpublished{Volkmann2012b,
  author = {Volkmann, Armin},
  title = {Eisenproduktionswerkpl{\"a}tze der sp{\"a}ten r{\"o}mischen Kaiserzeit (3.-5. Jh. AD) im inneren Barbaricum},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-74420},
  year = {2012},
  abstract = {Systematic surveys in southern Brandenburg uncovered numerous previously unknown sites, including near the devastated villages of Klein G{\"o}rigk and Kausche (cf. Fig. 1). They illustrate the wealth of finds in this barren landscape, an "archaeological window" onto a region intensively studied as a case example. The very numerous workshop sites of the late Roman Iron Age (3rd-5th century AD) attest to mass iron production that went far beyond local needs and formed the basis for trade.
Interestingly, no contemporaneous settlements or cemeteries have been discovered in the iron-smelting centre of the Niederlausitzer Grenzwall; these lie somewhat further away, in the more fertile lowland and basin landscapes of the surrounding area. The workshop sites were thus visited only temporarily for iron smelting. The stereotypically laid-out iron production sites were erected in the densely wooded area in the immediate vicinity of locally occurring bog iron ore. Since this mass iron production required extremely large amounts of charcoal, negative consequences for the prehistoric environment must also be assumed. Evidence of a presumed "ecological crisis" at the end of the late Germanic culture (mid-5th century AD), however, has so far not been securely demonstrated.},
  subject = {Eisenproduktion},
  language = {de}
}

@unpublished{Reiss2012a,
  author = {Reiss, Harald},
  title = {Time scales and existence of time holes in non-transparent media},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-73554},
  year = {2012},
  abstract = {The analysis presented in this paper applies to experimental situations where observers or objects to be studied, all at stationary positions, are located in environments whose optical thickness is strongly different. Non-transparent media comprise thin metallic films, packed or fluidised beds, superconductors, the Earth's crust, and even dark clouds and other cosmological objects. The analysis applies mapping functions that correlate physical events, e, in non-transparent media, with their images, f(e), tentatively located on a standard physical time scale. The analysis demonstrates, however, that physical time, in its rigorous sense, does not exist under non-transparency conditions. A proof of this conclusion is attempted in three steps: (i) the theorem "there is no time without space and events" is accepted; (ii) images f[e(s,t)] do not constitute a dense, uncountably infinite set; and (iii) sets of images that are not uncountably infinite do not create physical time but only time-like sequences. As a consequence, mapping f[e(s,t)] in non-transparent space does not create a physical analogue to the mathematical structure of the ordered, dense half-set R+ of real numbers, and reverse mapping, f^{-1}f[e(s,t)], the mathematical inverse problem, would not allow unique identification and reconstruction of original events from their images. In these cases, causality, as well as the invariance of physical processes under time reversal, might be violated. An interesting problem is whether temporal cloaking (a time hole) in a transparent medium, as very recently reported in the literature, can be explained by the present analysis. The existence of time holes could perhaps be possible, not in transparent but in non-transparent media, as follows from the sequence of images, f[e(s,t)], which is not uncountably infinite, in contrast to R+. Impacts are expected for the understanding of physical diffusion-like radiative transfer processes and of stability models to protect superconductors against quenches. There might be impacts also in relativity, quantum mechanics, nuclear decay, or in systems close to their phase transitions.
The analysis is not restricted to objects of laboratory dimensions.},
  subject = {Zeitrichtung},
  language = {en}
}

@unpublished{Nassourou2012,
  author = {Nassourou, Mohamadou},
  title = {Towards a Knowledge-Based Learning System for The Quranic Text},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70003},
  year = {2012},
  abstract = {This research attempts to create a knowledge-based learning system for the Quranic text. The knowledge base is made up of the Quranic text along with detailed information about each chapter and verse, and some rules. The system offers the possibility to study the Quran through web-based interfaces, implementing novel visualization techniques for browsing, querying, consulting, and testing the acquired knowledge. Additionally, the system possesses knowledge acquisition facilities for maintaining the knowledge base.},
  subject = {Wissensbanksystem},
  language = {en}
}

@unpublished{Reiss2012b,
  author = {Reiss, Harald},
  title = {Physical time and existence of time holes in non-transparent media},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-67268},
  year = {2012},
  abstract = {The analysis presented in this paper applies to experimental situations where observers or objects to be studied (both stationary with respect to each other) are located in environments whose optical thickness is strongly different. By their large optical thickness, non-transparent media are clearly distinguished from their transparent counterparts. Non-transparent media comprise thin metallic films, packed or fluidised beds, the Earth's crust, and even dark clouds and other cosmological objects. As a representative example, a non-transparent slab is subjected to transient disturbances, and a rigorous analysis is presented of whether physical time could reasonably be constructed under such conditions. The analysis incorporates mapping functions that correlate physical events, e, in non-transparent media, with their images, f(e), tentatively located on a standard physical time scale. The analysis demonstrates, however, that physical time, in its rigorous sense, does not exist under non-transparency conditions. A proof of this conclusion is attempted in three steps: (i) the theorem "there is no time without space and events" is accepted; (ii) images f[e(s,t)] do not constitute a dense, uncountably infinite set; and (iii) sets of images that are not uncountably infinite do not create physical time but only time-like sequences. As a consequence, mapping f[e(s,t)] in non-transparent space does not create a physical analogue to the mathematical structure of the ordered, dense half-set R+ of real numbers, and reverse mapping, f^{-1}f[e(s,t)], would not allow unique identification and reconstruction of original events from their images. In these cases, causality and determinism, as well as the invariance of physical processes under time reversal, might be violated. The existence of time holes could be possible, as follows from the sequence of images, f[e(s,t)], which is not uncountably infinite, in contrast to R+. Practical impacts are expected for the understanding of physical diffusion-like radiative transfer processes and of stability models to protect superconductors against quenches, or for the description of their transient local pair density and critical currents.
Impacts would also be expected in the mathematical formulations (differential equations) of classical physics, in relativity, and perhaps in quantum mechanics, as far as transient processes in non-transparent space are concerned. An interesting problem is whether temporal cloaking (a time hole) in a transparent medium, as very recently reported in the literature, can be explained by the present analysis. The analysis is not restricted to objects of laboratory dimensions: because of obviously existing radiation transfer analogues, it is tempting to discuss consequences also for much larger structures, in particular if an origin of time is postulated.},
  subject = {Strahlungstransport},
  language = {en}
}

@unpublished{Volkmann2011,
  author = {Volkmann, Armin},
  title = {Die Germanen: Mythos und Forschungsrealit{\"a}t},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66789},
  year = {2011},
  abstract = {The term "Germani" is an external designation coined by Greek and Roman authors of antiquity; the groups so designated had no common Germanic identity. Already in antiquity the Germani were stylized as powerful adversaries, a motif readily taken up in the written sources of the Middle Ages in the course of state formation. In retrospect, no "proto-language" or "original homeland" of the Germani can be reconstructed. In archaeology, however, the finds do define cultural areas of a material culture that are interpreted as Germanic; these must not be confused with a "Germanic ethnicity".},
  subject = {Vor- und Fr{\"u}hgeschichte},
  language = {de}
}

@unpublished{Nassourou2011a,
  author = {Nassourou, Mohamadou},
  title = {Using Machine Learning Algorithms for Categorizing Quranic Chapters by Major Phases of Prophet Mohammad's Messengership},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66862},
  year = {2011},
  abstract = {This paper discusses the categorization of Quranic chapters by the major phases of Prophet Mohammad's messengership using machine learning algorithms. First, the chapters were categorized by place of revelation using Support Vector Machine and na{\"i}ve Bayesian classifiers separately, and their results were compared to each other, as well as to the existing traditional Islamic and Western orientalist classifications. The chapters were categorized into Meccan (revealed in Mecca) and Medinan (revealed in Medina). After that, the chapters of each category were clustered using a kind of fuzzy single-linkage clustering approach, in order to correspond to the major phases of Prophet Mohammad's life. The major phases of the Prophet's life were manually derived from the Quranic text, as well as from the secondary Islamic literature, e.g. hadiths and exegesis. Previous studies on computing the places of revelation of Quranic chapters relied heavily on features extracted from existing background knowledge of the chapters. For instance, it is known that Meccan chapters contain mostly verses about faith and related problems, while Medinan ones encompass verses dealing with social issues, battles, etc. These features are by themselves insufficient as a basis for assigning the chapters to their respective places of revelation; in fact, there are exceptions, since some chapters contain both Meccan and Medinan features.
In this study, features of each category were automatically created from a very few chapters whose places of revelation had been determined through the identification of historical facts and events such as battles or the migration to Medina. Chapters whose places of revelation are unanimously agreed upon were used as the initial training set, while the remaining chapters formed the testing set. The classification process was made recursive by regularly augmenting the training set with correctly classified chapters, in order to classify the whole testing set. Each chapter was preprocessed by removing unimportant words, stemming, and representation in the vector space model. The result of this study shows that the two classifiers produced usable results, with the support vector machine classifier outperforming the na{\"i}ve Bayesian one. This study indicates that the proposed methodology yields encouraging results for arranging Quranic chapters by phases of Prophet Mohammad's messengership.},
  subject = {Koran},
  language = {en}
}

@unpublished{Nassourou2011b,
  author = {Nassourou, Mohamadou},
  title = {Computing Generic Causes of Revelation of the Quranic Verses Using Machine Learning Techniques},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-66083},
  year = {2011},
  abstract = {Because many verses of the holy Quran are similar, there is a high probability that similar verses addressing the same issues share the same generic causes of revelation. In this study, machine learning techniques have been employed in order to automatically derive the causes of revelation of Quranic verses. The derivation of the causes of revelation is viewed as a classification problem. Initially, the categories are based on the verses with known causes of revelation, and the testing set consists of the remaining verses. Based on a computed threshold value, a na{\"i}ve Bayesian classifier is used to categorize some verses. After that, using a decision tree classifier, the remaining uncategorized verses are separated into verses that contain indicators (resultative connectors, causative expressions, and the like) and those that do not. Each verse containing indicators is segmented into its constituent clauses by identification of the linking indicators. Then a dominant clause is extracted and either considered as the cause of revelation, or post-processed by adding or subtracting some terms to form a causal clause that constitutes the cause of revelation. Each of the remaining unclassified verses without indicators is then assigned by a na{\"i}ve Bayesian classifier to one of the existing classes, based on feature and topic similarity. Verses that could still not be classified were classified manually, with each such verse treated as a category of its own. The result obtained in this study is encouraging and shows that the automatic derivation of the generic causes of revelation of Quranic verses is achievable and reasonably reliable for understanding and implementing the teachings of the Quran.},
  subject = {Text Mining},
  language = {en}
}

@unpublished{Nassourou2011c,
  author = {Nassourou, Mohamadou},
  title = {Philosophical and Computational Approaches for Estimating and Visualizing Months of Revelations of Quranic Chapters},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65784},
  year = {2011},
  abstract = {The question of why the Quran's structure does not follow its chronology of revelation is a recurring one.
Some Islamic scholars such as [1] have answered the question using hadiths, as well as other philosophical reasons based on internal evidence of the Quran itself. Unfortunately, many still wonder about this issue today. Muslims believe that the Quran is a summary and a copy of the content of a preserved tablet called Lawhul-Mahfuz, located in heaven. Logically speaking, this suggests that the arrangement of the verses and chapters is expected to be similar to that of the Lawhul-Mahfuz. As for the arrangement of the verses within each chapter, there is unanimity that it was carried out by the Prophet himself under the guidance of the Angel Gabriel with the recommendation of God. But concerning the ordering of the chapters, there are reports about some divergences [3] among the Prophet's companions as to which chapter should precede which. This paper argues that the Quranic chapters might have been arranged according to the months and seasons of revelation. In fact, based on some verses of the Quran, it is defensible that the Lawhul-Mahfuz itself is understood to have been structured in terms of the months of the year. In this study, philosophical and mathematical arguments for computing the chapters' months of revelation are discussed, and the result is displayed on an interactive scatter plot.},
  subject = {Text Mining},
  language = {en}
}

@unpublished{Nassourou2011d,
  author = {Nassourou, Mohamadou},
  title = {Computer-based Textual Documents Collation System for Reconstructing the Original Text from Automatically Identified Base Text and Ranked Witnesses},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65749},
  year = {2011},
  abstract = {Given a collection of diverging documents about some lost original text, any person interested in the text would try to reconstruct it from the diverging documents. Whether one follows eclecticism, stemmatics, or copy-text, one is expected to explicitly or indirectly select one of the documents as a starting point, or base text, which can be emended through comparison with the remaining documents, so that a text that could be designated as the original document is generated. Unfortunately, the process of giving priority to one of the documents, also known as witnesses, is subjective. In fact, even cladistics, which could be considered a computer-based approach to implementing stemmatics, does not prescribe or recommend the selection of a certain witness as a starting point for reconstructing the original document. In this study, a computational method using a rule-based Bayesian classifier is employed to assist text scholars in their attempts to reconstruct a non-existing document from some available witnesses. The method developed in this study consists of successively selecting a base text and collating it with the remaining documents. Each completed collation cycle stores the selected base text and its closest witness, along with a weighted score of their similarities and differences. At the end of the collation process, the witness selected most often by the majority of base texts is considered the probable base text of the collection. Witnesses' scores are weighted using a weighting system based on the effects of the types of textual modifications on the process of reconstructing original documents. Users have the possibility to choose between baseless and base-text collation.
If a base text is selected, the task is reduced to ranking the witnesses with respect to the base text; otherwise, a base text as well as a ranking of the witnesses with respect to it are computed and displayed on a bar diagram. Additionally, this study includes a recursive algorithm for automatically reconstructing the original text from the identified base text and ranked witnesses.},
  subject = {Textvergleich},
  language = {en}
}

@unpublished{Nassourou2011e,
  author = {Nassourou, Mohamadou},
  title = {Design and Implementation of a Model-driven XML-based Integrated System Architecture for Assisting Analysis, Understanding, and Retention of Religious Texts: The Case of The Quran},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65737},
  year = {2011},
  abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. Once deeper understanding is achieved, one would like to organize and manage one's knowledge in such a way that it can easily be remembered and efficiently transmitted to others. This paper discusses modeling religious texts using semantic XML markup based on frame-based knowledge representation, with the purpose of assisting understanding, retention, and sharing of the knowledge they contain. In this study, books organized in terms of chapters made up of verses are considered as the source of knowledge to model. Some metadata representing the multiple perspectives of knowledge modeling are assigned to each chapter and verse. Chapters and verses with their metadata form a meta-model, which is represented using frames and published on a web mashup. An XML-based annotation and visualization system has been developed, equipped with user interfaces for creating static and dynamic metadata and for annotating chapters' contents according to user-selected semantics, as well as with templates for publishing the generated knowledge on the Internet. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts, in order to support analysis, understanding, and retention of the texts.},
  subject = {Wissensrepr{\"a}sentation},
  language = {en}
}

@unpublished{Schmidt2011,
  author = {Schmidt, Karin Stella},
  title = {Zur Musik Mesopotamiens. Erste Erg{\"a}nzung (2011)},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65169},
  year = {2011},
  abstract = {A compilation of literature on the music of Mesopotamia: on music theory, notation, organology, and performance practice in Sumer, Akkad, Babylonia, and Assyria.},
  subject = {Mesopotamien},
  language = {de}
}

@unpublished{Nassourou2011f,
  author = {Nassourou, Mohamadou},
  title = {A Rule-based Statistical Classifier for Determining a Base Text and Ranking Witnesses In Textual Documents Collation Process},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-57465},
  year = {2011},
  abstract = {Given a collection of diverging documents about some lost original text, any person interested in the text would try to reconstruct it from the diverging documents. Whether one follows eclecticism, stemmatics, or copy-text, one is expected to explicitly or indirectly select one of the documents as a starting point, or base text, which can be emended through comparison with the remaining documents, so that a text that could be designated as the original document is generated.
Unfortunately, the process of giving priority to one of the documents, also known as witnesses, is subjective. In fact, even cladistics, which could be considered a computer-based approach to implementing stemmatics, does not prescribe or recommend the selection of a certain witness as a starting point for reconstructing the original document. In this study, a computational method using a rule-based Bayesian classifier is employed to assist text scholars in their attempts to reconstruct a non-existing document from some available witnesses. The method developed in this study consists of successively selecting a base text and collating it with the remaining documents. Each completed collation cycle stores the selected base text and its closest witness, along with a weighted score of their similarities and differences. At the end of the collation process, the witness selected most often by the majority of base texts is considered the probable base text of the collection. Witnesses' scores are weighted using a weighting system based on the effects of the types of textual modifications on the process of reconstructing original documents. Users have the possibility to choose between baseless and base-text collation. If a base text is selected, the task is reduced to ranking the witnesses with respect to the base text; otherwise, a base text as well as a ranking of the witnesses with respect to it are computed and displayed on a histogram.},
  subject = {Textvergleich},
  language = {en}
}

@unpublished{Nassourou2011g,
  author = {Nassourou, Mohamadou},
  title = {Design and Implementation of Architectures for Interactive Textual Documents Collation Systems},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56601},
  year = {2011},
  abstract = {One of the main purposes of collating textual documents is to identify the base text, or the witness closest to the base text, by analyzing and interpreting the differences, also known as types of changes, that might exist between those documents. Based on this fact, it is reasonable to argue that the explicit identification of types of changes such as deletions, additions, transpositions, and mutations should be part of the collation process. The identification could be carried out by an interpretation module after alignment has taken place. Unfortunately, existing collation software such as CollateX and Juxta's collation engine lacks interpretation modules. In fact, both implement the Gothenburg model [1] for the collation process, which does not include an interpretation unit. Currently, neither CollateX nor Juxta's collation engine distinguishes between the types of changes in its critical apparatus, and neither offers statistics about those changes. This paper presents a model for both integrated and distributed collation processes that improves on the Gothenburg model. The model introduces an interpretation component for computing and distinguishing between the types of changes that documents could have undergone. Moreover, two architectures implementing the model in order to solve the problem of interactive collation are discussed as well. Each architecture uses the CollateX library and provides, on the one hand, preprocessing functions for transforming input documents into the CollateX input format and, on the other hand, a post-processing module for enabling interactive collation.
Finally, simple algorithms for distinguishing between types of changes and for linking collated source documents with the collation results are also introduced.},
  subject = {Softwarearchitektur},
  language = {en}
}

@unpublished{Nassourou2011h,
  author = {Nassourou, Mohamadou},
  title = {Assisting Understanding, Retention, and Dissemination of Religious Texts Knowledge with Modeling and Visualization Techniques: The Case of The Quran},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55927},
  year = {2011},
  abstract = {Learning a book in general involves reading it, underlining important words, adding comments, summarizing some passages, and marking up some text or concepts. Once deeper understanding is achieved, one would like to organize and manage one's knowledge in such a way that it can easily be remembered and efficiently transmitted to others. In this paper, books organized in terms of chapters consisting of verses are considered as the source of knowledge to be modeled. The knowledge model consists of verses with their metadata and semantic annotations; the metadata represent the multiple perspectives of knowledge modeling. Verses with their metadata and annotations form a meta-model, which will be published on a web mashup. The meta-model, with links between its elements, constitutes a knowledge base. An XML-based annotation system that breaks the learning process down into specific tasks helps construct the desired meta-model. The system is made up of user interfaces for creating metadata and for annotating chapters' contents according to user-selected semantics, as well as templates for publishing the generated knowledge on the Internet. The proposed software system improves comprehension and retention of the knowledge contained in religious texts through modeling and visualization. The system has been applied to the Quran, and the result obtained shows that multiple perspectives of information modeling can be successfully applied to religious texts. It is hoped that this short ongoing study will motivate others to engage in devising and offering software systems for cross-religion learning.},
  subject = {Wissensmanagement},
  language = {en}
}

@unpublished{Nassourou2011i,
  author = {Nassourou, Mohamadou},
  title = {Assisting Analysis and Understanding of Quran Search Results with Interactive Scatter Plots and Tables},
  url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-55840},
  year = {2011},
  abstract = {The Quran is the holy book of Islam, consisting of 6236 verses divided into 114 chapters called suras. Many verses are similar and even identical. Searching for similar texts (e.g. verses) could return thousands of verses which, when displayed completely or partly as a textual list, would make analysis and understanding difficult and confusing. Moreover, it would be visually impossible to figure out at a glance the overall distribution of the retrieved verses in the Quran. As a consequence, reading and analyzing the verses would be tedious and unintuitive. In this study, a combination of interactive scatter plots and tables has been developed to assist the analysis and understanding of search results. Retrieved verses are clustered by chapter, and a weight is assigned to each cluster according to the number of verses it contains, so that users can visually identify the most relevant areas and figure out the places of revelation of the verses.
Users can view the complete result, select a region of the plot to zoom in, and click on a marker to display a table containing the verses with their English translation side by side.},
  subject = {Text Mining},
  language = {en}
}