@phdthesis{Planchet2004, author = {Planchet, Elisabeth}, title = {Nitric oxide production by tobacco plants and cell cultures under normal conditions and under stress}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9339}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {Nitric oxide (NO) is a gaseous free radical. In animal tissues, NO participates in the regulation of many physiological processes. Over the last decade it has become increasingly likely that NO also acts as a second messenger in plants. Reports that NO is involved as an intermediate signal in the induction of the hypersensitive response (HR) of plants to pathogens attracted particular interest. In contrast to animals, plants probably possess a number of different systems that can produce NO. Potential candidates are cytosolic nitrate reductase (NR; EC 1.6.6.1), plasma-membrane-bound nitrite:NO reductase (Ni:NOR), NO synthase (NOS; EC 1.14.13.39) and xanthine dehydrogenase (XDH; EC 1.1.1.204). The aim of this work was to quantify the NO production of plants and to identify the enzymatic steps involved. Chemiluminescence was used as the principal method for measuring NO, allowing the NO emission from plants, cell suspensions or enzyme solutions into NO-free air or N2 to be followed in real time. For our analysis we used wild-type tobacco (N. tabacum cv Xanthi or cv Gatersleben) and cell suspension cultures derived from it, NR-free mutants or WT plants grown on ammonium to avoid NR induction, plants grown on tungstate instead of molybdate to suppress the synthesis of functional MoCo enzymes, and an NO-overproducing, nitrite reductase (NiR)-deficient transformant. Normal leaves of nitrate-fed plants showed a typical NO emission pattern in which NO emission was low in the dark, much higher in the light, and by far the highest under anoxic conditions in the dark. Even after maximum rates were reached, however, NO emission amounted to at most 1 \% of the extractable NR activity. A solution of highly purified nitrate reductase also produced NO from the substrates nitrite and NADH, and here too the rate of NO emission was at most 1 \% of the NR activity present. This consistent ratio of NR activity to NO emission in leaves, cell suspensions and an NR solution indicates that NO quenching was only minor and that measuring NO emission should therefore be a reliable method for quantifying NO production. NO emission from the NiR-deficient, nitrite-accumulating transformant was always very high. NR-free plants or cell suspensions, by contrast, normally produced no NO, from which it could be concluded that NR was the only NO source here. The rate was generally correlated with the nitrite concentration, but cytosolic NADH appeared to be a further important limiting factor. Surprisingly, however, NR-free plants or cell cultures also reduced nitrite to NO under anoxic conditions. The enzyme system involved was not a MoCo enzyme and was cyanide-sensitive. The fungal elicitor cryptogein induced cell death at nanomolar concentrations after infiltration into leaves or addition to cell suspensions.
This response was prevented, or at least strongly delayed, by the NO scavengers PTIO and c-PTIO. The initial conclusion was that NO was indeed involved in HR induction. However, since the reaction product of c-PTIO and NO, c-PTI, also prevented the HR without quenching NO, the widespread use of c-PTIO and its derivatives as evidence for an involvement of NO appears questionable at the very least. The HR was induced indiscriminately in WT plants as well as in NR-free plants and cell suspensions; NR is therefore evidently not required for the HR. In contrast to published literature data, even a continuous high overproduction of NO did not prevent the development of the HR. Particularly surprising was the finding that, despite the inhibition of the HR by PTIO, no cryptogein-induced NO production whatsoever was measurable in leaves. In nitrate-fed cell suspension cultures, however, a small NO emission accompanied by nitrite accumulation was observed about 3-6 h after cryptogein treatment. Both were absent in ammonium-fed cultures. Here, then, there appeared to be some relation between cryptogein-induced NO emission, NR and nitrite, which is not yet understood in detail. Since cell death also occurred in NR-free cell suspension cultures, however, there is evidently no causal connection between this NO emission, nitrite accumulation and the cryptogein effect. Since NOS inhibitors prevented neither cell death nor the nitrite-dependent NO emission, a NOS-like activity likewise appears to play no role. Overall, these findings strongly call into question the role of NO as a signal in the HR and the role of NOS as an NO source, both established in the literature.}, subject = {Tabak}, language = {en} } @phdthesis{Kraus2003, author = {Kraus, Daniela}, title = {Conformal pseudo-metrics and some applications}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9193}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {The point of departure for the present work has been the following free boundary value problem for analytic functions \$f\$ which are defined on a domain \$G \subset \mathbb{C}\$ and map into the unit disk \$\mathbb{D}= \{z \in \mathbb{C} : |z|<1 \}\$. Problem 1: Let \$z_1, \ldots, z_n\$ be finitely many points in a bounded simply connected domain \$G \subset \mathbb{C}\$. Show that there exists a holomorphic function \$f:G \to \mathbb{D}\$ with critical points \$z_j\$ (counted with multiplicities) and no others such that \$\lim_{z \to \xi} \frac{|f'(z)|}{1-|f(z)|^2}=1\$ for all \$\xi \in \partial G\$. If \$G=\mathbb{D}\$, Problem 1 was solved by K{\"u}hnau [5] in the case of one critical point, and for more than one critical point by Fournier and Ruscheweyh [3]. The method employed by K{\"u}hnau, Fournier and Ruscheweyh easily extends to more general domains \$G\$, say bounded by a Dini-smooth Jordan curve, but does not work for arbitrary bounded simply connected domains. In this paper we present a new approach to Problem 1, which shows that this boundary value problem is not an isolated question in complex analysis, but is intimately connected to a number of basic open problems in conformal geometry and non-linear PDE. One of our results is a solution to Problem 1 for arbitrary simply connected domains.
However, we shall see that our approach also has some other ramifications, for instance to a well-known problem due to Rellich and Wittich in PDE. Roughly speaking, this paper is broken down into two parts. In a first step we construct a conformal metric in a bounded regular domain \$G\subset \mathbb{C}\$ with prescribed non-positive Gaussian curvature \$k(z)\$ and prescribed singularities by solving the first boundary value problem for the Gaussian curvature equation \$\Delta u =-k(z) e^{2u}\$ in \$G\$ with prescribed singularities and continuous boundary data. This is related to the Berger-Nirenberg problem in Riemannian geometry, the question of which functions on a surface R can arise as the Gaussian curvature of a Riemannian metric on R. The special case where \$k(z)=-4\$ and the domain \$G\$ is bounded by finitely many analytic Jordan curves was treated by Heins [4]. In a second step we show that every conformal pseudo-metric on a simply connected domain \$G\subseteq \mathbb{C}\$ with constant negative Gaussian curvature and isolated zeros of integer order is the pullback of the hyperbolic metric on \$\mathbb{D}\$ under an analytic map \$f:G \to \mathbb{D}\$. This extends a theorem of Liouville which deals with the case that the pseudo-metric has no zeros at all. These two steps together allow a complete solution of Problem 1. Contents: Chapter I contains the statement of the main results and connects them with some old and new problems in complex analysis, conformal geometry and PDE: the Uniformization Theorem for Riemann surfaces, the problem of Schwarz-Picard, the Berger-Nirenberg problem, Wittich's problem, etc. Chapters II and III have preparatory character. In Chapter II we recall some basic results about ordinary differential equations in the complex plane. In our presentation we follow Laine [6], but we have reorganized the material and present a self-contained account of the basic features of Riccati, Schwarzian and second order differential equations. In Chapter III we discuss the first boundary value problem for the Poisson equation. We shall need to consider this problem in the most general situation, which does not seem to be covered in a satisfactory way in the existing literature, see [1,2]. In Chapter IV we turn to a discussion of conformal pseudo-metrics in planar domains. We focus on conformal metrics with prescribed singularities and prescribed non-positive Gaussian curvature. We shall establish the existence of such metrics, that is, we solve the corresponding Gaussian curvature equation by making use of the results of Chapter III. In Chapter V we show that every constantly curved pseudo-metric can be represented as the pullback of either the hyperbolic, the euclidean or the spherical metric under an analytic map. This is proved by using the results of Chapter II. Finally we give in Chapter VI some applications of our results. [1,2] Courant, R., Hilbert, D., Methoden der Mathematischen Physik, Erster/Zweiter Band, Springer-Verlag, Berlin, 1931/1937. [3] Fournier, R., Ruscheweyh, St., Free boundary value problems for analytic functions in the closed unit disk, Proc. Amer. Math. Soc. (1999), 127 no. 11, 3287-3294. [4] Heins, M., On a class of conformal metrics, Nagoya Math. J. (1962), 21, 1-60. [5] K{\"u}hnau, R., L{\"a}ngentreue Randverzerrung bei analytischer Abbildung in hyperbolischer und sph{\"a}rischer Geometrie, Mitt. Math. Sem. Giessen (1997), 229, 45-53.
[6] Laine, I., Nevanlinna Theory and Complex Differential Equations, de Gruyter, Berlin - New York, 1993.}, subject = {Freies Randwertproblem}, language = {en} } @phdthesis{Petrovic2004, author = {Petrovic, Suzana}, title = {In vivo analysis of homing pattern and differentiation potential of cells deriving from embryonic and adult haematopoietic regions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9323}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {The experimental work of this thesis addresses the questions of whether established cell lines injected into murine blastocysts find their way back home and seed preferentially at the site of their origin, and whether they can change their fate and differentiate into unrelated cell types when exposed to the embryonic environment. This survey was based on the fact that different cell lines have different potentials in developing embryos, depending on their cellular identity. The cell lines used in this survey were the AGM region-derived DAS 104-4 and DAS 104-8 cells, yolk sac-derived YSE cells and bone marrow-derived FDCP mix cells. These cells were injected into mouse blastocysts. Donor cells were traced in developing embryos via specific markers. Analysis of the embryos revealed that DAS cells are promiscuous in their seeding pattern, since they were found in all analysed tissues with similar frequencies. YSE cells showed a preference for seeding yolk sac and liver. YSE donor cells in chimaeric tissues were not able to change their immuno-phenotype, indicating that they did not change their destiny. Analysis of adult mice did not reveal any donor contribution of YSE-derived cells. In contrast, FDCP mix cells mostly engrafted haematopoietic tissues, although the embryos analysed by in situ hybridization frequently had donor signals in cartilage primordia, heads, and livers. Analysis of whether FDCP mix-derived cells found in foetal livers were of haematopoietic or hepatocyte nature showed that the progeny of injected FDCP mix cells do not differentiate into cells that express a hepatocyte-specific marker. Further analysis showed that FDCP mix-derived donor cells found in the brain express neural or haematopoietic markers. In order to reveal whether they transdifferentiate into neurons or fuse with neurons/glial cells, the nuclear diameters of donor and recipient cells were determined. Comparison of the nuclear diameters of recipient and donor cells revealed no differences, suggesting that the progeny of FDCP mix cells in the brain are not fusion products. Analysis of adult mouse tissues revealed that the presence of FDCP mix-derived cells was highest in the brain. These results confirmed the assumption that the developmental potential of the analysed cells cannot be easily modified, even when exposed to the early embryonic environment. One can therefore conclude that the analysed cell types had different homing patterns depending on their origins.}, subject = {Zelllinie}, language = {en} } @phdthesis{Rouziere2004, author = {Rouzi{\`e}re, Anne-Sophie}, title = {Modulation of the B-cell repertoire in rheumatoid arthritis by transient B-cell depletion}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9290}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {Although the role of B-cells in autoimmunity is not completely understood, their importance in the pathogenesis of autoimmune diseases has become more appreciated in the past few years.
It is now well known that they have roles in addition to (auto)antibody production and are involved by different mechanisms in the regulation of T-cell mediated autoimmune disorders. The evolution of an autoimmune disease is a dynamic process, which takes a course of years during which complex immunoregulatory mechanisms shape the immune repertoire until the development of clinical disease. During this course, the B-cell repertoire itself is influenced, and a change in the distribution of immunoglobulin heavy and light chain genes can be observed. B-cell-depleting therapies have beneficial effects in patients suffering from rheumatoid arthritis (RA), highlighting the central role of B-cells in the pathogenesis of this disease. Nevertheless, the mechanism of action is unclear. It has been hypothesised that B-cell depletion is able to reset deviated humoral immunity. We therefore wanted to investigate whether transient B-cell depletion results in changes of the peripheral B-cell receptor repertoire. To address this issue, the expressed immunoglobulin genes of two patients suffering from RA were analysed; one patient for the heavy chain repertoire (patient H), one patient for the light chain repertoire (patient L). Both patients were treated with rituximab, an anti-CD20 monoclonal antibody that selectively depletes peripheral CD20+ B-cells for several months. The B-cell repertoire was studied before therapy and at the earliest time point after B-cell regeneration in both patients. A longer follow-up (up to 27 months) was performed in patient H, who was treated a second time with rituximab after 17 months. Heavy chain gene analysis was carried out by nested PCR on bulk DNA from peripheral B-cells using family-specific primers, followed by subcloning and sequencing. During the study, patient H received two courses of antibody treatment. B-cell depletion lasted 7 and 10 months, respectively, and was each time accompanied by clinical improvement. Anti-CD20 therapy induced two types of changes in this patient. During the early phase of B-cell regeneration, we noticed the presence of an expanded and recirculating population of highly mutated B-cells. These cells expressed very different immunoglobulin VH genes compared with before therapy. They were class-switched and could be detected for a short period only. The long-term changes were more subtle. Nevertheless, characteristic changes in the VH2 family, as well as in specific mini-genes like VH3-23, 4-34 or 1-69, were noticed. Some of these genes have already been reported to be biased in autoimmune diseases. Also in autoimmune diseases, and in particular in RA, clonal B-cells have frequently been found in the repertoire. B-cell depletion with the anti-CD20 antibody resulted in a long-term loss of clonal B-cells in patient H. Thus, temporary B-cell depletion induced significant changes in the heavy chain repertoire. For the light chain gene analysis, the repertoire changes were analysed separately for naive (CD27-) and memory (CD27+) B-cells. Individual CD19+ B-cells were sorted into CD27- and CD27+ cells and single cell RT-PCR was performed, followed by direct sequencing. During the study, patient L received one course of antibody treatment. B-cell depletion lasted 10 months and the light chain repertoire was studied before and after therapy. Before therapy, some differences in the distribution of VL and JL genes were observed between naive and memory B-cells.
In particular, the predominant usage of Jk-proximal Vk genes by the CD27- naive B-cells indicated that receptor editing was less frequent in this population than in memory cells. In VlJl rearrangements, too, some evidence of decreased receptor editing was noticed, with an overrepresentation of the Jl2/3 gene segments. The CDR3 regions of naive and memory cells showed different characteristics: the activity of the terminal deoxynucleotidyl transferase and of the exonuclease on the Vl (5') side was greater in memory cells. In the light chain repertoire also, we observed some changes induced by the B-cell-depleting therapy. There was a tendency toward less frequent usage of Jk-proximal Vk genes in the naive population. Some Vl genes previously described in autoimmune diseases and connected to rheumatoid factor activity, such as 3p, 3r and 1g, were not found after therapy. The different characteristics of the CDR3 regions of VlJl rearrangements were no longer observed. Very significantly, the Vk to Vl ratio was shifted toward a greater usage of Vk genes in the naive population after therapy. Taken together, these results indicate that therapeutic transient B-cell depletion by anti-CD20 antibody therapy modulates the immunoglobulin gene repertoire in the two RA patients studied. Measurable changes were observed in the heavy chain as well as in the light chain repertoire, which may be relevant to the course of the disease. This also supports the notion that the composition of the B-cell repertoire is influenced by the disease and that B-cell depletion can reset biases that are typically found in autoimmune diseases.}, subject = {Rheumatoide Arthritis}, language = {en} } @phdthesis{Grozdanov2004, author = {Grozdanov, Lubomir Assenov}, title = {Analysis of the genome organization and fitness traits of non-pathogenic Escherichia coli strain Nissle 1917 (O6:K5:H1)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9304}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {In recent years, more than one hundred microbial genomes have been sequenced, many of them from pathogenic bacteria. The availability of this huge amount of sequence data enormously increases our knowledge of genome structure and plasticity, as well as of microbial diversity and evolution. In parallel, these data are the basis for the scientific "revolution" in the fields of industrial and environmental biotechnology and medical microbiology - diagnostics and therapy, and the development of new drugs and vaccines against infectious agents. Together with the genomic approach, other molecular biological methods such as PCR, DNA-chip technology, subtractive hybridization, transcriptomics and proteomics are of increasing importance for research on infectious diseases and public health. The aim of this work was to characterize the genome structure and content of the probiotic Escherichia coli strain Nissle 1917 (O6:K5:H1) and to compare these data with publicly available data on the genomes of different pathogenic and non-pathogenic E. coli strains and other closely related species. A cosmid genomic library of strain Nissle 1917 was screened for clones containing the genetic determinants that contribute to successful survival in and colonization of the human body, as well as mediate this strain's probiotic effect as part of the intestinal microflora. Four genomic islands (GEI I-IVNissle 1917) were identified and characterized.
They contain many known fitness determinants (mch/mcm, foc, iuc, kps, ybt), as well as novel genes of unknown function, mobile genetic elements and newly identified putative fitness-contributing factors (Sat, Iha, a ShiA homologue, Ag43 homologues). All islands were found to be integrated next to tRNA genes (serX, pheV, argW and asnT, respectively). Their structure and chromosomal localization closely resemble those of analogous islands in the genome of uropathogenic E. coli strain CFT073 (O6:K2(?):H1), but they lack important virulence genes of uropathogenic E. coli (hly, cnf, prf/pap). Evidence for the instability of GEI IINissle 1917 was obtained, since a deletion event in which IS2 elements play a role was detected. This event results in the loss of a 30 kb DNA region containing important fitness determinants (iuc, sat, iha), and might therefore influence the colonization capacity of strain Nissle 1917. In addition, a screening of the sequence context of tRNA-encoding genes in the genome of Nissle 1917 was performed to identify potential genome-wide integration sites of "foreign" DNA. As a result, similar "tRNA screening patterns" were observed for strain Nissle 1917 and for the uropathogenic E. coli O6 strains (UPEC) 536 and CFT073. The molecular reason for the semi-rough phenotype and serum sensitivity of strain Nissle 1917 was analyzed. The O6-antigen polymerase-encoding gene wzy was identified, and it was shown that the reason for the semi-rough phenotype is a frame-shift mutation in wzy resulting in a premature stop codon. It was shown that restoring O side-chain LPS polymerization by complementation with a functional wzy gene increased the serum resistance of strain Nissle 1917. The results of this study show that, despite the genomic similarity of E. coli strain Nissle 1917 to the UPEC strain CFT073, strain Nissle 1917 exhibits a specific set of geno- and phenotypic features which contribute to its probiotic action. By comparison with the available data on the genomics of different species of Enterobacteriaceae, this study contributes to our understanding of important processes such as horizontal gene transfer, deletions and rearrangements, which contribute to genome diversity and plasticity and are driving forces for the evolution of bacterial variants. Finally, the fim, bcs and rfaH determinants, whose expression contributes to the multicellular behaviour and biofilm formation of E. coli strain Nissle 1917, were characterized.}, subject = {Escherichia coli}, language = {en} } @phdthesis{Henn2004, author = {Henn, Julian}, title = {The electron density : a bridge between exact quantum mechanics and fuzzy chemical concepts}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9003}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {The nature of the chemical bond is a topic under constant debate. What is known about individual molecular properties and functional groups is often taught and rationalized by explaining Lewis structures, which, in turn, make extensive use of the valence concept. The valence concept distinguishes between electrons that do not participate in chemical interactions (core electrons) and those that do (single, double and triple bonds, lone-pair electrons, etc.). Additionally, individual electrons are assigned to atomic centers.
The valence concept has been extraordinarily successful: It allows the successful planning of chemical syntheses and analyses, it explains the behavior of individual functional groups, and, moreover, it provides the "language" in which to think and talk about molecular structure and chemical interactions. The resounding success of the valence concept can make it easy to forget its approximate character. Quantum mechanics, on the other hand, provides in principle a quantitative description of all chemical phenomena, but it makes no distinction between electrons. From the quantum mechanical point of view there are only indistinguishable electrons in the field of the nuclei, i.e., it is impossible to assign a given electron to a particular center or to ascribe a particular purpose to individual electrons. The concept of the indistinguishability of microparticles is founded on the Heisenberg uncertainty relation, which states that wave packets diverge in the 6N-dimensional phase space, so that individual trajectories cannot be identified. Hence it is a deep-rooted and well-established physical concept. As an introduction to the present work, density partitioning schemes were discussed that divide the total molecular density into chemically meaningful regions. These partitioning schemes are intimately related either to the concept of bound atoms in a molecule (as in the Atoms In Molecules (AIM) theory according to Bader or in the Hirshfeld partitioning scheme) or to the concept of chemical structure in the sense of Lewis structures, which divides the total molecular density into core and valence density, where the valence density is split up again into bonding and non-bonding electron densities. Examples are early and recent loge theories, the topological analysis by means of the Electron Localization Function (ELF), and the Natural Bond Orbital (NBO) approach. Of these partitioning schemes, the theories according to Bader (AIM), to Becke and Edgecombe (ELF), and to Weinhold (NBO and Natural Resonance Theory, NRT) were critically reviewed in detail. Points of criticism were explicated for each of the mentioned theories. Since theoretically derived electron densities are to be compared to experimentally derived densities, a brief introduction to the theory of X-ray diffraction experiments was given and the multipole formalism was introduced. The procedure of density refinement was briefly discussed. Various suggestions for improvements were developed: One strategy would be to employ model parameters that are to a maximum degree mutually orthogonal, with the object of minimizing correlations among the model parameters, e.g., by introducing nodal planes into the radial functions of the multipole model. A further suggestion involves guiding the iterative refinement procedure by an extremum principle, which states that when different solutions to the least-squares minimization problem are available with about the same statistical measures of quality and about the same residual density, the solution to prefer is the one that yields a minimum density at the bond critical point (BCP) and a maximum polarity in terms of the ratio of distances between the BCP and the nuclei. This suggestion is based on the well-known fact that the bond polarity (in terms of the ratio of distances between the BCP and the respective nuclei) is underestimated in the experiment.
Another suggestion for including physical constraints is the explicit consideration of the virial theorem, e.g., by integrating the Laplacian over the entire atomic basins and comparing this value to zero and to the value obtained from integrating the electron density gradient field over the atomic surface. The next suggestion was to explicitly use the electrostatic theorem of Feynman (often also called the Hellmann-Feynman theorem), which states that the forces on the nuclei can be calculated from the purely classical electrostatic forces of the electron and nuclear distributions. For a stationary system, these forces must add to zero. This also provides an internal quality criterion of the density model, which can be applied iteratively during the refinement procedure or as a test of the final result. The use of the electrostatic theorem is expected to significantly reduce correlations between static density parameters and parameters describing vibrations, since it is a valuable tool to discriminate between physically reasonable and artificial static electron densities. All of these suggestions can be applied as internal quality criteria. The last suggestion is based on the idea of initiating the experimental refinement with a set of model parameters that is as close as possible to the final solution. This can be achieved by performing periodic-boundary-condition calculations, which yield theoretically generated files containing the Miller indices (h, k, l) and the respective intensities I. Such a file is used for a model parameter estimation (refinement) that excludes vibrations. The resulting parameters can be used for the experimental refinement, where, in a first step, the density parameters are fixed in order to determine the parameters describing vibrations. For fine-tuning, the electrostatic theorem and the other suggestions mentioned above could again be applied. Theoretical predictions should not be biased by the method of computation. Therefore, the dependence of the density analyzing tools on the level of calculation (method of calculation/basis set) and on the substituents in complex chemical bonding situations was evaluated in the second part of the present work. A number of compounds containing formal single and double sulfur-nitrogen bonds were investigated. For these compounds, experimental data were also available. The calculated data were compared internally and with the experimental results. The internal comparison was drawn with regard to questions of convergence as well as consistency: The molecular properties resulting from NBO/NRT analyses were found to be very stable when the geometries were optimized at the respective level of theory. This stability holds for variations in the method of calculation as well as in the basis set. Only the individual resonance weights of the contributing Natural Lewis Structures differed considerably, depending on the level of calculation and on the substituents. However, in both cases the deviations largely remained within a limit that preserves the descending order of the leading resonance structure weights. The resulting bond orders, i.e., the total, covalent and ionic bond orders from NRT calculations, were not affected by the shift in the resonance weights. The analysis of the bond topological parameters led to a distinction between insensitive and sensitive parameters.
The stable parameters depend strongly neither on the method of calculation nor on the basis set. Only minor variation occurs in the numerical values of these parameters when the level of calculation is changed or even when other functional groups (H, Me, or tBu) are employed, as long as the methods of calculation do not drop considerably below a standard level. The bond descriptors of the sulfur-nitrogen bonds were also found to be stable with respect to the functional groups R = H, R = Me, and R = tBu. Stable parameters are the bond distance, the density at the bond critical point (BCP) and the ratio of distances between the BCP and the nuclei A and B, which varies clearly with the formal bond type. For very small basis sets like the 3-21G basis set, this characteristic stability collapses. The sensitive parameters are based on the second derivatives of the density with respect to the coordinates. This is in accordance with the well-known fact that the total second derivative of the density with respect to the coordinates is a strongly oscillating function with positive as well as negative values. Pronounced deviations have to be anticipated as a consequence of these strong oscillations. \$\lambda_3\$, which describes the local charge depletion in the direction of the interaction line, is the most variable parameter. A detailed analysis revealed that the position of the BCP on the steep edge of the Laplacian distribution is responsible for the sensitivity of the numerical value of \$\lambda_3\$ in formal double bonds. Since the slope of the Laplacian assumes very high values on this steep edge, even a tiny displacement of the BCP leads to a considerable change in \$\lambda_3\$. This instability is not a failure of the underlying theory, but it does lead de facto to a considerable dependence of the sensitive bond topological properties on the method of calculation and on the applied basis sets. Since the total second derivative is important for judging the nature of the bond in AIM theory (closed-shell versus shared interactions), the changes in \$\lambda_3\$ can lead to differing chemical interpretations. The comparison of theoretically derived bond topological properties of various sulfur-nitrogen bonds makes it possible to assess the self-consistency of this data set. All data sets clearly exhibit a linear correlation between the bond distances and the density at the BCP on the one hand, and between the bond distances and the Laplacian values at the BCP on the other. These correlations were almost independent of the basis set size. In this context, the linear regression has to be regarded exclusively as a tool of descriptive statistics; no correlation is anticipated a priori. The formal bond type was found to be readily deducible from the theoretically obtained bond topological descriptors of the model systems. In this sense, the bond topological properties are self-consistent despite the numerical sensitivity of the derivatives, as exemplified above. Often, calculations are performed with the experimentally derived equilibrium geometries and not with optimized ones, which saves the computationally costly geometry optimizations. Following this approach, the bond topological properties were calculated using very flexible basis sets and employing the fixed experimental geometry (which, of course, includes the application of tBu groups).
Regression coefficients similar to those from optimized geometries were obtained for the correlations between bond distances and the densities at the BCP, as well as for the correlation between bond distances and the Laplacian at the BCP, i.e., the approach is valid. However, the data points scattered less and the correlation coefficient was clearly higher when geometry optimizations were performed beforehand. The comparison between data obtained from theory and experiment revealed fundamental discrepancies: In the data set of bond topological parameters from the experiment, the behavior of only two out of three insensitive parameters was comparable to the behavior of the theoretically obtained values, i.e., theoretical and experimental bond distances as well as theoretical and experimental densities at the BCP correlate. From the theoretically obtained data it was easy to deduce the formal bond type from the position of the BCP, since it changed in a systematic manner. The respective experimentally obtained values were almost constant and did not change systematically. For the compounds containing SN bonds, the total second derivative assumes exclusively negative values in the experiment. Due to this different internal behavior, the experimentally and theoretically obtained sensitive bond topological values could not be compared directly. The qualitative agreement in the Laplacian distribution, however, was excellent. In the third and last part of this work, the results are applied to chemical systems. Formally hypervalent molecules, i.e., molecules in which some atoms are considered to hold more than eight electrons in their valence shell, were investigated. These were compounds containing sulfur-nitrogen bonds (H(NtBu)2SMe, H2C{S(NtBu)2(NHtBu)}2, S(NtBu)2 and S(NtBu)3) and a highly coordinated silicon compound. The set of sulfur-nitrogen compounds also contained a textbook example of valence expansion, the sulfur triimide. For these molecules, experimental reference values were available from high-resolution X-ray experiments. In the case of the sulfur triimide, the experimental results were not unequivocal. Furthermore, no definite conclusion about the formal bonding type could be drawn from the experimental bond topological data. The situation of the sulfur-nitrogen bonds in the above-mentioned set of molecules was analyzed in terms of a geometry discussion and by means of a topological analysis. The methyl-substituted isolated molecules served as model compounds. For the interpretation of the bonding situation, additional NBO/NRT calculations were performed for the sulfur-nitrogen compounds, and an ELF calculation and analysis was performed for the silicon compound. The ELF analysis included not only the presentation and discussion of the ELF isosurfaces (\$\eta = 0.85\$), but also the investigation of the populations of disynaptic valence basins and the percentage contributions of the individual atoms to these populations when the disynaptic valence basins are split into atomic contributions according to Bader's partitioning scheme. The question of chemical interest was whether or not hypervalency is present in this set of molecules. In the first case the octet rule would be violated, in the second case Pauling's verdict would be violated. While the concept of hypervalency is well established in chemistry, the violation of Pauling's verdict is not.
The quantitative values of the sensitive bond topological parameters from theory and experiment were not comparable, since no systematic relationship between the experimentally and theoretically determined sensitive bond descriptors was found. However, the insensitive parameters are in good agreement, and the qualitative Laplacian distributions are, with few exceptions, in excellent agreement. The formal bonding type was deduced from the experimental and theoretical topological data by considering the number and shape of the valence shell charge concentrations in proximity to the sulfur and nitrogen centers. The results from the NBO/NRT calculations confirmed these findings. All of the density analyzing tools employed (AIM, ELF and NBO/NRT) agreed in describing the bonding situation in the formally hypervalent molecules as highly polar. A comparison and analysis of experimentally and theoretically derived electron densities consistently led to the result that, for this set of molecules, hypervalency has to be excluded unequivocally.}, subject = {Elektronendichtebestimmung}, language = {en} } @phdthesis{Rapp2004, author = {Rapp, Ulrike}, title = {Achieving protective immunity against intracellular bacterial pathogens : a study on the efficiency of Gp96 as a vaccine carrier}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9096}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {Protective vaccination against intracellular pathogens using HSP fusion proteins in the Listeria model.}, subject = {Listeria monocytogenes}, language = {en} } @phdthesis{Wollmershaeuser2003, author = {Wollmersh{\"a}user, Timo}, title = {A theory of managed floating}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8676}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {After the experience with the currency crises of the 1990s, a broad consensus has emerged among economists that such crises can only be avoided if countries that decide to maintain unrestricted capital mobility adopt either independently floating exchange rates or very hard pegs (currency boards, dollarisation). As a consequence of this view, which has been enshrined in the so-called impossible trinity, all intermediate currency regimes are regarded as inherently unstable. As far as economic theory is concerned, this view has the attractive feature that it not only fits the logic of traditional open economy macro models, but also that solid theoretical frameworks have been developed for both corner solutions (independently floating exchange rates with a domestically oriented interest rate policy; hard pegs with a completely exchange rate oriented monetary policy). Above all, the IMF statistics seem to confirm that intermediate regimes are indeed less and less favoured by both industrial countries and emerging market economies. However, in the last few years an anomaly has been detected which seriously challenges this paradigm of exchange rate regimes. In their influential cross-country study, Calvo and Reinhart (2000) have shown that many of those countries which had declared themselves as 'independent floaters' in the IMF statistics were characterised by a pronounced 'fear of floating' and were actually reacting heavily to exchange rate movements, either in the form of an interest rate response, or by intervening in foreign exchange markets.
The present analysis can be understood as an approach to developing a theoretical framework for this managed floating behaviour, which - even though it is widely used in practice - has not attracted very much attention in monetary economics. In particular, we would like to fill the gap that has recently been pointed out by one of the few 'middle-ground' economists, John Williamson, who argued that "managed floating is not a regime with well-defined rules" (Williamson, 2000, p. 47). Our approach is based on a standard open economy macro model typically employed for the analysis of monetary policy strategies. The consequences of independently floating and market-determined exchange rates are evaluated in terms of a social welfare function or, to be more precise, in terms of an intertemporal loss function containing a central bank's final targets, output and inflation. We explicitly model the source of the observable fear of floating by questioning the basic assumption underlying most open economy macro models that the foreign exchange market is an efficient asset market with rational agents. We will show that both policy reactions to the fear of floating (an interest rate response to exchange rate movements, which we call indirect managed floating, and sterilised interventions in the foreign exchange markets, which we call direct managed floating) can be rationalised if we allow for deviations from the assumption of perfectly functioning foreign exchange markets and if we assume a central bank that takes these deviations into account and behaves so as to reach its final targets. In such a scenario with a high degree of uncertainty about the true model determining the exchange rate, the rationale for indirect managed floating is the monetary policy maker's quest for a robust interest rate policy rule that performs comparatively well across a range of alternative exchange rate models. We will show, however, that the strategy of indirect managed floating still bears the risk that the central bank's final targets might be negatively affected by the unpredictability of the true exchange rate behaviour. This is where the second policy measure comes into play. The use of sterilised foreign exchange market interventions to counter movements of market-determined exchange rates can be rationalised by a central bank's effort to lower the risk of missing its final targets if it has only a single instrument at its disposal. We provide a theoretical, model-based foundation of a strategy of direct managed floating in which the central bank targets, in addition to a short-term interest rate, the nominal exchange rate. In particular, we develop a rule for the instrument of intervening in the foreign exchange market that is based on the failure of the foreign exchange market to guarantee a reliable relationship between the exchange rate and other fundamental variables.}, language = {en} } @phdthesis{Lin2004, author = {Lin, Chia-Huey}, title = {Functional characterization of rat CTLA-4 and CD25+CD4+ regulatory T cells}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8521}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {In the present work, two important negative regulators of T cell responses in rats were examined. At the molecular level, rat CTLA-4, a receptor important for deactivating T cell responses, was examined for its expression pattern and in vitro functions. For this purpose, anti-rat CTLA-4 mAbs were generated.
Consistent with studies in mice and humans, rat CTLA-4 was detectable only in CD25+CD4+ regulatory T cells in unstimulated rats, and was upregulated in all activated T cells. Cross-linking rat CTLA-4 led to the deactivation of anti-TCR- and anti-CD28-stimulated (costimulated) T cell responses, such as reduced activation marker expression, proliferation, and IL-2 production. Although T cells stimulated with the superagonistic anti-CD28 antibody alone, without TCR engagement, also increased their CTLA-4 expression, delayed kinetics of CTLA-4 upregulation were found in cells stimulated in this way. The physiological relevance of this finding needs further investigation. At the cellular level, rat CD25+CD4+ regulatory T cells were examined in detail. Using the anti-rat CTLA-4 mAbs, the phenotype of CD25+CD4+ regulatory T cells was investigated. Identical to the mouse and human Treg phenotype, rat CD25+CD4+ T cells constitutively expressed CTLA-4, were predominantly CD45RC low, and expressed high levels of CD62L (L-selectin). CD25+CD4+ cells proliferated poorly and were unable to produce IL-2 upon engagement of the TCR and CD28. Furthermore, rat CD25+CD4+ cells produced high amounts of the anti-inflammatory cytokine IL-10 upon stimulation. Importantly, freshly isolated CD25+CD4+ T cells from na{\"i}ve rats exhibited suppressor activity in in vitro suppressor assays. In vitro, CD25+CD4+ regulatory T cells proliferated vigorously upon superagonistic anti-CD28 stimulation and became very potent suppressor cells. In vivo, a single injection of the CD28 superagonist into rats induced transient accumulation and activation of CD25+CD4+ regulatory T cells. These findings suggest, first, that efficient expansion of CD25+CD4+ cells without loss of their suppressive effects (indeed, with enhanced suppressive activity) can be achieved with the superagonistic anti-CD28 antibody in vitro. Second, the induction of a disproportionate expansion of CD25+CD4+ cells by a single injection of the superagonistic anti-CD28 antibody in vivo implies that this antibody may be a promising candidate for treating autoimmune diseases, by causing a transient increase of activated CD25+CD4+ T cells and thus tipping ongoing autoimmune responses toward self-tolerance.}, subject = {Ratte}, language = {en} } @phdthesis{Huelsewig2003, author = {H{\"u}lsewig, Oliver}, title = {Bank lending and monetary policy transmission in Germany}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-8686}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2003}, abstract = {This study investigates the credit channel in the transmission of monetary policy in Germany by means of a structural analysis of aggregate bank loan data. We base our analysis on a stylized model of the banking firm, which specifies the loan supply decisions of banks in the light of expectations about the future course of monetary policy. Using the model as a guide, we apply a vector error correction model (VECM), in which we identify long-run cointegration relationships that can be interpreted as loan supply and loan demand equations. In this way, the identification problem inherent in reduced-form approaches based on aggregate data is explicitly addressed. The short-run dynamics are explored by means of innovation analysis, which displays the reaction of the variables in the system to a monetary policy shock.
The main implication of our results is that the credit channel in Germany appears to be effective, as we find that loan supply effects in addition to loan demand effects contribute to the propagation of monetary policy measures.}, language = {en} }