@phdthesis{Klotzky2018, author = {Klotzky, Jens}, title = {Well-posedness of a fluid-particle interaction model}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-169009}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {This thesis considers a model of a scalar partial differential equation in the presence of a singular source term, modeling the interaction between an inviscid fluid represented by the Burgers equation and an arbitrary, finite amount of particles moving inside the fluid, each one acting as a point-wise drag force with a particle related friction constant. \begin{align*} \partial_t u + \partial_x (u^2/2) \&= \sum_{i \in N(t)} \lambda_i \Big(h_i'(t)-u(t,h_i(t)\Big)\delta(x-h_i(t)) \end{align*} The model was introduced for the case of a single particle by Lagouti{\`e}re, Seguin and Takahashi, is a first step towards a better understanding of interaction between fluids and solids on the level of partial differential equations and has the unique property of considering entropy admissible solutions and the interaction with shockwaves. The model is extended to an arbitrary, finite number of particles and interactions like merging, splitting and crossing of particle paths are considered. The theory of entropy admissibility is revisited for the cases of interfaces and discontinuous flux conservation laws, existing results are summarized and compared, and adapted for regions of particle interactions. To this goal, the theory of germs introduced by Andreianov, Karlsen and Risebro is extended to this case of non-conservative interface coupling. Exact solutions for the Riemann Problem of particles drifting apart are computed and analysis on the behavior of entropy solutions across the particle related interfaces is used to determine physically relevant and consistent behavior for merging and splitting of particles. 
Well-posedness of entropy solutions to the Cauchy problem is proven, using an explicit construction method, L-infinity bounds, an approximation of the particle paths and compactness arguments to obtain existence of entropy solutions. Uniqueness is shown in the class of weak entropy solutions using almost classical Kruzkov-type analysis and the notion of L1-dissipative germs. Necessary fundamentals of hyperbolic conservation laws, including weak solutions, shocks and rarefaction waves and the Rankine-Hugoniot condition are briefly recapitulated.}, subject = {Hyperbolische Differentialgleichung}, language = {en} } @phdthesis{Ruppert2017, author = {Ruppert, Markus}, title = {Wege der Analogiebildung - Eine qualitative Studie {\"u}ber den Prozess der Analogiebildung beim L{\"o}sen von Aufgaben}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-155910}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {311}, year = {2017}, abstract = {{\"U}ber die besondere Bedeutung von Analogiebildungsprozessen beim Lernen im Allgemeinen und beim Lernen von Mathematik im Speziellen besteht ein breiter wissenschaftlicher Konsens. Es liegt deshalb nahe, von einem lernf{\"o}rderlichen Mathematikunterricht zu verlangen, dass er im Bewusstsein dieser Bedeutung entwickelt ist - dass er also einerseits Analogien aufzeigt und sich diese beim Lehren von Mathematik zunutze macht, dass er andererseits aber auch dem Lernenden Gelegenheiten bietet, Analogien zu erkennen und zu entwickeln. Kurz: Die F{\"a}higkeit zum Bilden von Analogien soll durch den Unterricht gezielt gef{\"o}rdert werden. Um diesem Anspruch gerecht werden zu k{\"o}nnen, m{\"u}ssen ausreichende Kenntnisse dar{\"u}ber vorliegen, wie Analogiebildungsprozesse beim Lernen von Mathematik und beim L{\"o}sen mathematischer Aufgaben ablaufen, wodurch sich erfolgreiche Analogiebildungsprozesse auszeichnen und an welchen Stellen m{\"o}glicherweise Schwierigkeiten bestehen. 
Der Autor zeigt auf, wie Prozesse der Analogiebildung beim L{\"o}sen mathematischer Aufgaben initiiert, beobachtet, beschrieben und interpretiert werden k{\"o}nnen, um auf dieser Grundlage Ansatzpunkte f{\"u}r geeignete F{\"o}rdermaßnahmen zu identifizieren, bestehende Ideen zur F{\"o}rderung der Analogiebildungsf{\"a}higkeit zu beurteilen und neue Ideen zu entwickeln. Es werden dabei Wege der Analogiebildung nachgezeichnet und untersucht, die auf der Verschr{\"a}nkung zweier Dimensionen der Analogiebildung im Rahmen des zugrundeliegenden theoretischen Modells beruhen. So k{\"o}nnen verschiedene Vorgehensweisen ebenso kontrastiert werden, wie kritische Punkte im Verlauf eines Analogiebildungsprozesses. Es ergeben sich daraus Unterrichtsvorschl{\"a}ge, die auf den Ideen zum beispielbasierten Lernen aufbauen.}, subject = {Analogie}, language = {de} } @phdthesis{Lieb2017, author = {Lieb, Julia}, title = {Counting Polynomial Matrices over Finite Fields : Matrices with Certain Primeness Properties and Applications to Linear Systems and Coding Theory}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-064-1 (print)}, doi = {10.25972/WUP-978-3-95826-065-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-151303}, school = {W{\"u}rzburg University Press}, pages = {164}, year = {2017}, abstract = {This dissertation is dealing with three mathematical areas, namely polynomial matrices over finite fields, linear systems and coding theory. Coprimeness properties of polynomial matrices provide criteria for the reachability and observability of interconnected linear systems. Since time-discrete linear systems over finite fields and convolutional codes are basically the same objects, these results could be transfered to criteria for non-catastrophicity of convolutional codes. We calculate the probability that specially structured polynomial matrices are right prime. 
In particular, formulas for the number of pairwise coprime polynomials and for the number of mutually left coprime polynomial matrices are calculated. This leads to the probability that a parallel connected linear system is reachable and that a parallel connected convolutional codes is non-catastrophic. Moreover, the corresponding probabilities are calculated for other networks of linear systems and convolutional codes, such as series connection. Furthermore, the probabilities that a convolutional codes is MDP and that a clock code is MDS are approximated. Finally, we consider the probability of finding a solution for a linear network coding problem.}, subject = {Lineares System}, language = {en} } @phdthesis{Steck2018, author = {Steck, Daniel}, title = {Lagrange Multiplier Methods for Constrained Optimization and Variational Problems in Banach Spaces}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-174444}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {This thesis is concerned with a class of general-purpose algorithms for constrained minimization problems, variational inequalities, and quasi-variational inequalities in Banach spaces. A substantial amount of background material from Banach space theory, convex analysis, variational analysis, and optimization theory is presented, including some results which are refinements of those existing in the literature. This basis is used to formulate an augmented Lagrangian algorithm with multiplier safeguarding for the solution of constrained optimization problems in Banach spaces. The method is analyzed in terms of local and global convergence, and many popular problem classes such as nonlinear programming, semidefinite programming, and function space optimization are shown to be included as special cases of the general setting. 
The algorithmic framework is then extended to variational and quasi-variational inequalities, which include, by extension, Nash and generalized Nash equilibrium problems. For these problem classes, the convergence is analyzed in detail. The thesis then presents a rich collection of application examples for all problem classes, including implementation details and numerical results.}, subject = {Optimierung}, language = {en} } @phdthesis{Forster2016, author = {Forster, Johannes}, title = {Variational Approach to the Modeling and Analysis of Magnetoelastic Materials}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147226}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {This doctoral thesis is concerned with the mathematical modeling of magnetoelastic materials and the analysis of PDE systems describing these materials and obtained from a variational approach. The purpose is to capture the behavior of elastic particles that are not only magnetic but exhibit a magnetic domain structure which is well described by the micromagnetic energy and the Landau-Lifshitz-Gilbert equation of the magnetization. The equation of motion for the material's velocity is derived in a continuum mechanical setting from an energy ansatz. In the modeling process, the focus is on the interplay between Lagrangian and Eulerian coordinate systems to combine elasticity and magnetism in one model without the assumption of small deformations. The resulting general PDE system is simplified using special assumptions. Existence of weak solutions is proved for two variants of the PDE system, one including gradient flow dynamics on the magnetization, and the other featuring the Landau-Lifshitz-Gilbert equation. The proof is based on a Galerkin method and a fixed point argument. The analysis of the PDE system with the Landau-Lifshitz-Gilbert equation uses a more involved approach to obtain weak solutions based on G. Carbou and P. 
Fabrie 2001.}, subject = {Magnetoelastizit{\"a}t}, language = {en} } @article{KasangKalluvyaMajingeetal.2016, author = {Kasang, Christa and Kalluvya, Samuel and Majinge, Charles and Kongola, Gilbert and Mlewa, Mathias and Massawe, Irene and Kabyemera, Rogatus and Magambo, Kinanga and Ulmer, Albrecht and Klinker, Hartwig and Gschmack, Eva and Horn, Anne and Koutsilieri, Eleni and Preiser, Wolfgang and Hofmann, Daniela and Hain, Johannes and M{\"u}ller, Andreas and D{\"o}lken, Lars and Weissbrich, Benedikt and Rethwilm, Axel and Stich, August and Scheller, Carsten}, title = {Effects of Prednisolone on Disease Progression in Antiretroviral-Untreated HIV Infection: A 2-Year Randomized, Double-Blind Placebo-Controlled Clinical Trial}, series = {PLoS One}, volume = {11}, journal = {PLoS One}, number = {1}, doi = {10.1371/journal.pone.0146678}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-146479}, pages = {e0146678}, year = {2016}, abstract = {Background HIV-disease progression correlates with immune activation. Here we investigated whether corticosteroid treatment can attenuate HIV disease progression in antiretroviral-untreated patients. Methods Double-blind, placebo-controlled randomized clinical trial including 326 HIV-patients in a resource-limited setting in Tanzania (clinicaltrials.gov NCT01299948). Inclusion criteria were a CD4 count above 300 cells/μl, the absence of AIDS-defining symptoms and an ART-na{\"i}ve therapy status. Study participants received 5 mg prednisolone per day or placebo for 2 years. Primary endpoint was time to progression to an AIDS-defining condition or to a CD4-count below 200 cells/μl. Results No significant change in progression towards the primary endpoint was observed in the intent-to-treat (ITT) analysis (19 cases with prednisolone versus 28 cases with placebo, p = 0.1407). In a per-protocol (PP)-analysis, 13 versus 24 study participants progressed to the primary study endpoint (p = 0.0741). 
Secondary endpoints: Prednisolone-treatment decreased immune activation (sCD14, suPAR, CD38/HLA-DR/CD8+) and increased CD4-counts (+77.42 ± 5.70 cells/μl compared to -37.42 ± 10.77 cells/μl under placebo, p < 0.0001). Treatment with prednisolone was associated with a 3.2-fold increase in HIV viral load (p < 0.0001). In a post-hoc analysis stratifying for sex, females treated with prednisolone progressed significantly slower to the primary study endpoint than females treated with placebo (ITT-analysis: 11 versus 21 cases, p = 0.0567; PP-analysis: 5 versus 18 cases, p = 0.0051): No changes in disease progression were observed in men. Conclusions This study could not detect any significant effects of prednisolone on disease progression in antiretroviral-untreated HIV infection within the intent-to-treat population. However, significant effects were observed on CD4 counts, immune activation and HIV viral load. This study contributes to a better understanding of the role of immune activation in the pathogenesis of HIV infection.}, language = {en} } @phdthesis{Koch2016, author = {Koch, Julia Diana}, title = {Value Ranges for Schlicht Functions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-144978}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {This thesis deals with value sets, i.e. the question of what the set of values that a set of functions can take in a prescribed point looks like. Interest in such problems has been around for a long time; a first answer was given by the Schwarz lemma in the 19th century, and soon various refinements were proven. Since the 1930s, a powerful method for solving such problems has been developed, namely Loewner theory. 
We make extensive use of this tool, as well as variation methods which go back to Schiffer to examine the following questions: We describe the set of values a schlicht normalised function on the unit disc with prescribed derivative at the origin can take by applying Pontryagin's maximum principle to the radial Loewner equation. We then determine the value ranges for the set of holomorphic, normalised, and bounded functions that have only real coefficients in their power series expansion around 0, and for the smaller set of functions which are additionally typically real. Furthermore, we describe the values a univalent self-mapping of the upper half-plane with hydrodynamical normalization which is symmetric with respect to the imaginary axis can take. Lastly, we give a necessary condition for a schlicht bounded function f on the unit disc to have extremal derivative in a point z where its value f(z) is fixed by using variation methods.}, subject = {Pontrjagin-Maximumprinzip}, language = {en} } @article{SchindeleBorzi2016, author = {Schindele, Andreas and Borz{\`i}, Alfio}, title = {Proximal Methods for Elliptic Optimal Control Problems with Sparsity Cost Functional}, series = {Applied Mathematics}, volume = {7}, journal = {Applied Mathematics}, number = {9}, doi = {10.4236/am.2016.79086}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-145850}, pages = {967-992}, year = {2016}, abstract = {First-order proximal methods that solve linear and bilinear elliptic optimal control problems with a sparsity cost functional are discussed. In particular, fast convergence of these methods is proved. For benchmarking purposes, inexact proximal schemes are compared to an inexact semismooth Newton method. 
Results of numerical experiments are presented to demonstrate the computational effectiveness of proximal schemes applied to infinite-dimensional elliptic optimal control problems and to validate the theoretical estimates.}, language = {en} } @phdthesis{GallegoValencia2017, author = {Gallego Valencia, Juan Pablo}, title = {On Runge-Kutta discontinuous Galerkin methods for compressible Euler equations and the ideal magneto-hydrodynamical model}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-148874}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {An explicit Runge-Kutta discontinuous Galerkin (RKDG) method is used to device numerical schemes for both the compressible Euler equations of gas dynamics and the ideal magneto- hydrodynamical (MHD) model. These systems of conservation laws are known to have discontinuous solutions. Discontinuities are the source of spurious oscillations in the solution profile of the numerical approximation, when a high order accurate numerical method is used. Different techniques are reviewed in order to control spurious oscillations. A shock detection technique is shown to be useful in order to determine the regions where the spurious oscillations appear such that a Limiter can be used to eliminate these numeric artifacts. To guarantee the positivity of specific variables like the density and the pressure, a positivity preserving limiter is used. Furthermore, a numerical flux, proven to preserve the entropy stability of the semi-discrete DG scheme for the MHD system is used. Finally, the numerical schemes are implemented using the deal.II C++ libraries in the dflo code. 
The solution of common test cases show the capability of the method.}, subject = {Eulersche Differentialgleichung}, language = {en} } @unpublished{BreitenbachBorzi2019, author = {Breitenbach, Tim and Borz{\`i}, Alfio}, title = {On the SQH scheme to solve non-smooth PDE optimal control problems}, series = {Numerical Functional Analysis and Optimization}, journal = {Numerical Functional Analysis and Optimization}, doi = {10.1080/01630563.2019.1599911}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-180936}, year = {2019}, abstract = {A sequential quadratic Hamiltonian (SQH) scheme for solving different classes of non-smooth and non-convex PDE optimal control problems is investigated considering seven different benchmark problems with increasing difficulty. These problems include linear and nonlinear PDEs with linear and bilinear control mechanisms, non-convex and discontinuous costs of the controls, L\(^1\) tracking terms, and the case of state constraints. The SQH method is based on the characterisation of optimality of PDE optimal control problems by the Pontryagin's maximum principle (PMP). For each problem, a theoretical discussion of the PMP optimality condition is given and results of numerical experiments are presented that demonstrate the large range of applicability of the SQH scheme.}, language = {en} } @article{GaviraghiSchindeleAnnunziatoetal.2016, author = {Gaviraghi, Beatrice and Schindele, Andreas and Annunziato, Mario and Borz{\`i}, Alfio}, title = {On Optimal Sparse-Control Problems Governed by Jump-Diffusion Processes}, series = {Applied Mathematics}, volume = {7}, journal = {Applied Mathematics}, number = {16}, doi = {10.4236/am.2016.716162}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-147819}, pages = {1978 -- 2004}, year = {2016}, abstract = {A framework for the optimal sparse-control of the probability density function of a jump-diffusion process is presented. 
This framework is based on the partial integro-differential Fokker-Planck (FP) equation that governs the time evolution of the probability density function of this process. In the stochastic process and, correspondingly, in the FP model the control function enters as a time-dependent coefficient. The objectives of the control are to minimize a discrete-in-time, resp. continuous-in-time, tracking functionals and its L2- and L1-costs, where the latter is considered to promote control sparsity. An efficient proximal scheme for solving these optimal control problems is considered. Results of numerical experiments are presented to validate the theoretical results and the computational effectiveness of the proposed control framework.}, language = {en} } @phdthesis{Sapozhnikova2018, author = {Sapozhnikova, Kateryna}, title = {Robust Stability of Differential Equations with Maximum}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-173945}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2018}, abstract = {In this thesis stability and robustness properties of systems of functional differential equations which dynamics depends on the maximum of a solution over a prehistory time interval is studied. Max-operator is analyzed and it is proved that due to its presence such kind of systems are particular case of state dependent delay differential equations with piecewise continuous delay function. They are nonlinear, infinite-dimensional and may reduce to one-dimensional along its solution. Stability analysis with respect to input is accomplished by trajectory estimate and via averaging method. 
Numerical method is proposed.}, subject = {Differentialgleichung}, language = {en} } @phdthesis{Gaviraghi2017, author = {Gaviraghi, Beatrice}, title = {Theoretical and numerical analysis of Fokker-Planck optimal control problems for jump-diffusion processes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-145645}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {The topic of this thesis is the theoretical and numerical analysis of optimal control problems, whose differential constraints are given by Fokker-Planck models related to jump-diffusion processes. We tackle the issue of controlling a stochastic process by formulating a deterministic optimization problem. The key idea of our approach is to focus on the probability density function of the process, whose time evolution is modeled by the Fokker-Planck equation. Our control framework is advantageous since it allows to model the action of the control over the entire range of the process, whose statistics are characterized by the shape of its probability density function. We first investigate jump-diffusion processes, illustrating their main properties. We define stochastic initial-value problems and present results on the existence and uniqueness of their solutions. We then discuss how numerical solutions of stochastic problems are computed, focusing on the Euler-Maruyama method. We put our attention to jump-diffusion models with time- and space-dependent coefficients and jumps given by a compound Poisson process. We derive the related Fokker-Planck equations, which take the form of partial integro-differential equations. Their differential term is governed by a parabolic operator, while the nonlocal integral operator is due to the presence of the jumps. The derivation is carried out in two cases. On the one hand, we consider a process with unbounded range. 
On the other hand, we confine the dynamic of the sample paths to a bounded domain, and thus the behavior of the process in proximity of the boundaries has to be specified. Throughout this thesis, we set the barriers of the domain to be reflecting. The Fokker-Planck equation, endowed with initial and boundary conditions, gives rise to Fokker-Planck problems. Their solvability is discussed in suitable functional spaces. The properties of their solutions are examined, namely their regularity, positivity and probability mass conservation. Since closed-form solutions to Fokker-Planck problems are usually not available, one has to resort to numerical methods. The first main achievement of this thesis is the definition and analysis of conservative and positive-preserving numerical methods for Fokker-Planck problems. Our SIMEX1 and SIMEX2 (Splitting-Implicit-Explicit) schemes are defined within the framework given by the method of lines. The differential operator is discretized by a finite volume scheme given by the Chang-Cooper method, while the integral operator is approximated by a mid-point rule. This leads to a large system of ordinary differential equations, that we approximate with the Strang-Marchuk splitting method. This technique decomposes the original problem in a sequence of different subproblems with simpler structure, which are separately solved and linked to each other through initial conditions and final solutions. After performing the splitting step, we carry out the time integration with first- and second-order time-differencing methods. These steps give rise to the SIMEX1 and SIMEX2 methods, respectively. A full convergence and stability analysis of our schemes is included. Moreover, we are able to prove that the positivity and the mass conservation of the solution to Fokker-Planck problems are satisfied at the discrete level by the numerical solutions computed with the SIMEX schemes. 
The second main achievement of this thesis is the theoretical analysis and the numerical solution of optimal control problems governed by Fokker-Planck models. The field of optimal control deals with finding control functions in such a way that given cost functionals are minimized. Our framework aims at the minimization of the difference between a known sequence of values and the first moment of a jump-diffusion process; therefore, this formulation can also be considered as a parameter estimation problem for stochastic processes. Two cases are discussed, in which the form of the cost functional is continuous-in-time and discrete-in-time, respectively. The control variable enters the state equation as a coefficient of the Fokker-Planck partial integro-differential operator. We also include in the cost functional a \$L^1\$-penalization term, which enhances the sparsity of the solution. Therefore, the resulting optimization problem is nonconvex and nonsmooth. We derive the first-order optimality systems satisfied by the optimal solution. The computation of the optimal solution is carried out by means of proximal iterative schemes in an infinite-dimensional framework.}, subject = {Fokker-Planck-Gleichung}, language = {en} } @phdthesis{Tichy2011, author = {Tichy, Diana}, title = {On the Fragility Index}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-73610}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The Fragility Index captures the amount of risk in a stochastic system of arbitrary dimension. Its main mathematical tool is the asymptotic distribution of exceedance counts within the system which can be derived by use of multivariate extreme value theory. Thereby the basic assumption is that data comes from a distribution which lies in the domain of attraction of a multivariate extreme value distribution. The Fragility Index itself and its extension can serve as a quantitative measure for tail dependence in arbitrary dimensions. 
It is linked to the well known extremal index for stochastic processes as well the extremal coefficient of an extreme value distribution.}, subject = {Extremwertstatistik}, language = {en} } @phdthesis{Hofmann2012, author = {Hofmann, Martin}, title = {Contributions to Extreme Value Theory in the Space C[0,1]}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-74405}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {We introduce some mathematical framework for extreme value theory in the space of continuous functions on compact intervals and provide basic definitions and tools. Continuous max-stable processes on [0,1] are characterized by their "distribution functions" G which can be represented via a norm on function space, called D-norm. The high conformity of this setup with the multivariate case leads to the introduction of a functional domain of attraction approach for stochastic processes, which is more general than the usual one based on weak convergence. We also introduce the concept of "sojourn time transformation" and compare several types of convergence on function space. Again in complete accordance with the uni- or multivariate case it is now possible to get functional generalized Pareto distributions (GPD) W via W = 1 + log(G) in the upper tail. In particular, this enables us to derive characterizations of the functional domain of attraction condition for copula processes. Moreover, we investigate the sojourn time above a high threshold of a continuous stochastic process. It turns out that the limit, as the threshold increases, of the expected sojourn time given that it is positive, exists if the copula process corresponding to Y is in the functional domain of attraction of a max-stable process. 
If the process is in a certain neighborhood of a generalized Pareto process, then we can replace the constant threshold by a general threshold function and we can compute the asymptotic sojourn time distribution.}, subject = {Extremwertstatistik}, language = {en} } @techreport{Englert2009, author = {Englert, Stefan}, title = {Mathematica in 15 Minuten (Mathematica Version 6.0)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70275}, year = {2009}, abstract = {Mathematica ist ein hervorragendes Programm um mathematische Berechnungen - auch sehr komplexe - auf relativ einfache Art und Weise durchf{\"u}hren zu lassen. Dieses Skript soll eine wirklich kurze Einf{\"u}hrung in Mathematica geben und als Nachschlagewerk einiger g{\"a}ngiger Anwendungen von Mathematica dienen. Dabei wird folgende Grobgliederung verwendet: - Grundlagen: Graphische Oberfl{\"a}che, einfache Berechnungen, Formeleingabe - Bedienung: Vorstellung einiger Kommandos und Einblick in die Funktionsweise - Praxis: Beispielhafte Berechnung einiger Abitur- und {\"U}bungsaufgaben}, subject = {Anwendungssoftware}, language = {de} } @techreport{Englert2012, author = {Englert, Stefan}, title = {Mathematica in 15 Minuten (Mathematica Version 8.0)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70287}, year = {2012}, abstract = {Mathematica ist ein hervorragendes Programm um mathematische Berechnungen - auch sehr komplexe - auf relativ einfache Art und Weise durchf{\"u}hren zu lassen. Dieses Skript soll eine wirklich kurze Einf{\"u}hrung in Mathematica geben und als Nachschlagewerk einiger g{\"a}ngiger Anwendungen von Mathematica dienen. 
Dabei wird folgende Grobgliederung verwendet: - Grundlagen: Graphische Oberfl{\"a}che, einfache Berechnungen, Formeleingabe - Bedienung: Vorstellung einiger Kommandos und Einblick in die Funktionsweise - Praxis: Beispielhafte Berechnung einiger Abitur- und {\"U}bungsaufgaben}, subject = {Anwendungssoftware}, language = {de} } @phdthesis{Dreves2011, author = {Dreves, Axel}, title = {Globally Convergent Algorithms for the Solution of Generalized Nash Equilibrium Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69822}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Es werden verschiedene Verfahren zur L{\"o}sung verallgemeinerter Nash-Gleichgewichtsprobleme mit dem Schwerpunkt auf deren globaler Konvergenz entwickelt. Ein globalisiertes Newton-Verfahren zur Berechnung normalisierter L{\"o}sungen, ein nichtglattes Optimierungsverfahren basierend auf einer unrestringierten Umformulierung des spieltheoretischen Problems, und ein Minimierungsansatz sowei eine Innere-Punkte-Methode zur L{\"o}sung der gemeinsamen Karush-Kuhn-Tucker-Bedingungen der Spieler werden theoretisch untersucht und numerisch getestet. Insbesondere das Innere-Punkte Verfahren erweist sich als das zur Zeit wohl beste Verfahren zur L{\"o}sung verallgemeinerter Nash-Gleichgewichtsprobleme.}, subject = {Nash-Gleichgewicht}, language = {en} } @phdthesis{Mauder2012, author = {Mauder, Markus}, title = {Time-Optimal Control of the Bi-Steerable Robot: A Case Study in Optimal Control of Nonholonomic Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-75036}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {In this thesis, time-optimal control of the bi-steerable robot is addressed. The bi-steerable robot, a vehicle with two independently steerable axles, is a complex nonholonomic system with applications in many areas of land-based robotics. 
Motion planning and optimal control are challenging tasks for this system, since standard control schemes do not apply. The model of the bi-steerable robot considered here is a reduced kinematic model with the driving velocity and the steering angles of the front and rear axle as inputs. The steering angles of the two axles can be set independently from each other. The reduced kinematic model is a control system with affine and non-affine inputs, as the driving velocity enters the system linearly, whereas the steering angles enter nonlinearly. In this work, a new approach to solve the time-optimal control problem for the bi-steerable robot is presented. In contrast to most standard methods for time-optimal control, our approach does not exclusively rely on discretization and purely numerical methods. Instead, the Pontryagin Maximum Principle is used to characterize candidates for time-optimal solutions. The resultant boundary value problem is solved by optimization to obtain solutions to the path planning problem over a given time horizon. The time horizon is decreased and the path planning is iterated to approximate a time-optimal solution. An optimality condition is introduced which depends on the number of cusps, i.e., reversals of the driving direction of the robot. This optimality condition allows to single out non-optimal solutions with too many cusps. In general, our approach only gives approximations of time-optimal solutions, since only normal regular extremals are considered as solutions to the path planning problem, and the path planning is terminated when an extremal with minimal number of cusps is found. However, for most desired configurations, normal regular extremals with the minimal number of cusps provide time-optimal solutions for the bi-steerable robot. The convergence of the approach is analyzed and its probabilistic completeness is shown. 
Moreover, simulation results on time-optimal solutions for the bi-steerable robot are presented.}, subject = {Mobiler Roboter}, language = {en} } @phdthesis{Akindeinde2012, author = {Akindeinde, Saheed Ojo}, title = {Numerical Verification of Optimality Conditions in Optimal Control Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76065}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {This thesis is devoted to numerical verification of optimality conditions for non-convex optimal control problems. In the first part, we are concerned with a-posteriori verification of sufficient optimality conditions. It is a common knowledge that verification of such conditions for general non-convex PDE-constrained optimization problems is very challenging. We propose a method to verify second-order sufficient conditions for a general class of optimal control problem. If the proposed verification method confirms the fulfillment of the sufficient condition then a-posteriori error estimates can be computed. A special ingredient of our method is an error analysis for the Hessian of the underlying optimization problem. We derive conditions under which positive definiteness of the Hessian of the discrete problem implies positive definiteness of the Hessian of the continuous problem. The results are complemented with numerical experiments. In the second part, we investigate adaptive methods for optimal control problems with finitely many control parameters. We analyze a-posteriori error estimates based on verification of second-order sufficient optimality conditions using the method developed in the first part. Reliability and efficiency of the error estimator are shown. We illustrate through numerical experiments, the use of the estimator in guiding adaptive mesh refinement.}, subject = {Optimale Kontrolle}, language = {en} } @article{BallesterBolinchesBeidlemanHeinekenetal.2010, author = {Ballester-Bolinches, A. and Beidleman, J. C. and Heineken, H. 
and Pedraza-Aguilera, M. C.}, title = {Local Classes and Pairwise Mutually Permutable Products of Finite Groups}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-68062}, year = {2010}, abstract = {The main aim of the paper is to present some results about products of pairwise mutually permutable subgroups and local classes.}, subject = {Mathematik}, language = {en} } @article{FerranteWimmer2010, author = {Ferrante, Augusto and Wimmer, Harald K.}, title = {Reachability matrices and cyclic matrices}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-68074}, year = {2010}, abstract = {We study reachability matrices R(A, b) = [b,Ab, . . . ,An-1b], where A is an n × n matrix over a field K and b is in Kn. We characterize those matrices that are reachability matrices for some pair (A, b). In the case of a cyclic matrix A and an n-vector of indeterminates x, we derive a factorization of the polynomial det(R(A, x)).}, subject = {Mathematik}, language = {en} } @book{FalkMarohnMicheletal.2012, author = {Falk, Michael and Marohn, Frank and Michel, Ren{\´e} and Hofmann, Daniel and Macke, Maria and Spachmann, Christoph and Englert, Stefan}, title = {A First Course on Time Series Analysis : Examples with SAS [Version 2012.August.01]}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72617}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {The analysis of real data by means of statistical methods with the aid of a software package common in industry and administration usually is not an integral part of mathematics studies, but it will certainly be part of a future professional work. The present book links up elements from time series analysis with a selection of statistical procedures used in general practice including the statistical software package SAS. 
Consequently this book addresses students of statistics as well as students of other branches such as economics, demography and engineering, where lectures on statistics belong to their academic training. But it is also intended for the practician who, beyond the use of statistical tools, is interested in their mathematical background. Numerous problems illustrate the applicability of the presented statistical procedures, where SAS gives the solutions. The programs used are explicitly listed and explained. No previous experience is expected either in SAS or in a special computer system so that a short training period is guaranteed. This book is meant for a two semester course (lecture, seminar or practical training) where the first three chapters can be dealt with in the first semester. They provide the principal components of the analysis of a time series in the time domain. Chapters 4, 5 and 6 deal with its analysis in the frequency domain and can be worked through in the second term. In order to understand the mathematical background some terms are useful such as convergence in distribution, stochastic convergence, maximum likelihood estimator as well as a basic knowledge of the test theory, so that work on the book can start after an introductory lecture on stochastics. Each chapter includes exercises. An exhaustive treatment is recommended. Chapter 7 (case study) deals with a practical case and demonstrates the presented methods. It is possible to use this chapter independently in a seminar or practical training course, if the concepts of time series analysis are already well understood. This book is consecutively subdivided in a statistical part and an SAS-specific part. For better clearness the SAS-specific parts are highlighted. 
This book is an open source project under the GNU Free Documentation License.}, subject = {Zeitreihenanalyse}, language = {en} } @phdthesis{Schoenlein2012, author = {Sch{\"o}nlein, Michael}, title = {Stability and Robustness of Fluid Networks: A Lyapunov Perspective}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72235}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {In the verification of positive Harris recurrence of multiclass queueing networks the stability analysis for the class of fluid networks is of vital interest. This thesis addresses stability of fluid networks from a Lyapunov point of view. In particular, the focus is on converse Lyapunov theorems. To gain an unified approach the considerations are based on generic properties that fluid networks under widely used disciplines have in common. It is shown that the class of closed generic fluid network models (closed GFNs) is too wide to provide a reasonable Lyapunov theory. To overcome this fact the class of strict generic fluid network models (strict GFNs) is introduced. In this class it is required that closed GFNs satisfy additionally a concatenation and a lower semicontinuity condition. We show that for strict GFNs a converse Lyapunov theorem is true which provides a continuous Lyapunov function. Moreover, it is shown that for strict GFNs satisfying a trajectory estimate a smooth converse Lyapunov theorem holds. To see that widely used queueing disciplines fulfill the additional conditions, fluid networks are considered from a differential inclusions perspective. Within this approach it turns out that fluid networks under general work-conserving, priority and proportional processor-sharing disciplines define strict GFNs. Furthermore, we provide an alternative proof for the fact that the Markov process underlying a multiclass queueing network is positive Harris recurrent if the associate fluid network defining a strict GFN is stable. 
The proof explicitly uses the Lyapunov function admitted by the stable strict GFN. Also, the differential inclusions approach shows that first-in-first-out disciplines play a special role.}, subject = {Warteschlangennetz}, language = {en} } @article{ChenchiahSchloemerkemper2012, author = {Chenchiah, Isaac and Schl{\"o}merkemper, Anja}, title = {Non-laminate microstructures in monoclinic-I martensite}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72134}, year = {2012}, abstract = {We study the symmetrised rank-one convex hull of monoclinic-I martensite (a twelve-variant material) in the context of geometrically-linear elasticity. We construct sets of T3s, which are (non-trivial) symmetrised rank-one convex hulls of 3-tuples of pairwise incompatible strains. Moreover we construct a five-dimensional continuum of T3s and show that its intersection with the boundary of the symmetrised rank-one convex hull is four-dimensional. We also show that there is another kind of monoclinic-I martensite with qualitatively different semi-convex hulls which, so far as we know, has not been experimentally observed. Our strategy is to combine understanding of the algebraic structure of symmetrised rank-one convex cones with knowledge of the faceting structure of the convex polytope formed by the strains.}, subject = {Martensit}, language = {en} } @phdthesis{Schroeter2012, author = {Schr{\"o}ter, Martin}, title = {Newton Methods for Image Registration}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-71490}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Consider the situation where two or more images are taken from the same object. After taking the first image, the object is moved or rotated so that the second recording depicts it in a different manner. Additionally, take heed of the possibility that the imaging techniques may have also been changed. 
One of the main problems in image processing is to determine the spatial relation between such images. The corresponding process of finding the spatial alignment is called "registration". In this work, we study the optimization problem which corresponds to the registration task. Especially, we exploit the Lie group structure of the set of transformations to construct efficient, intrinsic algorithms. We also apply the algorithms to medical registration tasks. However, the methods developed are not restricted to the field of medical image processing. We also have a closer look at more general forms of optimization problems and show connections to related tasks.}, subject = {Newton-Verfahren}, language = {en} } @misc{Englert2009, type = {Master Thesis}, author = {Englert, Stefan}, title = {Sch{\"a}tzer des Artenreichtums bei speziellen Erscheinungsh{\"a}ufigkeiten}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-71362}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {Bei vielen Fragestellungen, in denen sich eine Grundgesamtheit in verschiedene Klassen unterteilt, ist weniger die relative Klassengr{\"o}ße als vielmehr die Anzahl der Klassen von Bedeutung. So interessiert sich beispielsweise der Biologe daf{\"u}r, wie viele Spezien einer Gattung es gibt, der Numismatiker daf{\"u}r, wie viele M{\"u}nzen oder M{\"u}nzpr{\"a}gest{\"a}tten es in einer Epoche gab, der Informatiker daf{\"u}r, wie viele unterschiedlichen Eintr{\"a}ge es in einer sehr großen Datenbank gibt, der Programmierer daf{\"u}r, wie viele Fehler eine Software enth{\"a}lt oder der Germanist daf{\"u}r, wie groß der Wortschatz eines Autors war oder ist. Dieser Artenreichtum ist die einfachste und intuitivste Art und Weise eine Population oder Grundgesamtheit zu charakterisieren. Jedoch kann nur in Kollektiven, in denen die Gesamtanzahl der Bestandteile bekannt und relativ klein ist, die Anzahl der verschiedenen Spezien durch Erfassung aller bestimmt werden. 
In allen anderen F{\"a}llen ist es notwendig die Spezienanzahl durch Sch{\"a}tzungen zu bestimmen.}, subject = {Statistik}, language = {de} } @phdthesis{Tichy2011, author = {Tichy, Michael}, title = {On algebraic aggregation methods in additive preconditioning}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56541}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {In the following dissertation we consider three preconditioners of algebraic multigrid type, though they are defined for arbitrary prolongation and restriction operators, we consider them in more detail for the aggregation method. The strengthened Cauchy-Schwarz inequality and the resulting angle between the spaces will be our main interests. In this context we will introduce some modifications. For the problem of the one-dimensional convection we obtain perfect theoretical results. Although this is not the case for more complex problems, the numerical results we present will show that the modifications are also useful in these situation. Additionally, we will consider a symmetric problem in the energy norm and present a simple rule for algebraic aggregation.}, subject = {Pr{\"a}konditionierung}, language = {en} } @book{FalkMarohnMicheletal.2011, author = {Falk, Michael and Marohn, Frank and Michel, Ren{\´e} and Hofmann, Daniel and Macke, Maria and Tewes, Bernward and Dinges, Peter and Spachmann, Christoph and Englert, Stefan}, title = {A First Course on Time Series Analysis : Examples with SAS}, organization = {Universit{\"a}t W{\"u}rzburg / Lehrstuhl f{\"u}r Statistik}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56489}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The analysis of real data by means of statistical methods with the aid of a software package common in industry and administration usually is not an integral part of mathematics studies, but it will certainly be part of a future professional work. 
The present book links up elements from time series analysis with a selection of statistical procedures used in general practice including the statistical software package SAS. Consequently this book addresses students of statistics as well as students of other branches such as economics, demography and engineering, where lectures on statistics belong to their academic training. But it is also intended for the practician who, beyond the use of statistical tools, is interested in their mathematical background. Numerous problems illustrate the applicability of the presented statistical procedures, where SAS gives the solutions. The programs used are explicitly listed and explained. No previous experience is expected either in SAS or in a special computer system so that a short training period is guaranteed. This book is meant for a two semester course (lecture, seminar or practical training) where the first three chapters can be dealt with in the first semester. They provide the principal components of the analysis of a time series in the time domain. Chapters 4, 5 and 6 deal with its analysis in the frequency domain and can be worked through in the second term. In order to understand the mathematical background some terms are useful such as convergence in distribution, stochastic convergence, maximum likelihood estimator as well as a basic knowledge of the test theory, so that work on the book can start after an introductory lecture on stochastics. Each chapter includes exercises. An exhaustive treatment is recommended. Chapter 7 (case study) deals with a practical case and demonstrates the presented methods. It is possible to use this chapter independently in a seminar or practical training course, if the concepts of time series analysis are already well understood. This book is consecutively subdivided in a statistical part and an SAS-specific part. For better clearness the SAS-specific parts are highlighted. 
This book is an open source project under the GNU Free Documentation License.}, subject = {Zeitreihenanalyse}, language = {en} } @phdthesis{Seifert2020, author = {Seifert, Bastian}, title = {Multivariate Chebyshev polynomials and FFT-like algorithms}, doi = {10.25972/OPUS-20684}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-206845}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {This dissertation investigates the application of multivariate Chebyshev polynomials in the algebraic signal processing theory for the development of FFT-like algorithms for discrete cosine transforms on weight lattices of compact Lie groups. After an introduction of the algebraic signal processing theory, a multivariate Gauss-Jacobi procedure for the development of orthogonal transforms is proven. Two theorems on fast algorithms in algebraic signal processing, one based on a decomposition property of certain polynomials and the other based on induced modules, are proven as multivariate generalizations of prior theorems. The definition of multivariate Chebyshev polynomials based on the theory of root systems is recalled. It is shown how to use these polynomials to define discrete cosine transforms on weight lattices of compact Lie groups. Furthermore it is shown how to develop FFT-like algorithms for these transforms. Then the theory of matrix-valued, multivariate Chebyshev polynomials is developed based on prior ideas. 
Under an existence assumption a formula for generating functions of these matrix-valued Chebyshev polynomials is deduced.}, subject = {Schnelle Fourier-Transformation}, language = {en} } @phdthesis{Wisheckel2020, author = {Wisheckel, Florian}, title = {Some Applications of D-Norms to Probability and Statistics}, doi = {10.25972/OPUS-21214}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-212140}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {This cumulative dissertation is organized as follows: After the introduction, the second chapter, based on "Asymptotic independence of bivariate order statistics" (2017) by Falk and Wisheckel, is an investigation of the asymptotic dependence behavior of the components of bivariate order statistics. We find that the two components of the order statistics become asymptotically independent for certain combinations of (sequences of) indices that are selected, and it turns out that no further assumptions on the dependence of the two components in the underlying sample are necessary. To establish this, an explicit representation of the conditional distribution of bivariate order statistics is derived. Chapter 3 is from "Conditional tail independence in archimedean copula models" (2019) by Falk, Padoan and Wisheckel and deals with the conditional distribution of an Archimedean copula, conditioned on one of its components. We show that its tails are independent under minor conditions on the generator function, even if the unconditional tails were dependent. The theoretical findings are underlined by a simulation study and can be generalized to Archimax copulas. "Generalized pareto copulas: A key to multivariate extremes" (2019) by Falk, Padoan and Wisheckel lead to Chapter 4 where we introduce a nonparametric approach to estimate the probability that a random vector exceeds a fixed threshold if it follows a Generalized Pareto copula. 
To this end, some theory underlying the concept of Generalized Pareto distributions is presented first, the estimation procedure is tested using a simulation and finally applied to a dataset of air pollution parameters in Milan, Italy, from 2002 until 2017. The fifth chapter collects some additional results on derivatives of D-norms, in particular a condition for the existence of directional derivatives, and multivariate spacings, specifically an explicit formula for the second-to-last bivariate spacing.}, subject = {Kopula }, language = {en} } @phdthesis{Buchholzer2011, author = {Buchholzer, Hannes}, title = {The Semismooth Newton Method for the Solution of Reactive Transport Problems Including Mineral Precipitation-Dissolution Reactions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65342}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {In dieser Arbeit befassen wir uns mit einem reaktiven Transportmodell mit Niederschlags-Aufl{\"o}sung Reaktionen das aus den Geowissenschaften stammt. Es besteht aus PDGs, gew{\"o}hnlichen Differentialgleichungen, algebraischen Gleichungen und Komplementarit{\"a}tsbedingungen. Nach Diskretisation dieses Modells erhalten wir eine großes nichtlineares und nichtglattes Gleichungssystem. Wir l{\"o}sen dieses System mit der semismoothen Newtonverfahren, das von Qi und Sun eingef{\"u}hrt wurde. Der Fokus dieser Arbeit ist in der Anwendung und Konvergenz dieses Algorithmus. Wir zeigen, dass dieser Algorithmus f{\"u}r dieses Problem wohldefiniert ist und sogar lokal quadratisch konvergiert gegen eine BD-regul{\"a}re L{\"o}sung. Wir befassen uns auch mit den dabei entstehenden linearen Gleichungssystemen, die sehr groß und d{\"u}nn besetzt sind, und wie sie effizient gel{\"o}st werden k{\"o}nnen. Ein wichtiger Bestandteil dieser Untersuchung ist die Beschr{\"a}nktheit einer gewissen matrixwertigen Funktion, die in einem eigenen Kapitel gezeigt wird. 
Als Seitenbetrachtung untersuchen wir wie die extremalen Eigenwerte (und Singul{\"a}rwerte) von gewissen PDE-Operatoren, welche in unserem diskretisierten Modell vorkommen, genau abgesch{\"a}tzt werden k{\"o}nnen.}, subject = {Komplementarit{\"a}tsproblem}, language = {en} } @phdthesis{Schwartz2011, author = {Schwartz, Alexandra}, title = {Mathematical Programs with Complementarity Constraints: Theory, Methods and Applications}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-64891}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The subject of this thesis are mathematical programs with complementarity conditions (MPCC). At first, an economic example of this problem class is analyzed, the problem of effort maximization in asymmetric n-person contest games. While an analytical solution for this special problem could be derived, this is not possible in general for MPCCs. Therefore, optimality conditions which might be used for numerical approaches where considered next. More precisely, a Fritz-John result for MPCCs with stronger properties than those known so far was derived together with some new constraint qualifications and subsequently used to prove an exact penalty result. Finally, to solve MPCCs numerically, the so called relaxation approach was used. Besides improving the results for existing relaxation methods, a new relaxation with strong convergence properties was suggested and a numerical comparison of all methods based on the MacMPEC collection conducted.}, subject = {Zwei-Ebenen-Optimierung}, language = {en} } @phdthesis{Lurz2015, author = {Lurz, Kristina}, title = {Confidence and Prediction under Covariates and Prior Information}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-122748}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The purpose of confidence and prediction intervals is to provide an interval estimation for an unknown distribution parameter or the future value of a phenomenon. 
In many applications, prior knowledge about the distribution parameter is available, but rarely made use of, unless in a Bayesian framework. This thesis provides exact frequentist confidence intervals of minimal volume exploiting prior information. The scheme is applied to distribution parameters of the binomial and the Poisson distribution. The Bayesian approach to obtain intervals on a distribution parameter in form of credibility intervals is considered, with particular emphasis on the binomial distribution. An application of interval estimation is found in auditing, where two-sided intervals of Stringer type are meant to contain the mean of a zero-inflated population. In the context of time series analysis, covariates are supposed to improve the prediction of future values. Exponential smoothing with covariates as an extension of the popular forecasting method exponential smoothing is considered in this thesis. A double-seasonality version of it is applied to forecast hourly electricity load under the use of meteorological covariates. Different kinds of prediction intervals for exponential smoothing with covariates are formulated.}, subject = {Konfidenzintervall}, language = {en} } @phdthesis{Ciaramella2015, author = {Ciaramella, Gabriele}, title = {Exact and non-smooth control of quantum spin systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-118386}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {An efficient and accurate computational framework for solving control problems governed by quantum spin systems is presented. Spin systems are extremely important in modern quantum technologies such as nuclear magnetic resonance spectroscopy, quantum imaging and quantum computing. In these applications, two classes of quantum control problems arise: optimal control problems and exact-controllability problems, with a bilinear control structure. 
These models correspond to the Schr{\"o}dinger-Pauli equation, describing the time evolution of a spinor, and the Liouville-von Neumann master equation, describing the time evolution of a spinor and a density operator. This thesis focuses on quantum control problems governed by these models. An appropriate definition of the optimization objectives and of the admissible set of control functions allows to construct controls with specific properties. These properties are in general required by the physics and the technologies involved in quantum control applications. A main purpose of this work is to address non-differentiable quantum control problems. For this reason, a computational framework is developed to address optimal-control problems, with possibly L1-penalization term in the cost-functional, and exact-controllability problems. In both cases the set of admissible control functions is a subset of a Hilbert space. The bilinear control structure of the quantum model, the L1-penalization term and the control constraints generate high non-linearities that make it difficult to solve and analyse the corresponding control problems. The first part of this thesis focuses on the physical description of the spin of particles and of the magnetic resonance phenomenon. Afterwards, the controlled Schr{\"o}dinger-Pauli equation and the Liouville-von Neumann master equation are discussed. These equations, like many other controlled quantum models, can be represented by dynamical systems with a bilinear control structure. In the second part of this thesis, theoretical investigations of optimal control problems, with a possible L1-penalization term in the objective and control constraints, are considered. In particular, existence of solutions, optimality conditions, and regularity properties of the optimal controls are discussed. In order to solve these optimal control problems, semi-smooth Newton methods are developed and proved to be superlinear convergent. 
The main difficulty in the implementation of a Newton method for optimal control problems comes from the dimension of the Jacobian operator. In a discrete form, the Jacobian is a very large matrix, and this fact makes its construction infeasible from a practical point of view. For this reason, the focus of this work is on inexact Krylov-Newton methods, that combine the Newton method with Krylov iterative solvers for linear systems, and allows to avoid the construction of the discrete Jacobian. In the third part of this thesis, two methodologies for the exact-controllability of quantum spin systems are presented. The first method consists of a continuation technique, while the second method is based on a particular reformulation of the exact-control problem. Both these methodologies address minimum L2-norm exact-controllability problems. In the fourth part, the thesis focuses on the numerical analysis of quantum control problems. In particular, the modified Crank-Nicolson scheme as an adequate time discretization of the Schr{\"o}dinger equation is discussed, the first-discretize-then-optimize strategy is used to obtain a discrete reduced gradient formula for the differentiable part of the optimization objective, and implementation details and globalization strategies to guarantee an adequate numerical behaviour of semi-smooth Newton methods are treated. 
In the last part of this work, several numerical experiments are performed to validate the theoretical results and demonstrate the ability of the proposed computational framework to solve quantum spin control problems.}, subject = {Spinsystem}, language = {en} } @phdthesis{Srichan2015, author = {Srichan, Teerapat}, title = {Discrete Moments of Zeta-Functions with respect to random and ergodic transformations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-118395}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {In the thesis discrete moments of the Riemann zeta-function and allied Dirichlet series are studied. In the first part the asymptotic value-distribution of zeta-functions is studied where the samples are taken from a Cauchy random walk on a vertical line inside the critical strip. Building on techniques by Lifshits and Weber analogous results for the Hurwitz zeta-function are derived. Using Atkinson's dissection this is even generalized to Dirichlet L-functions associated with a primitive character. Both results indicate that the expectation value equals one which shows that the values of these zeta-functions are small on average. The second part deals with the logarithmic derivative of the Riemann zeta-function on vertical lines and here the samples are with respect to an explicit ergodic transformation. Extending work of Steuding, discrete moments are evaluated and an equivalent formulation for the Riemann Hypothesis in terms of ergodic theory is obtained. In the third and last part of the thesis, the phenomenon of universality with respect to stochastic processes is studied. It is shown that certain random shifts of the zeta-function can approximate non-vanishing analytic target functions as good as we please. 
This result relies on Voronin's universality theorem.}, subject = {Riemannsche Zetafunktion}, language = {en} } @phdthesis{Schaeffner2015, author = {Sch{\"a}ffner, Mathias}, title = {Multiscale analysis of non-convex discrete systems via \(\Gamma\)-convergence}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-122349}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The subject of this thesis is the rigorous passage from discrete systems to continuum models via variational methods. The first part of this work studies a discrete model describing a one-dimensional chain of atoms with finite range interactions of Lennard-Jones type. We derive an expansion of the ground state energy using \(\Gamma\)-convergence. In particular, we show that a variant of the Cauchy-Born rule holds true for the model under consideration. We exploit this observation to derive boundary layer energies due to asymmetries of the lattice at the boundary or at cracks of the specimen. Hereby we extend several results obtained previously for models involving only nearest and next-to-nearest neighbour interactions by Braides and Cicalese and Scardia, Schl{\"o}merkemper and Zanini. The second part of this thesis is devoted to the analysis of a quasi-continuum (QC) method. To this end, we consider the discrete model studied in the first part of this thesis as the fully atomistic model problem and construct an approximation based on a QC method. We show that in an elastic setting the expansion by \(\Gamma\)-convergence of the fully atomistic energy and its QC approximation coincide. In the case of fracture, we show that this is not true in general. 
In the case of only nearest and next-to-nearest neighbour interactions, we give sufficient conditions on the QC approximation such that, also in case of fracture, the minimal energies of the fully atomistic energy and its approximation coincide in the limit.}, subject = {Gamma-Konvergenz}, language = {en} } @phdthesis{Aulbach2015, author = {Aulbach, Stefan}, title = {Contributions to Extreme Value Theory in Finite and Infinite Dimensions: With a Focus on Testing for Generalized Pareto Models}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-127162}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Extreme value theory aims at modeling extreme but rare events from a probabilistic point of view. It is well-known that so-called generalized Pareto distributions, which are briefly reviewed in Chapter 1, are the only reasonable probability distributions suited for modeling observations above a high threshold, such as waves exceeding the height of a certain dike, earthquakes having at least a certain intensity, and, after applying a simple transformation, share prices falling below some low threshold. However, there are cases for which a generalized Pareto model might fail. Therefore, Chapter 2 derives certain neighborhoods of a generalized Pareto distribution and provides several statistical tests for these neighborhoods, where the cases of observing finite dimensional data and of observing continuous functions on [0,1] are considered. By using a notation based on so-called D-norms it is shown that these tests consistently link both frameworks, the finite dimensional and the functional one. Since the derivation of the asymptotic distributions of the test statistics requires certain technical restrictions, Chapter 3 analyzes these assumptions in more detail. It provides in particular some examples of distributions that satisfy the null hypothesis and of those that do not. 
Since continuous copula processes are crucial tools for the functional versions of the proposed tests, it is also discussed whether those copula processes actually exist for a given set of data. Moreover, some practical advice is given how to choose the free parameters incorporated in the test statistics. Finally, a simulation study in Chapter 4 compares the in total three different test statistics with another test found in the literature that has a similar null hypothesis. This thesis ends with a short summary of the results and an outlook to further open questions.}, subject = {Extremwertstatistik}, language = {en} } @phdthesis{Bauer2015, author = {Bauer, Andreas}, title = {Argumentieren mit multiplen und dynamischen Repr{\"a}sentationen}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-022-1 (print)}, doi = {10.25972/WUP-978-3-95826-023-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112114}, school = {W{\"u}rzburg University Press}, pages = {132}, year = {2015}, abstract = {Der Einzug des Rechners in den Mathematikunterricht hat eine Vielzahl neuer M{\"o}glichkeiten der Darstellung mit sich gebracht, darunter auch multiple, dynamisch verbundene Repr{\"a}sentationen mathematischer Probleme. Die Arbeit beantwortet die Frage, ob und wie diese Repr{\"a}sentationsarten von Sch{\"u}lerinnen und Sch{\"u}ler in Argumentationen genutzt werden. In der empirischen Untersuchung wurde dabei einerseits quantitativ erforscht, wie groß der Einfluss der in der Aufgabenstellung gegebenen Repr{\"a}sentationsform auf die schriftliche Argumentationen der Sch{\"u}lerinnen und Sch{\"u}ler ist. Andererseits wurden durch eine qualitative Analyse spezifische Nutzungsweisen identifiziert und mittels Toulmins Argumentationsmodell beschrieben. 
Diese Erkenntnisse wurden genutzt, um Konsequenzen bez{\"u}glich der Verwendung von multiplen und/oder dynamischen Repr{\"a}sentationen im Mathematikunterricht der Sekundarstufe zu formulieren.}, subject = {Argumentation}, language = {de} } @phdthesis{Mohammadi2015, author = {Mohammadi, Masoumeh}, title = {Analysis of discretization schemes for Fokker-Planck equations and related optimality systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-111494}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The Fokker-Planck (FP) equation is a fundamental model in thermodynamic kinetic theories and statistical mechanics. In general, the FP equation appears in a number of different fields in natural sciences, for instance in solid-state physics, quantum optics, chemical physics, theoretical biology, and circuit theory. These equations also provide a powerful mean to define robust control strategies for random models. The FP equations are partial differential equations (PDE) describing the time evolution of the probability density function (PDF) of stochastic processes. These equations are of different types depending on the underlying stochastic process. In particular, they are parabolic PDEs for the PDF of Ito processes, and hyperbolic PDEs for piecewise deterministic processes (PDP). A fundamental axiom of probability calculus requires that the integral of the PDF over all the allowable state space must be equal to one, for all time. Therefore, for the purpose of accurate numerical simulation, a discretized FP equation must guarantee conservativeness of the total probability. Furthermore, since the solution of the FP equation represents a probability density, any numerical scheme that approximates the FP equation is required to guarantee the positivity of the solution. In addition, an approximation scheme must be accurate and stable. 
For these purposes, for parabolic FP equations on bounded domains, we investigate the Chang-Cooper (CC) scheme for space discretization and first- and second-order backward time differencing. We prove that the resulting space-time discretization schemes are accurate, conditionally stable, conservative, and preserve positivity. Further, we discuss a finite difference discretization for the FP system corresponding to a PDP process in a bounded domain. Next, we discuss FP equations in unbounded domains. In this case, finite-difference or finite-element methods cannot be applied. By employing a suitable set of basis functions, spectral methods allow to treat unbounded domains. Since FP solutions decay exponentially at infinity, we consider Hermite functions as basis functions, which are Hermite polynomials multiplied by a Gaussian. To this end, the Hermite spectral discretization is applied to two different FP equations; the parabolic PDE corresponding to Ito processes, and the system of hyperbolic PDEs corresponding to a PDP process. The resulting discretized schemes are analyzed. Stability and spectral accuracy of the Hermite spectral discretization of the FP problems is proved. Furthermore, we investigate the conservativity of the solutions of FP equations discretized with the Hermite spectral scheme. In the last part of this thesis, we discuss optimal control problems governed by FP equations on the characterization of their solution by optimality systems. We then investigate the Hermite spectral discretization of FP optimality systems in unbounded domains. Within the framework of Hermite discretization, we obtain sparse-band systems of ordinary differential equations. We analyze the accuracy of the discretization schemes by showing spectral convergence in approximating the state, the adjoint, and the control variables that appear in the FP optimality systems. 
To validate our theoretical estimates, we present results of numerical experiments.}, subject = {Fokker-Planck-Gleichung}, language = {en} } @article{HeldMittnachtKolbetal.2014, author = {Held, Matthias and Mittnacht, Maria and Kolb, Martin and Karl, Sabine and Jany, Berthold}, title = {Pulmonary and Cardiac Function in Asymptomatic Obese Subjects and Changes following a Structured Weight Reduction Program: A Prospective Observational Study}, series = {PLOS ONE}, volume = {9}, journal = {PLOS ONE}, number = {9}, doi = {10.1371/journal.pone.0107480}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-119239}, pages = {e107480}, year = {2014}, abstract = {Background The prevalence of obesity is rising. Obesity can lead to cardiovascular and ventilatory complications through multiple mechanisms. Cardiac and pulmonary function in asymptomatic subjects and the effect of structured dietary programs on cardiac and pulmonary function is unclear. Objective To determine lung and cardiac function in asymptomatic obese adults and to evaluate whether weight loss positively affects functional parameters. Methods We prospectively evaluated bodyplethysmographic and echocardiographic data in asymptomatic subjects undergoing a structured one-year weight reduction program. Results 74 subjects (32 male, 42 female; mean age 42±12 years) with an average BMI 42.5±7.9, body weight 123.7±24.9 kg were enrolled. Body weight correlated negatively with vital capacity (R = -0.42, p<0.001), FEV1 (R = -0.497, p<0.001) and positively with P 0.1 (R = 0.32, p = 0.02) and myocardial mass (R = 0.419, p = 0.002). After 4 months the study subjects had significantly reduced their body weight (-26.0±11.8 kg) and BMI (-8.9±3.8) associated with a significant improvement of lung function (absolute changes: vital capacity +5.5±7.5\% pred., p<0.001; FEV1+9.8±8.3\% pred., p<0.001, ITGV+16.4±16.0\% pred., p<0.001, SR tot -17.4±41.5\% pred., p<0.01). 
Moreover, P0.1/Pimax decreased to 47.7\% (p<0.01) indicating a decreased respiratory load. The change of FEV1 correlated significantly with the change of body weight (R = -0.31, p = 0.03). Echocardiography demonstrated reduced myocardial wall thickness (-0.08±0.2 cm, p = 0.02) and improved left ventricular myocardial performance index (-0.16±0.35, p = 0.02). Mitral annular plane systolic excursion (+0.14, p = 0.03) and pulmonary outflow acceleration time (AT +26.65±41.3 ms, p = 0.001) increased. Conclusion Even in asymptomatic individuals obesity is associated with abnormalities in pulmonary and cardiac function and increased myocardial mass. All the abnormalities can be reversed by a weight reduction program.}, language = {en} } @phdthesis{Wongkaew2015, author = {Wongkaew, Suttida}, title = {On the control through leadership of multi-agent systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-120914}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The investigation of interacting multi-agent models is a new field of mathematical research with application to the study of behavior in groups of animals or community of people. One interesting feature of multi-agent systems is collective behavior. From the mathematical point of view, one of the challenging issues considering with these dynamical models is development of control mechanisms that are able to influence the time evolution of these systems. In this thesis, we focus on the study of controllability, stabilization and optimal control problems for multi-agent systems considering three models as follows: The first one is the Hegselmann Krause opinion formation (HK) model. The HK dynamics describes how individuals' opinions are changed by the interaction with others taking place in a bounded domain of confidence. The study of this model focuses on determining feedback controls in order to drive the agents' opinions to reach a desired agreement. 
The second model is the Heider social balance (HB) model. The HB dynamics explains the evolution of relationships in a social network. One purpose of studying this system is the construction of control function in oder to steer the relationship to reach a friendship state. The third model that we discuss is a flocking model describing collective motion observed in biological systems. The flocking model under consideration includes self-propelling, friction, attraction, repulsion, and alignment features. We investigate a control for steering the flocking system to track a desired trajectory. Common to all these systems is our strategy to add a leader agent that interacts with all other members of the system and includes the control mechanism. Our control through leadership approach is developed using classical theoretical control methods and a model predictive control (MPC) scheme. To apply the former method, for each model the stability of the corresponding linearized system near consensus is investigated. Further, local controllability is examined. However, only in the Hegselmann-Krause opinion formation model, the feedback control is determined in order to steer agents' opinions to globally converge to a desired agreement. The MPC approach is an optimal control strategy based on numerical optimization. To apply the MPC scheme, optimal control problems for each model are formulated where the objective functions are different depending on the desired objective of the problem. The first-oder necessary optimality conditions for each problem are presented. Moreover for the numerical treatment, a sequence of open-loop discrete optimality systems is solved by accurate Runge-Kutta schemes, and in the optimization procedure, a nonlinear conjugate gradient solver is implemented. 
Finally, numerical experiments are performed to investigate the properties of the multi-agent models and demonstrate the ability of the proposed control strategies to drive multi-agent systems to attain a desired consensus and to track a given trajectory.}, subject = {Mehragentensystem}, language = {en} } @phdthesis{Bauer2015, author = {Bauer, Ulrich Josef}, title = {Conformal Mappings onto Simply and Multiply Connected Circular Arc Polygon Domains}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-123914}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The goal of this thesis is to investigate conformal mappings onto circular arc polygon domains, i.e. domains that are bounded by polygons consisting of circular arcs instead of line segments. Conformal mappings onto circular arc polygon domains contain parameters in addition to the classical parameters of the Schwarz-Christoffel transformation. To contribute to the parameter problem of conformal mappings from the unit disk onto circular arc polygon domains, we investigate two special cases of these mappings. In the first case we can describe the additional parameters if the bounding circular arc polygon is a polygon with straight sides. In the second case we provide an approximation for the additional parameters if the circular arc polygon domain satisfies some symmetry conditions. These results allow us to draw conclusions on the connection between these additional parameters and the classical parameters of the mapping. For conformal mappings onto multiply connected circular arc polygon domains, we provide an alternative construction of the mapping formula without using the Schottky-Klein prime function. In the process of constructing our main result, mappings for domains of connectivity three or greater, we also provide a formula for conformal mappings onto doubly connected circular arc polygon domains. 
The comparison of these mapping formulas with already known mappings allows us to provide values for some of the parameters of the mappings onto doubly connected circular arc polygon domains if the image domain is a polygonal domain. The different components of the mapping formula are constructed by using a slightly modified variant of the Poincar{\´e} theta series. This construction includes the design of a function to remove unwanted poles and of different versions of functions that are analytic on the domain of definition of the mapping functions and satisfy some special functional equations. We also provide the necessary concepts to numerically evaluate the conformal mappings onto multiply connected circular arc polygon domains. As the evaluation of such a map requires the solution of a differential equation, we provide a possible configuration of curves inside the preimage domain to solve the equation along them in addition to a description of the procedure for computing either the formula for the doubly connected case or the case of connectivity three or greater. We also describe the procedures for solving the parameter problem for multiply connected circular arc polygon domains.}, subject = {Konforme Abbildungen}, language = {en} } @phdthesis{Hain2015, author = {Hain, Johannes}, title = {Valuation Algorithms for Structural Models of Financial Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-128108}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The thesis focuses on the valuation of firms in a system context where cross-holdings of the firms in liabilities and equities are allowed and, therefore, systemic risk can be modeled on a structural level. A main property of such models is that for the determination of the firm values a pricing equilibrium has to be found. 
While there exists a small but growing amount of research on the existence and the uniqueness of such price equilibria, the literature is still somewhat inconsistent. An example for this fact is that different authors define the underlying financial system on differing ways. Moreover, only few articles pay intense attention on procedures to find the pricing equilibria. In the existing publications, the provided algorithms mainly reflect the individual authors' particular approach to the problem. Additionally, all existing methods do have the drawback of potentially infinite runtime. For these reasons, the objects of this thesis are as follows. First, a definition of a financial system is introduced in its most general form in Chapter 2. It is shown that under a fairly mild regularity condition the financial system has a unique existing payment equilibrium. In Chapter 3, some extensions and differing definitions of financial systems that exist in literature are presented and it is shown how these models can be embedded into the general model from the proceeding chapter. Second, an overview of existing valuation algorithms to find the equilibrium is given in Chapter 4, where the existing methods are generalized and their corresponding mathematical properties are highlighted. Third, a complete new class of valuation algorithms is developed in Chapter 4 that includes the additional information whether a firm is in default or solvent under a current payment vector. This results in procedures that are able find the solution of the system in a finite number of iteration steps. In Chapter 5, the developed concepts of Chapter 4 are applied to more general financial systems where more than one seniority level of debt is present. 
Chapter 6 develops optimal starting vectors for non-finite algorithms and Chapter 7 compares the existing and the new developed algorithms concerning their efficiency in an extensive simulation study covering a wide range of possible settings for financial systems.}, subject = {Risikomanagement}, language = {en} } @phdthesis{Hofmann2009, author = {Hofmann, Daniel}, title = {Characterization of the D-Norm Corresponding to a Multivariate Extreme Value Distribution}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-41347}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {It is well-known that a multivariate extreme value distribution can be represented via the D-Norm. However not every norm yields a D-Norm. In this thesis a necessary and sufficient condition is given for a norm to define an extreme value distribution. Applications of this theorem includes a new proof for the bivariate case, the Pickands dependence function and the nested logistic model. Furthermore the GPD-Flow is introduced and first insights were given such that if it converges it converges against the copula of complete dependence.}, subject = {Kopula }, language = {en} } @phdthesis{Hoheisel2009, author = {Hoheisel, Tim}, title = {Mathematical Programs with Vanishing Constraints}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-40790}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {A new class of optimization problems name 'mathematical programs with vanishing constraints (MPVCs)' is considered. MPVCs are on the one hand very challenging from a theoretical viewpoint, since standard constraint qualifications such as LICQ, MFCQ, or ACQ are most often violated, and hence, the Karush-Kuhn-Tucker conditions do not provide necessary optimality conditions off-hand. Thus, new CQs and the corresponding optimality conditions are investigated. On the other hand, MPVCs have important applications, e.g., in the field of topology optimization. 
Therefore, numerical algorithms for the solution of MPVCs are designed, investigated and tested for certain problems from truss-topology-optimization.}, subject = {Nichtlineare Optimierung}, language = {en} } @phdthesis{vonHeusinger2009, author = {von Heusinger, Anna}, title = {Numerical Methods for the Solution of the Generalized Nash Equilibrium Problem}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-47662}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {In the generalized Nash equilibrium problem not only the cost function of a player depends on the rival players' decisions, but also his constraints. This thesis presents different iterative methods for the numerical computation of a generalized Nash equilibrium, some of them globally, others locally superlinearly convergent. These methods are based on either reformulations of the generalized Nash equilibrium problem as an optimization problem, or on a fixed point formulation. The key tool for these reformulations is the Nikaido-Isoda function. Numerical results for various problem from the literature are given.}, subject = {Spieltheorie}, language = {en} } @article{FalkMarohn1993, author = {Falk, Michael and Marohn, Frank}, title = {Von Mises condition revisited}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-45790}, year = {1993}, abstract = {It is shown that the rate of convergence in the von Mises conditions of extreme value theory determines the distance of the underlying distribution function F from a generalized Pareto distribution. The distance is measured in terms of the pertaining densities with the limit being ultimately attained if and only if F is ultimately a generalized Pareto distribution. Consequently, the rate of convergence of the extremes in an lid sample, whether in terms of the distribution of the largest order statistics or of corresponding empirical truncated point processes, is determined by the rate of convergence in the von Mises condition. 
We prove that the converse is also true.}, language = {en} } @article{JanssenMarohn1994, author = {Janssen, A. and Marohn, Frank}, title = {On statistical information of extreme order statistics, local extreme value alternatives, and Poisson point processes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-45816}, year = {1994}, abstract = {The aim of the present paper is to clarify the role of extreme order statistics in general statistical models. This is done within the general setup of statistical experiments in LeCam's sense. Under the assumption of monotone likelihood ratios, we prove that a sequence of experiments is asymptotically Gaussian if, and only if, a fixed number of extremes asymptotically does not contain any information. In other words: A fixed number of extremes asymptotically contains information iff the Poisson part of the limit experiment is non-trivial. Suggested by this result, we propose a new extreme value model given by local alternatives. The local structure is described by introducing the space of extreme value tangents. It turns out that under local alternatives a new class of extreme value distributions appears as limit distributions. Moreover, explicit representations of the Poisson limit experiments via Poisson point processes are found. As a concrete example nonparametric tests for Frechet type distributions against stochastically larger alternatives are treated. 
We find asymptotically optimal tests within certain threshold models.}, language = {en} } @inproceedings{Marohn1994, author = {Marohn, Frank}, title = {On testing the exponential and Gumbel distribution}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-45804}, year = {1994}, abstract = {No abstract available}, subject = {Gumbel-Verteilung}, language = {en} } @book{Marohn1990, author = {Marohn, Frank}, title = {On statistical information of extreme order statistics}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-47866}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {1990}, abstract = {No abstract available}, subject = {Rangstatistik}, language = {en} } @article{FalkMarohn1993, author = {Falk, Michael and Marohn, Frank}, title = {Asymptotically optimal tests for conditional distributions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-45823}, year = {1993}, abstract = {No abstract available}, language = {en} } @article{Marohn1994, author = {Marohn, Frank}, title = {Asymptotic sufficiency of order statistics for almost regular Weibull type densities}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-45837}, year = {1994}, abstract = {Consider a location family which is defined via a Weibull type density having shape parameter a = 1. We treat the problem, which portion of the order statistics is asymptotically sufficient. 
It turns out that the intermediate order statistics are relevant.}, language = {en} } @incollection{FalkMarohn1992, author = {Falk, Michael and Marohn, Frank}, title = {Laws of small numbers : Some applications to conditional curve estimation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-45841}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {1992}, abstract = {No abstract available}, subject = {Gesetz der kleinen Zahlen}, language = {en} } @article{Marohn1991, author = {Marohn, Frank}, title = {Global sufficiency of extreme order statistics in location models of Weibull type}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-47874}, year = {1991}, abstract = {In Janssen and Reiss (1988) it was shown that in a location model of a Weibull type sample with shape parameter -1 < a < 1 the k(n) lower extremes are asymptotically local sufficient. In the present paper we show that even global sufficiency holds. Moreover, it turns out that convergence of the given statistical experiments in the deficiency metric does not only hold for compact parameter sets but for the whole real line.}, subject = {Extremwertstatistik}, language = {de} } @phdthesis{Schindele2016, author = {Schindele, Andreas}, title = {Proximal methods in medical image reconstruction and in nonsmooth optimal control of partial differential equations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-136569}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Proximal methods are iterative optimization techniques for functionals, J = J1 + J2, consisting of a differentiable part J2 and a possibly nondifferentiable part J1. In this thesis proximal methods for finite- and infinite-dimensional optimization problems are discussed. In finite dimensions, they solve l1- and TV-minimization problems that are effectively applied to image reconstruction in magnetic resonance imaging (MRI). Convergence of these methods in this setting is proved. 
The proposed proximal scheme is compared to a split proximal scheme and it achieves a better signal-to-noise ratio. In addition, an application that uses parallel imaging is presented. In infinite dimensions, these methods are discussed to solve nonsmooth linear and bilinear elliptic and parabolic optimal control problems. In particular, fast convergence of these methods is proved. Furthermore, for benchmarking purposes, truncated proximal schemes are compared to an inexact semismooth Newton method. Results of numerical experiments are presented to demonstrate the computational effectiveness of our proximal schemes that need less computation time than the semismooth Newton method in most cases. Results of numerical experiments are presented that successfully validate the theoretical estimates.}, subject = {Optimale Kontrolle}, language = {en} } @phdthesis{Zott2016, author = {Zott, Maximilian}, title = {Extreme Value Theory in Higher Dimensions - Max-Stable Processes and Multivariate Records}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-136614}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Die Extremwerttheorie behandelt die stochastische Modellierung seltener und extremer Ereignisse. W{\"a}hrend fundamentale Theorien in der klassischen Stochastik, wie etwa die Gesetze der großen Zahlen oder der zentrale Grenzwertsatz das asymptotische Verhalten der Summe von Zufallsvariablen untersucht, liegt in der Extremwerttheorie der Fokus auf dem Maximum oder dem Minimum einer Menge von Beobachtungen. Die Grenzverteilung des normierten Stichprobenmaximums unter einer Folge von unabh{\"a}ngigen und identisch verteilten Zufallsvariablen kann durch sogenannte max-stabile Verteilungen charakterisiert werden. In dieser Dissertation werden verschiedene Aspekte der Theorie der max-stabilen Zufallsvektoren und stochastischen Prozesse behandelt. 
Insbesondere wird der Begriff der 'Differenzierbarkeit in Verteilung' eines max-stabilen Prozesses eingef{\"u}hrt und untersucht. Ferner werden 'verallgemeinerte max-lineare Modelle' eingef{\"u}hrt, um einen bekannten max-stabilen Zufallsvektor durch einen max-stabilen Prozess zu interpolieren. Dar{\"u}ber hinaus wird der Zusammenhang von extremwerttheoretischen Methoden mit der Theorie der multivariaten Rekorde hergestellt. Insbesondere werden sogenannte 'vollst{\"a}ndige' und 'einfache' Rekorde eingef{\"u}hrt, und deren asymptotisches Verhalten untersucht.}, subject = {Stochastischer Prozess}, language = {en} } @phdthesis{Merger2016, author = {Merger, Juri}, title = {Optimal Control and Function Identification in Biological Processes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-138900}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Mathematical modelling, simulation, and optimisation are core methodologies for future developments in engineering, natural, and life sciences. This work aims at applying these mathematical techniques in the field of biological processes with a focus on the wine fermentation process that is chosen as a representative model. In the literature, basic models for the wine fermentation process consist of a system of ordinary differential equations. They model the evolution of the yeast population number as well as the concentrations of assimilable nitrogen, sugar, and ethanol. In this thesis, the concentration of molecular oxygen is also included in order to model the change of the metabolism of the yeast from an aerobic to an anaerobic one. Further, a more sophisticated toxicity function is used. It provides simulation results that match experimental measurements better than a linear toxicity model. 
Moreover, a further equation for the temperature plays a crucial role in this work as it opens a way to influence the fermentation process in a desired way by changing the temperature of the system via a cooling mechanism. From the view of the wine industry, it is necessary to cope with large scale fermentation vessels, where spatial inhomogeneities of concentrations and temperature are likely to arise. Therefore, a system of reaction-diffusion equations is formulated in this work, which acts as an approximation for a model including computationally very expensive fluid dynamics. In addition to the modelling issues, an optimal control problem for the proposed reaction-diffusion fermentation model with temperature boundary control is presented and analysed. Variational methods are used to prove the existence of unique weak solutions to this non-linear problem. In this framework, it is possible to exploit the Hilbert space structure of state and control spaces to prove the existence of optimal controls. Additionally, first-order necessary optimality conditions are presented. They characterise controls that minimise an objective functional with the purpose to minimise the final sugar concentration. A numerical experiment shows that the final concentration of sugar can be reduced by a suitably chosen temperature control. The second part of this thesis deals with the identification of an unknown function that participates in a dynamical model. For models with ordinary differential equations, where parts of the dynamic cannot be deduced due to the complexity of the underlying phenomena, a minimisation problem is formulated. By minimising the deviations of simulation results and measurements the best possible function from a trial function space is found. 
The analysis of this function identification problem covers the proof of the differentiability of the function-to-state operator, the existence of minimisers, and the sensitivity analysis by means of the data-to-function mapping. Moreover, the presented function identification method is extended to stochastic differential equations. Here, the objective functional consists of the difference of measured values and the statistical expected value of the stochastic process solving the stochastic differential equation. Using a Fokker-Planck equation that governs the probability density function of the process, the probabilistic problem of simulating a stochastic process is cast to a deterministic partial differential equation. Proofs of unique solvability of the forward equation, the existence of minimisers, and first-order necessary optimality conditions are presented. The application of the function identification framework to the wine fermentation model aims at finding the shape of the toxicity function and is carried out for the deterministic as well as the stochastic case.}, subject = {Optimale Kontrolle}, language = {en} } @phdthesis{Schnuecke2016, author = {Schn{\"u}cke, Gero}, title = {Arbitrary Lagrangian-Eulerian Discontinous Galerkin methods for nonlinear time-dependent first order partial differential equations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-139579}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {The present thesis considers the development and analysis of arbitrary Lagrangian-Eulerian discontinuous Galerkin (ALE-DG) methods with time-dependent approximation spaces for conservation laws and the Hamilton-Jacobi equations. Fundamentals about conservation laws, Hamilton-Jacobi equations and discontinuous Galerkin methods are presented. In particular, issues in the development of discontinuous Galerkin (DG) methods for the Hamilton-Jacobi equations are discussed. 
The development of the ALE-DG methods is based on the assumption that the distribution of the grid points is explicitly given for an upcoming time level. This assumption allows one to construct a time-dependent local affine linear mapping to a reference cell and a time-dependent finite element test function space. In addition, a version of Reynolds' transport theorem can be proven. For the fully-discrete ALE-DG method for nonlinear scalar conservation laws the geometric conservation law and a local maximum principle are proven. Furthermore, conditions for slope limiters are stated. These conditions ensure the total variation stability of the method. In addition, entropy stability is discussed. For the corresponding semi-discrete ALE-DG method, error estimates are proven. If a piecewise \$\mathcal{P}^{k}\$ polynomial approximation space is used on the reference cell, the sub-optimal \$\left(k+\frac{1}{2}\right)\$ convergence for monotone fluxes and the optimal \$(k+1)\$ convergence for an upwind flux are proven in the \$\mathrm{L}^{2}\$-norm. The capability of the method is shown by numerical examples for nonlinear conservation laws. Likewise, for the semi-discrete ALE-DG method for nonlinear Hamilton-Jacobi equations, error estimates are proven. In the one dimensional case the optimal \$\left(k+1\right)\$ convergence and in the two dimensional case the sub-optimal \$\left(k+\frac{1}{2}\right)\$ convergence are proven in the \$\mathrm{L}^{2}\$-norm, if a piecewise \$\mathcal{P}^{k}\$ polynomial approximation space is used on the reference cell. 
For the fully-discrete method, the geometric conservation law is proven and for the piecewise constant forward Euler step the convergence of the method to the unique physically relevant solution is discussed.}, subject = {Galerkin-Methode}, language = {en} } @article{HornSchellerduPlessisetal.2013, author = {Horn, Anne and Scheller, Carsten and du Plessis, Stefan and Arendt, Gabriele and Nolting, Thorsten and Joska, John and Sopper, Sieghart and Maschke, Matthias and Obermann, Mark and Husstedt, Ingo W. and Hain, Johannes and Maponga, Tongai and Riederer, Peter and Koutsilieri, Eleni}, title = {Increases in CSF dopamine in HIV patients are due to the dopamine transporter 10/10-repeat allele which is more frequent in HIV-infected individuals}, series = {Journal of Neural Transmission}, volume = {120}, journal = {Journal of Neural Transmission}, doi = {10.1007/s00702-013-1086-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-132385}, pages = {1411--1419}, year = {2013}, abstract = {Dysfunction of dopaminergic neurotransmission has been implicated in HIV infection. We showed previously increased dopamine (DA) levels in CSF of therapy-na{\"i}ve HIV patients and an inverse correlation between CSF DA and CD4 counts in the periphery, suggesting adverse effects of high levels of DA on HIV infection. In the current study including a total of 167 HIV-positive and negative donors from Germany and South Africa (SA), we investigated the mechanistic background for the increase of CSF DA in HIV individuals. Interestingly, we found that the DAT 10/10-repeat allele is present more frequently within HIV individuals than in uninfected subjects. Logistic regression analysis adjusted for gender and ethnicity showed an odds ratio for HIV infection in DAT 10/10 allele carriers of 3.93 (95 \% CI 1.72-8.96; p = 0.001, Fisher's exact test). 
42.6 \% HIV-infected patients harbored the DAT 10/10 allele compared to only 10.5 \% uninfected DAT 10/10 carriers in SA (odds ratio 6.31), whereas 68.1 versus 40.9 \%, respectively, in Germany (odds ratio 3.08). Subjects homozygous for the 10-repeat allele had higher amounts of CSF DA and reduced DAT mRNA expression but similar disease severity compared with those carrying other DAT genotypes. These intriguing and novel findings show the mutual interaction between DA and HIV, suggesting caution in the interpretation of CNS DA alterations in HIV infection solely as a secondary phenomenon to the virus and open the door for larger studies investigating consequences of the DAT functional polymorphism on HIV epidemiology and progression of disease.}, language = {en} } @techreport{NedrencoBeck2016, author = {Nedrenco, Dmitri and Beck, Johannes}, title = {Flachfaltbarkeit: Mathematik mit eigenen H{\"a}nden schaffen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-133647}, pages = {11}, year = {2016}, abstract = {Die Arbeit besch{\"a}ftigt sich mit dem Einsatz von Origami im Schulunterricht. Genauer beschreibt sie eine Unterrichtssequenz zur Flachfaltbarkeit, einem Teilgebiet des mathematischen Papierfaltens, f{\"u}r den Mathematikunterricht in der Oberstufe an Gymnasien und h{\"o}heren Schulen. Es werden konkrete Handlungsanweisungen sowie Alternativen ausgef{\"u}hrt und begr{\"u}ndet und mit vielen Grafiken erl{\"a}utert. Ferner werden Ziele dieser Unterrichtssequenz gem{\"a}ß KMK-Bildungsstandards dargelegt. 
Anschließend wird ein mathematischer Blick auf die Besch{\"a}ftigung mit der Flachfaltbarkeit sowie eine Einordnung in die aktuelle Forschungslage gegeben.}, subject = {Origami}, language = {de} } @phdthesis{Boehm2015, author = {B{\"o}hm, Christoph}, title = {Loewner equations in multiply connected domains}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-129903}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The first goal of this thesis is to generalize Loewner's famous differential equation to multiply connected domains. The resulting differential equations are known as Komatu--Loewner differential equations. We discuss Komatu--Loewner equations for canonical domains (circular slit disks, circular slit annuli and parallel slit half-planes). Additionally, we give a generalisation to several slits and discuss parametrisations that lead to constant coefficients. Moreover, we compare Komatu--Loewner equations with several slits to single slit Loewner equations. Finally we generalise Komatu--Loewner equations to hulls satisfying a local growth property.}, subject = {Biholomorphe Abbildung}, language = {en} } @phdthesis{Solak2007, author = {Solak, Ebru}, title = {Almost Completely Decomposable Groups of Type (1,2)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-24794}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {A torsion free abelian group of finite rank is called almost completely decomposable if it has a completely decomposable subgroup of finite index. A p-local, p-reduced almost completely decomposable group of type (1,2) is briefly called a (1,2)-group. Almost completely decomposable groups can be represented by matrices over the ring Z/hZ, where h is the exponent of the regulator quotient. This particular choice of representation allows for a better investigation of the decomposability of the group. 
Arnold and Dugas showed in several of their works that (1,2)-groups with regulator quotient of exponent at least p^7 allow infinitely many isomorphism types of indecomposable groups. It is not known if the exponent 7 is minimal. In this dissertation, this problem is addressed.}, language = {en} } @phdthesis{Teichert2009, author = {Teichert, Christian}, title = {Globale Minimierung von Linearen Programmen mit Gleichgewichtsrestriktionen und globale Konvergenz eines Filter-SQPEC-Verfahrens f{\"u}r Mathematische Programme mit Gleichgewichtsrestriktionen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-38700}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {Mathematische Programme mit Gleichgewichtsrestriktionen (oder Komplementarit{\"a}tsbedingungen), kurz MPECs, sind als {\"a}ußerst schwere Optimierungsprobleme bekannt. Lokale Minima oder geeignete station{\"a}re Punkte zu finden, ist ein nichttriviales Problem. Diese Arbeit beschreibt, wie man dennoch die spezielle Struktur von MPECs ausnutzen kann und mittels eines Branch-and-Bound-Verfahrens ein globales Minimum von Linearen Programmen mit Gleichgewichtsrestriktionen, kurz LPECs, bekommt. Des Weiteren wird dieser Branch-and-Bound-Algorithmus innerhalb eines Filter-SQPEC-Verfahrens genutzt, um allgemeine MPECs zu l{\"o}sen. F{\"u}r das Filter-SQPEC Verfahren wird ein globaler Konvergenzsatz bewiesen. 
Außerdem werden f{\"u}r beide Verfahren numerische Resultate angegeben.}, subject = {Nichtlineare Optimierung}, language = {de} } @article{Zillober1993, author = {Zillober, Christian}, title = {A globally convergent version of the method of moving asymptotes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-31984}, year = {1993}, abstract = {No abstract available}, language = {en} } @article{KredlerZilloberJohannesetal.1993, author = {Kredler, Christian and Zillober, Christian and Johannes, Frank and Sigl, Georg}, title = {An application of preconditioned conjugate gradients to relative placement in chip design}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-31996}, year = {1993}, abstract = {In distance geometry problems and many other applications, we are faced with the optimization of high-dimensional quadratic functions subject to linear equality constraints. A new approach is presented that projects the constraints, preserving sparsity properties of the original quadratic form such that well-known preconditioning techniques for the conjugate gradient method remain applicable. Very large-scale cell placement problems in chip design have been solved successfully with diagonal and incomplete Cholesky preconditioning. Numerical results produced by a FORTRAN 77 program illustrate the good behaviour of the algorithm.}, language = {en} } @inproceedings{Zillober1993a, author = {Zillober, Christian}, title = {Sequential convex programming in theory and praxis}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-35513}, year = {1993}, abstract = {In this paper, convex approximation methods, such as CONLIN, the method of moving asymptotes (MMA) and a stabilized version of MMA (Sequential Convex Programming), are discussed with respect to their convergence behaviour. 
In an extensive numerical study they are finally compared with other well-known optimization methods at 72 examples of sizing problems.}, language = {en} } @phdthesis{Even2009, author = {Even, Nadine}, title = {On Hydrodynamic Limits and Conservation Laws}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-38374}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {No abstract available}, language = {en} } @phdthesis{Wissel2009, author = {Wissel, Julia}, title = {A new biased estimator for multivariate regression models with highly collinear variables}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-36383}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {Es ist wohlbekannt, dass der Kleinste-Quadrate-Sch{\"a}tzer im Falle vorhandener Multikollinearit{\"a}t eine große Varianz besitzt. Eine M{\"o}glichkeit dieses Problem zu umgehen, besteht in der Verwendung von verzerrten Sch{\"a}tzern, z.B. den Ridge-Sch{\"a}tzer. In dieser Arbeit wird ein neues Sch{\"a}tzverfahren vorgestellt, das auf Addition einer kleinen Konstanten omega auf die Regressoren beruht. Der dadurch erzeugte Sch{\"a}tzer wird in Abh{\"a}ngigkeit von omega beschrieben und es wird gezeigt, dass dessen Mean Squared Error kleiner ist als der des Kleinste-Quadrate-Sch{\"a}tzers im Falle von stark korrelierten Regressoren.}, subject = {Starke Kopplung}, language = {en} } @phdthesis{Wurst2015, author = {Wurst, Jan-Eric}, title = {Hp-Finite Elements for PDE-Constrained Optimization}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-024-5 (print)}, doi = {10.25972/WUP-978-3-95826-025-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115027}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {188}, year = {2015}, abstract = {Diese Arbeit behandelt die hp-Finite Elemente Methode (FEM) f{\"u}r linear quadratische Optimalsteuerungsprobleme. 
Dabei soll ein Zielfunktional, welches die Entfernung zu einem angestrebten Zustand und hohe Steuerungskosten (als Regularisierung) bestraft, unter der Nebenbedingung einer elliptischen partiellen Differentialgleichung minimiert werden. Bei der Anwesenheit von Steuerungsbeschr{\"a}nkungen k{\"o}nnen die notwendigen Bedingungen erster Ordnung, die typischerweise f{\"u}r numerische L{\"o}sungsverfahren genutzt werden, als halbglatte Projektionsformel formuliert werden. Folglich sind optimale L{\"o}sungen oftmals auch nicht-glatt. Die Technik der hp-Diskretisierung ber{\"u}cksichtigt diese Tatsache und approximiert raue Funktionen auf feinen Gittern, w{\"a}hrend Elemente h{\"o}herer Ordnung auf Gebieten verwendet werden, auf denen die L{\"o}sung glatt ist. Die erste Leistung dieser Arbeit ist die erfolgreiche Anwendung der hp-FEM auf zwei verwandte Problemklassen: Neumann- und Interface-Steuerungsprobleme. Diese werden zun{\"a}chst mit entsprechenden a-priori Verfeinerungsstrategien gel{\"o}st, mit der randkonzentrierten (bc) FEM oder interface konzentrierten (ic) FEM. Diese Strategien generieren Gitter, die stark in Richtung des Randes beziehungsweise des Interfaces verfeinert werden. Um f{\"u}r beide Techniken eine algebraische Reduktion des Approximationsfehlers zu beweisen, wird eine elementweise interpolierende Funktion konstruiert. Außerdem werden die lokale und globale Regularit{\"a}t von L{\"o}sungen behandelt, weil sie entscheidend f{\"u}r die Konvergenzgeschwindigkeit ist. Da die bc- und ic- FEM kleine Polynomgrade f{\"u}r Elemente verwenden, die den Rand beziehungsweise das Interface ber{\"u}hren, k{\"o}nnen eine neue L2- und L∞-Fehlerabsch{\"a}tzung hergeleitet werden. Letztere bildet die Grundlage f{\"u}r eine a-priori Strategie zum Aufdatieren des Regularisierungsparameters im Zielfunktional, um Probleme mit bang-bang Charakter zu l{\"o}sen. 
Zudem wird die herk{\"o}mmliche hp-Idee, die darin besteht, das Gitter geometrisch in Richtung der Ecken des Gebiets abzustufen, auf die L{\"o}sung von Optimalsteuerungsproblemen {\"u}bertragen (vc-FEM). Es gelingt, Regularit{\"a}t in abz{\"a}hlbar normierten R{\"a}umen f{\"u}r die Variablen des gekoppelten Optimalit{\"a}tssystems zu zeigen. Hieraus resultiert die exponentielle Konvergenz in Bezug auf die Anzahl der Freiheitsgrade. Die zweite Leistung dieser Arbeit ist die Entwicklung einer v{\"o}llig adaptiven hp-Innere-Punkte-Methode, die Probleme mit verteilter oder Neumann Steuerung l{\"o}sen kann. Das zugrundeliegende Barriereproblem besitzt ein nichtlineares Optimalit{\"a}tssystem, das eine numerische Herausforderung beinhaltet: die stabile Berechnung von Integralen {\"u}ber Funktionen mit m{\"o}glichen Singularit{\"a}ten in Elementen h{\"o}herer Ordnung. Dieses Problem wird dadurch gel{\"o}st, dass die Steuerung an den Integrationspunkten {\"u}berwacht wird. Die Zul{\"a}ssigkeit an diesen Punkten wird durch einen Gl{\"a}ttungsschritt garantiert. In dieser Arbeit werden sowohl die Konvergenz eines Innere-Punkte-Verfahrens mit Gl{\"a}ttungsschritt als auch a-posteriori Schranken f{\"u}r den Diskretisierungsfehler gezeigt. Dies f{\"u}hrt zu einem adaptiven L{\"o}sungsalgorithmus, dessen Gitterverfeinerung auf der Entwicklung der L{\"o}sung in eine Legendre Reihe basiert. 
Hierbei dient das Abklingverhalten der Koeffizienten als Glattheitsindikator und wird f{\"u}r die Entscheidung zwischen h- und p-Verfeinerung herangezogen.}, subject = {Finite-Elemente-Methode}, language = {en} } @phdthesis{Karl2015, author = {Karl, Sabine}, title = {Firm Values and Systemic Stability in Financial Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115739}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Based on the work of Eisenberg and Noe [2001], Suzuki [2002], Elsinger [2009] and Fischer [2014], we consider a generalization of Merton's asset valuation approach where n firms are linked by cross-ownership of equities and liabilities. Each firm is assumed to have a single outstanding liability, whereas its assets consist of one system-exogenous asset, as well as system-endogenous assets comprising some fraction of other firms' equity and liability, respectively. Following Fischer [2014], one can obtain no-arbitrage prices of equity and the recovery claims of liabilities as solutions of a fixed point problem, and hence obtain no-arbitrage prices of the `firm value' of each firm, which is the value of the firm's liability plus the firm's equity. In a first step, we consider the two-firm case where explicit formulae for the no-arbitrage prices of the firm values are available (cf. Suzuki [2002]). Since firm values are derivatives of exogenous asset values, the distribution of firm values at maturity can be determined from the distribution of exogenous asset values. The Merton model and most of its known extensions do not account for the cross-ownership structure of the assets owned by the firm. Therefore the assumption of lognormally distributed exogenous assets leads to lognormally distributed firm values in such models, as the values of the liability and the equity add up to the exogenous asset's value (which has lognormal distribution by assumption). 
Our work therefore starts from lognormally distributed exogenous assets and reveals how cross-ownership, when correctly accounted for in the valuation process, affects the distribution of the firm value, which is not lognormal anymore. In a simulation study we examine the impact of several parameters (amount of cross-ownership of debt and equity, ratio of liabilities to expected exogenous assets value) on the differences between the distribution of firm values obtained from our model and correspondingly matched lognormal distributions. It becomes clear that the assumption of lognormally distributed firm values may lead to both over- and underestimation of the "true" firm values (within the cross-ownership model) and consequently of bankruptcy risk, too. In a second step, the bankruptcy risk of one firm within the system is analyzed in more detail in a further simulation study, revealing that the correct incorporation of cross-ownership in the valuation procedure is the more important, the tighter the cross-ownership structure between the two firms. Furthermore, depending on the considered type of cross-ownership (debt or equity), the assumption of lognormally distributed firm values is likely to result in an over- resp. underestimation of the actual probability of default. In a similar vein, we consider the Value-at-Risk (VaR) of a firm in the system, which we calculate as the negative α-quantile of the firm value at maturity minus the firm's risk neutral price in t=0, i.e. we consider the (1-α)100\%-VaR of the change in firm value. If we let the cross-ownership fractions (i.e. the fraction that one firm holds of another firm's debt or equity) converge to 1 (which is the supremum of the possible values that cross-ownership fractions can take), we can prove that in a system of two firms, the lognormal model will over- resp. underestimate both univariate and bivariate probabilities of default under cross-ownership of debt only resp. cross-ownership of equity only. 
Furthermore, we provide a formula that allows us to check for an arbitrary scenario of cross-ownership and any non-negative distribution of exogenous assets whether the approximating lognormal model will over- or underestimate the related probability of default of a firm. In particular, any given non-negative distribution of exogenous asset values (non-degenerate in a certain sense) can be transformed into a new, "extreme" distribution of exogenous assets yielding such a low or high actual probability of default that the approximating lognormal model will over- and underestimate this risk, respectively. After this analysis of the univariate distribution of firm values under cross-ownership in a system of two firms with bivariately lognormally distributed exogenous asset values, we consider the copula of these firm values as a distribution-free measure of the dependency between these firm values. Without cross-ownership, this copula would be the Gaussian copula. Under cross-ownership, we especially consider the behaviour of the copula of firm values in the lower left and upper right corner of the unit square, and depending on the type of cross-ownership and the considered corner, we either obtain error bounds as to how good the copula of firm values under cross-ownership can be approximated with the Gaussian copula, or we see that the copula of firm values can be written as the copula of two linear combinations of exogenous asset values (note that these linear combinations are not lognormally distributed). These insights serve as a basis for our analysis of the tail dependence coefficient of firm values under cross-ownership. Under cross-ownership of debt only, firm values remain upper tail independent, whereas they become perfectly lower tail dependent if the correlation between exogenous asset values exceeds a certain positive threshold, which does not depend on the exact level of cross-ownership. 
Under cross-ownership of equity only, the situation is reversed in that firm values always remain lower tail independent, but upper tail independence is preserved if and only if the right tail behaviour of both firms' values is determined by the right tail behaviour of the firms' own exogenous asset value instead of the respective other firm's exogenous asset value. Next, we return to systems of n≥2 firms and analyze sensitivities of no-arbitrage prices of equity and the recovery claims of liabilities with respect to the model parameters. In the literature, such sensitivities are provided with respect to exogenous asset values by Gouri{\'e}roux et al. [2012], and we extend the existing results by considering how these no-arbitrage prices depend on the cross-ownership fractions and the level of liabilities. For the former, we can show that all prices are non-decreasing in any cross-ownership fraction in the model, and by use of a version of the Implicit Function Theorem we can also determine exact derivatives. For the latter, we show that the recovery value of debt and the equity value of a firm are non-decreasing and non-increasing in the firm's nominal level of liabilities, respectively, but the firm value is in general not monotone in the firm's level of liabilities. Furthermore, no-arbitrage prices of equity and the recovery claims of liabilities of a firm are in general non-monotone in the nominal level of liabilities of other firms in the system. If we confine ourselves to one type of cross-ownership (i.e. debt or equity), we can derive more precise relationships. All the results can be transferred to risk-neutral prices before maturity. Finally, following Gouri{\'e}roux et al. [2012] and as a kind of extension to the above sensitivity results, we consider how immediate changes in exogenous asset values of one or more firms at maturity affect the financial health of a system of n initially solvent firms. 
We start with some theoretical considerations on what we call the contagion effect, namely the change in the endogenous asset value of a firm caused by shocks on the exogenous assets of firms within the system. For the two-firm case, an explicit formula is available, making clear that in general (and in particular under cross-ownership of equity only), the effect of contagion can be positive as well as negative, i.e. it can both, mitigate and exacerbate the change in the exogenous asset value of a firm. On the other hand, we cannot generally say that a tighter cross-ownership structure leads to bigger absolute contagion effects. Under cross-ownership of debt only, firms cannot profit from positive shocks beyond the direct effect on exogenous assets, as the contagion effect is always non-positive. Next, we are concerned with spillover effects of negative shocks on a subset of firms to other firms in the system (experiencing non-negative shocks themselves), driving them into default due to large losses in their endogenous asset values. Extending the results of Glasserman and Young [2015], we provide a necessary condition for the shock to cause such an event. This also yields an upper bound for the probability of such an event. We further investigate how the stability of a system of firms exposed to multiple shocks depends on the model parameters in a simulation study. In doing so, we consider three network types (incomplete, core-periphery and ring network) with simultaneous shocks on some of the firms and wiping out a certain percentage of their exogenous assets. 
Then we analyze for all three types of cross-ownership (debt only, equity only, both debt and equity) how the shock intensity, the shock size, and network parameters such as the number of links in the network and the proportion of a firm's debt or equity held within the system of firms influence several output parameters, comprising the total number of defaults and the relative loss in the sum of firm values, among others. Comparing our results to the studies of Nier et al. [2007], Gai and Kapadia [2010] and Elliott et al. [2014], we can only partly confirm their results with respect to the number of defaults. We conclude our work with a theoretical comparison of the complete network (where each firm holds a part of any other firm) and the ring network with respect to the number of defaults caused by a shock on a single firm, as it is done by Allen and Gale [2000]. In line with the literature, we find that under cross-ownership of debt only, complete networks are "robust yet fragile" [Gai and Kapadia, 2010] in that moderate shocks can be completely withstood or drive the firm directly hit by the shock into default, but as soon as the shock exceeds a certain size, all firms are simultaneously in default. In contrast to that, firms default one by one in the ring network, with the first "contagious default" (i.e. 
a default of a firm not directly hit by the shock) already occurring for smaller shock sizes than under the complete network.}, subject = {Finanzmathematik}, language = {en} } @article{LuKlingenbergRendonetal.2014, author = {Lu, Yun-guang and Klingenberg, Christian and Rendon, Leonardo and Zheng, De-Yin}, title = {Global Solutions for a Simplified Shallow Elastic Fluids Model}, series = {Abstract and Applied Analysis}, journal = {Abstract and Applied Analysis}, number = {920248}, issn = {1687-0409}, doi = {10.1155/2014/920248}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-117978}, year = {2014}, abstract = {The Cauchy problem for a simplified shallow elastic fluids model, one 3 x 3 system of Temple's type, is studied and a global weak solution is obtained by using the compensated compactness theorem coupled with the total variation estimates on the first and third Riemann invariants, where the second Riemann invariant is singular near the zero layer depth (rho = 0). This work extends in some sense the previous works, (Serre, 1987) and (Leveque and Temple, 1985), which provided the global existence of weak solutions for 2 x 2 strictly hyperbolic system and (Heibig, 1994) for n x n strictly hyperbolic system with smooth Riemann invariants.}, language = {en} } @phdthesis{Geiselhart2015, author = {Geiselhart, Roman}, title = {Advances in the stability analysis of large-scale discrete-time systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112963}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Several aspects of the stability analysis of large-scale discrete-time systems are considered. An important feature is that the right-hand side does not have to be continuous. In particular, constructive approaches to compute Lyapunov functions are derived and applied to several system classes. 
For large-scale systems, which are considered as an interconnection of smaller subsystems, we derive a new class of small-gain results, which do not require the subsystems to be robust in some sense. Moreover, we do not only study sufficiency of the conditions, but rather state an assumption under which these conditions are also necessary. Moreover, gain construction methods are derived for several types of aggregation, quantifying how large a prescribed set of interconnection gains can be in order that a small-gain condition holds.}, subject = {Ljapunov-Funktion}, language = {en} } @article{RudelPrustySiegletal.2013, author = {Rudel, Thomas and Prusty, Bhupesh K. and Siegl, Christine and Hauck, Petra and Hain, Johannes and Korhonen, Suvi J. and Hiltunen-Back, Eija and Poulakkainen, Mirja}, title = {Chlamydia trachomatis Infection Induces Replication of Latent HHV-6}, series = {PLoS ONE}, journal = {PLoS ONE}, doi = {10.1371/journal.pone.0061400}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96731}, year = {2013}, abstract = {Human herpesvirus-6 (HHV-6) exists in latent form either as a nuclear episome or integrated into human chromosomes in more than 90\% of healthy individuals without causing clinical symptoms. Immunosuppression and stress conditions can reactivate HHV-6 replication, associated with clinical complications and even death. We have previously shown that co-infection of Chlamydia trachomatis and HHV-6 promotes chlamydial persistence and increases viral uptake in an in vitro cell culture model. Here we investigated C. trachomatis-induced HHV-6 activation in cell lines and fresh blood samples from patients having Chromosomally integrated HHV-6 (CiHHV-6). We observed activation of latent HHV-6 DNA replication in CiHHV-6 cell lines and fresh blood cells without formation of viral particles. Interestingly, we detected HHV-6 DNA in blood as well as cervical swabs from C. trachomatis-infected women. Low virus titers correlated with high C. 
trachomatis load and vice versa, demonstrating a potentially significant interaction of these pathogens in blood cells and in the cervix of infected patients. Our data suggest a thus far underestimated interference of HHV-6 and C. trachomatis with a likely impact on the disease outcome as consequence of co-infection.}, language = {en} } @phdthesis{Schleissinger2013, author = {Schleißinger, Sebastian}, title = {Embedding Problems in Loewner Theory}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96782}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {The work at hand studies problems from Loewner theory and is divided into two parts: In part 1 (chapter 2) we present the basic notions of Loewner theory. Here we use a modern form which was developed by F. Bracci, M. Contreras, S. D{\´i}az-Madrigal et al. and which can be applied to certain higher dimensional complex manifolds. We look at two domains in more detail: the Euclidean unit ball and the polydisc. Here we consider two classes of biholomorphic mappings which were introduced by T. Poreda and G. Kohr as generalizations of the class S. We prove a conjecture of G. Kohr about support points of these classes. The proof relies on the observation that the classes describe so called Runge domains, which follows from a result by L. Arosio, F. Bracci and E. F. Wold. Furthermore, we prove a conjecture of G. Kohr about support points of a class of biholomorphic mappings that comes from applying the Roper-Suffridge extension operator to the class S. In part 2 (chapter 3) we consider one special Loewner equation: the chordal multiple-slit equation in the upper half-plane. After describing basic properties of this equation we look at the problem, whether one can choose the coefficient functions in this equation to be constant. D. Prokhorov proved this statement under the assumption that the slits are piecewise analytic. We use a completely different idea to solve the problem in its general form. 
As the Loewner equation with constant coefficients holds everywhere (and not just almost everywhere), this result generalizes Loewner's original idea to the multiple-slit case. Moreover, we consider the following problems: • The "simple-curve problem" asks which driving functions describe the growth of simple curves (in contrast to curves that touch themselves). We discuss necessary and sufficient conditions, generalize a theorem of J. Lind, D. Marshall and S. Rohde to the multiple-slit equation and we give an example of a set of driving functions which generate simple curves because of a certain self-similarity property. • We discuss properties of driving functions that generate slits which enclose a given angle with the real axis. • A theorem by O. Roth gives an explicit description of the reachable set of one point in the radial Loewner equation. We prove the analog for the chordal equation.}, subject = {Biholomorphe Abbildung}, language = {en} } @article{StoevesandtHofmannHainetal.2013, author = {Stoevesandt, Johanna and Hofmann, Bernd and Hain, Johannes and Kerstan, Andreas and Trautmann, Axel}, title = {Single venom-based immunotherapy effectively protects patients with double positive tests to honey bee and Vespula venom}, series = {Allergy, Asthma \& Clinical Immunology}, journal = {Allergy, Asthma \& Clinical Immunology}, doi = {10.1186/1710-1492-9-33}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96808}, year = {2013}, abstract = {Background Referring to individuals with reactivity to honey bee and Vespula venom in diagnostic tests, the umbrella terms "double sensitization" or "double positivity" cover patients with true clinical double allergy and those allergic to a single venom with asymptomatic sensitization to the other. There is no international consensus on whether immunotherapy regimens should generally include both venoms in double sensitized patients. 
Objective We investigated the long-term outcome of single venom-based immunotherapy with regard to potential risk factors for treatment failure and specifically compared the risk of relapse in mono sensitized and double sensitized patients. Methods Re-sting data were obtained from 635 patients who had completed at least 3 years of immunotherapy between 1988 and 2008. The adequate venom for immunotherapy was selected using an algorithm based on clinical details and the results of diagnostic tests. Results Of 635 patients, 351 (55.3\%) were double sensitized to both venoms. The overall re-exposure rate to Hymenoptera stings during and after immunotherapy was 62.4\%; the relapse rate was 7.1\% (6.0\% in mono sensitized, 7.8\% in double sensitized patients). Recurring anaphylaxis was statistically less severe than the index sting reaction (P = 0.004). Double sensitization was not significantly related to relapsing anaphylaxis (P = 0.56), but there was a tendency towards an increased risk of relapse in a subgroup of patients with equal reactivity to both venoms in diagnostic tests (P = 0.15). Conclusions Single venom-based immunotherapy over 3 to 5 years effectively and long-lastingly protects the vast majority of both mono sensitized and double sensitized Hymenoptera venom allergic patients. 
Double venom immunotherapy is indicated in clinically double allergic patients reporting systemic reactions to stings of both Hymenoptera and in those with equal reactivity to both venoms in diagnostic tests who have not reliably identified the culprit stinging insect.}, language = {en} } @article{FreibergMatlachGrehnetal.2013, author = {Freiberg, Florentina Joyce and Matlach, Juliane and Grehn, Franz and Karl, Sabine and Klink, Thomas}, title = {Postoperative subconjunctival bevacizumab injection as an adjunct to 5-fluorouracil in the management of scarring after trabeculectomy}, series = {Clinical Ophthalmology}, journal = {Clinical Ophthalmology}, doi = {10.2147/OPTH.S41750}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96546}, year = {2013}, abstract = {Purpose: Scarring after glaucoma filtering surgery remains the most frequent cause for bleb failure. The aim of this study was to assess if the postoperative injection of bevacizumab reduces the number of postoperative subconjunctival 5-fluorouracil (5-FU) injections. Further, the effect of bevacizumab as an adjunct to 5-FU on the intraocular pressure (IOP) outcome, bleb morphology, postoperative medications, and complications was evaluated. Methods: Glaucoma patients (N = 61) who underwent trabeculectomy with mitomycin C were analyzed retrospectively (follow-up period of 25 ± 19 months). Surgery was performed exclusively by one experienced glaucoma specialist using a standardized technique. Patients in group 1 received subconjunctival applications of 5-FU postoperatively. Patients in group 2 received 5-FU and subconjunctival injection of bevacizumab. Results: Group 1 had 6.4 ± 3.3 (0-15) (mean ± standard deviation and range, respectively) 5-FU injections. Group 2 had 4.0 ± 2.8 (0-12) (mean ± standard deviation and range, respectively) 5-FU injections. The added injection of bevacizumab significantly reduced the mean number of 5-FU injections by 2.4 ± 3.08 (P ≤ 0.005). 
There was no significantly lower IOP in group 2 when compared to group 1. A significant reduction in vascularization and in cork screw vessels could be found in both groups (P < 0.0001, 7 days to last 5-FU), yet there was no difference between the two groups at the last follow-up. Postoperative complications were significantly higher for both groups when more 5-FU injections were applied. (P = 0.008). No significant difference in best corrected visual acuity (P = 0.852) and visual field testing (P = 0.610) between preoperative to last follow-up could be found between the two groups. Conclusion: The postoperative injection of bevacizumab reduced the number of subconjunctival 5-FU injections significantly by 2.4 injections. A significant difference in postoperative IOP reduction, bleb morphology, and postoperative medication was not detected.}, language = {en} } @article{TrautmannSeitzBrockowetal.2014, author = {Trautmann, Axel and Seitz, Cornelia S. and Brockow, Knut and Hain, Johannes}, title = {Non-steroidal anti-inflammatory drug hypersensitivity: association with elevated basal serum tryptase?}, doi = {10.1186/1710-1492-10-19}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-110399}, year = {2014}, abstract = {Background It is hypothesized that because of higher mast cell numbers and mediator release, mastocytosis predisposes patients for systemic immediate-type hypersensitivity reactions to certain drugs including non-steroidal anti-inflammatory drugs (NSAID). Objective To clarify whether patients with NSAID hypersensitivity show increased basal serum tryptase levels as sign for underlying mast cell disease. Methods As part of our allergy work-up, basal serum tryptase levels were determined in all patients with a diagnosis of NSAID hypersensitivity and the severity of the reaction was graded. Patients with confirmed IgE-mediated hymenoptera venom allergy served as a comparison group. 
Results Out of 284 patients with NSAID hypersensitivity, 26 were identified with basal serum tryptase > 10.0 ng/mL (9.2\%). In contrast, significantly (P = .004) more hymenoptera venom allergic patients had elevated tryptase > 10.0 ng/mL (83 out of 484; 17.1\%). Basal tryptase > 20.0 ng/mL was indicative for severe anaphylaxis only in venom allergic subjects (29 patients; 4x grade 2 and 25x grade 3 anaphylaxis), but not in NSAID hypersensitive patients (6 patients; 4x grade 1, 2x grade 2). Conclusions In contrast to hymenoptera venom allergy, NSAID hypersensitivity do not seem to be associated with elevated basal serum tryptase levels and levels > 20 ng/mL were not related to increased severity of the clinical reaction. This suggests that mastocytosis patients may be treated with NSAID without special precautions.}, language = {en} } @phdthesis{Reinwand2021, author = {Reinwand, Simon}, title = {Functions of Bounded Variation: Theory, Methods, Applications}, publisher = {Cuvillier-Verlag, G{\"o}ttingen}, isbn = {9783736974036}, doi = {10.25972/OPUS-23515}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-235153}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {326}, year = {2021}, abstract = {Functions of bounded variation are most important in many fields of mathematics. This thesis investigates spaces of functions of bounded variation with one variable of various types, compares them to other classical function spaces and reveals natural "habitats" of BV-functions. New and almost comprehensive results concerning mapping properties like surjectivity and injectivity, several kinds of continuity and compactness of both linear and nonlinear operators between such spaces are given. A new theory about different types of convergence of sequences of such operators is presented in full detail and applied to a new proof for the continuity of the composition operator in the classical BV-space. 
The abstract results serve as ingredients to solve Hammerstein and Volterra integral equations using fixed point theory. Many criteria guaranteeing the existence and uniqueness of solutions in BV-type spaces are given and later applied to solve boundary and initial value problems in a nonclassical setting. A big emphasis is put on a clear and detailed discussion. Many pictures and synoptic tables help to visualize and summarize the most important ideas. Over 160 examples and counterexamples illustrate the many abstract results and how delicate some of them are.}, subject = {Funktion von beschr{\"a}nkter Variation}, language = {en} } @phdthesis{Nguyen2012, author = {Nguyen, Danh Nam}, title = {Understanding the development of the proving process within a dynamic geometry environment}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-71754}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Argumentation and proof have played a fundamental role in mathematics education in recent years. The author of this dissertation would like to investigate the development of the proving process within a dynamic geometry system in order to support tertiary students understanding the proving process. The strengths of this dynamic system stimulate students to formulate conjectures and produce arguments during the proving process. Through empirical research, we classified different levels of proving and proposed a methodological model for proving. This methodological model makes a contribution to improve students' levels of proving and develop their dynamic visual thinking. We used Toulmin model of argumentation as a theoretical model to analyze the relationship between argumentation and proof. 
This research also offers some possible explanation so as to why students have cognitive difficulties in constructing proofs and provides mathematics educators with a deeper understanding on the proving process within a dynamic geometry system.}, subject = {Argumentation}, language = {en} } @article{SchaeffnerSchloemerkemper2018, author = {Sch{\"a}ffner, M. and Schl{\"o}merkemper, A.}, title = {On Lennard-Jones systems with finite range interactions and their asymptotic analysis}, series = {Networks and Heterogeneous Media}, volume = {13}, journal = {Networks and Heterogeneous Media}, number = {1}, doi = {10.3934/nhm.2018005}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-228428}, pages = {95-118}, year = {2018}, abstract = {The aim of this work is to provide further insight into the qualitative behavior of mechanical systems that are well described by Lennard-Jones type interactions on an atomistic scale. By means of Gamma-convergence techniques, we study the continuum limit of one-dimensional chains of atoms with finite range interactions of Lennard-Jones type, including the classical Lennard-Jones potentials. So far, explicit formula for the continuum limit were only available for the case of nearest and next-to-nearest neighbour interactions. In this work, we provide an explicit expression for the continuum limit in the case of finite range interactions. The obtained homogenization formula is given by the convexification of a Cauchy-Born energy density. Furthermore, we study rescaled energies in which bulk and surface contributions scale in the same way. The related discrete-to-continuum limit yields a rigorous derivation of a one-dimensional version of Griffith' fracture energy and thus generalizes earlier derivations for nearest and next-to-nearest neighbors to the case of finite range interactions. 
A crucial ingredient to our proofs is a novel decomposition of the energy that allows for refined estimates.}, language = {en} } @phdthesis{Dirr2001, author = {Dirr, Gunther}, title = {Differentialgleichungen in Fr{\'e}chetr{\"a}umen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-1180417}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {Teil 1 der Arbeit beinhaltet eine Zusammenfassung grundlegender funktionalanalytischer Ergebnisse sowie eine Einf{\"u}hrung in die Integral- und Differentialrechnung in Fr{\'e}chetr{\"a}umen. Insbesondere wird in Kapitel 2 eine ausf{\"u}hrliche Darstellung des Lebesgue-Bochner-Integrals auf Fr{\'e}chetr{\"a}umen geliefert. Teil 2 behandelt die Theorie der linearen Differentialgleichungen auf Fr{\'e}chetr{\"a}umen. Dazu werden in Kapitel 3 stark differenzierbare Halbgruppen und deren infinitesimale Generatoren charakterisiert. In Kapitel 4 werden diese Ergebnisse benutzt, um lineare Evolutionsgleichungen (von hyperbolischem oder parabolischem Typ) zu untersuchen. Teil 3 enth{\"a}lt die zentralen Resultate der Arbeit. In Kapitel 5 werden zwei Existenz- und Eindeutigkeitss{\"a}tze f{\"u}r nichtlineare gew{\"o}hnliche Differentialgleichungen in zahmen Fr{\'e}chetr{\"a}umen bewiesen. Kapitel 6 liefert eine Anwendung der Ergebnisse aus Kapitel 5 auf nichtlineare partielle Differentialgleichungen erster Ordnung.}, subject = {Differentialgleichung}, language = {de} } @phdthesis{Dittmann2001, author = {Dittmann, Ulrich}, title = {Coset Types and Tight Subgroups of Almost Completely Decomposable Groups}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2762}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {A completely decomposable group is a direct sum of subgroups of the rationals. An almost completely decomposable group is a torsion free abelian group that contains a completely decomposable group as subgroup of finite index. 
Tight subgroups are maximal subgroups (with respect to set inclusion) among the completely decomposable subgroups of an almost completely decomposable group. In this dissertation we show an extended version of the theorem of Bezout, give a new criterion for the tightness of a completely decomposable subgroup, derive some conditions under which a tight subgroup is regulating and generalize a theorem of Campagna. We give an example of an almost completely decomposable group, all of whose regulating subgroups do not have a quotient with minimal exponent. We show that among the types of elements of a coset modulo a completely decomposable group there exists a unique maximal type and define this type to be -the- coset type. We give criteria for tightness and regulating in term of coset types as well as a representation of the type subgroups using coset types. We introduce the notion of reducible cosets and show their key role for transitions from one completely decomposable subgroup up to another one containing the first one as a proper subgroup. We give an example of a tight, but not regulating subgroup which contains the regulator. We develop the notion of a fully single covered subset of a lattice, show that V-free implies fully single covered, but not necessarily vice versa, and we define an equivalence relation on the set of all finite subsets of a given lattice. We develop some extension of ordinary Hasse diagrams, and apply the lattice theoretic results on the lattice of types and almost completely decomposable groups.}, subject = {Torsionsfreie Abelsche Gruppe}, language = {en} } @phdthesis{Keilbach2000, author = {Keilbach, Rupert}, title = {Minimalfl{\"a}chen und Bj{\"o}rlingsches Problem in der Relativgeometrie}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2782}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2000}, abstract = {In dieser Arbeit besch{\"a}ftigen wir uns mit Themen aus der affinen Hyperfl{\"a}chentheorie. 
Nachdem wir die euklidische Normale, die Blaschkesche Affinnormale, eine gewisse Einparameterfamilie von Relativnormalen und die zentroaffine Normale besprochen und eine neue Einparameterfamilie von Relativnormalen definiert haben, behandeln wir die folgenden drei Schwerpunkte: Zuerst befassen wir uns mit Minimalfl{\"a}chen bez{\"u}glich verschiedener Volumina und der Rolle der jeweiligen Mittleren Kr{\"u}mmung. Wir berechnen die erste und zweite Variation der Volumina, die von den Normalen der erw{\"a}hnten Familien induziert werden. Hierbei stellen wir fest, daß die Mittlere Kr{\"u}mmung nicht immer das Verschwinden der ersten Variation des Volumens anzeigt. Anschließend {\"u}bertragen wir die Begriffe Adjungierte und Assoziierte bei euklidischen Minimalfl{\"a}chen auf Affinminimalfl{\"a}chen: Analog zum euklidischen Fall kann man die Konormale einer Affinminimalfl{\"a}che durch bestimmte ,,harmonische'' Abbildungen darstellen. Wir geben eine Methode an, wie man aus einer gegebenen Affinminimalfl{\"a}che weitere gewinnt, indem man diese Abbildungen entsprechend modifiziert. 
Schließlich l{\"o}sen wir eine Verallgemeinerung des Bj{\"o}rlingschen Problems f{\"u}r Normalen der oben erw{\"a}hnten Familien: Bei Vorgabe einer Kurve mit zwei Vektorfeldern und der Art der Normalisierung existiert - mit Ausnahmen - je genau eine elliptische und eine hyperbolische Fl{\"a}che in (pseudo-)isothermen Parametern mit folgenden Eigenschaften: Die Kurve ist eine Parameterlinie, die Normale l{\"a}ngs der Kurve stimmt mit dem einen Vektorfeld {\"u}berein, die Konormale mit dem anderen und die Mittlere und Gaußsche Kr{\"u}mmung erf{\"u}llen eine vorgegebene Bedingung.}, subject = {Minimalfl{\"a}che}, language = {de} } @phdthesis{Kessler2000, author = {Keßler, Manuel}, title = {Die Ladyzhenskaya-Konstante in der numerischen Behandlung von Str{\"o}mungsproblemen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2791}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2000}, abstract = {Charakteristisch f{\"u}r die L{\"o}sbarkeit von elliptischen partiellen Differentialgleichungssystemen mit Nebenbedingungen ist das Auftreten einer inf-sup-Bedingung. Im prototypischen Fall der Stokes-Gleichungen ist diese auch als Ladyzhenskaya-Bedingung bekannt. Die G{\"u}ltigkeit dieser Bedingung, bzw. die Existenz der zugeh{\"o}rigen Konstante ist eine Eigenschaft des Gebietes, innerhalb dessen die Differentialgleichung gel{\"o}st werden soll. W{\"a}hrend die Existenz schon die L{\"o}sbarkeit garantiert, ist beispielsweise f{\"u}r Fehleraussagen bei der numerischen Approximation auch die Gr{\"o}ße der Konstanten sehr wichtig. Insbesondere auch deshalb, weil eine {\"a}hnliche inf-sup-Bedingung auch bei der Diskretisierung mittel Finiter-Elemente-Methoden auftaucht, die hier Babuska-Brezzi-Bedingung heißt. 
Die Arbeit befaßt sich auf der einen Seite mit einer analytischen Absch{\"a}tzung der Ladyzhenskaya-Konstante f{\"u}r verschiedene Gebiete, wobei {\"A}quivalenzen mit verwandten Problemen aus der komplexen Analysis (Friedrichs-Ungleichung) und der Strukturmechanik (Kornsche Ungleichung) benutzt werden. Ein weiterer Teil befaßt sich mit dem Zusammenhang zwischen kontinuierlicher Ladyzhenskaya- Konstante und diskreter Babuska-Brezzi-Konstante. Die dabei gefundenen Ergebnisse werden mit Hilfe eines dazu entwickelten leistungsf{\"a}higen Finite-Elemente-Programmsystems numerisch verifiziert. Damit k{\"o}nnen erstmals genaue Absch{\"a}tzungen der Konstanten in zwei und drei Dimensionen gefunden werden. Aufbauend auf diesen Resultaten wird ein schneller L{\"o}sungsalgorithmus f{\"u}r die Stokes-Gleichungen vorgeschlagen und anhand von problematischen Gebieten dessen {\"U}berlegenheit gegen{\"u}ber klassischen Verfahren wie beispielsweise der Uzawa-Iteration demonstriert. W{\"a}hrend selbst bei einfachen Geometrien eine Konvergenzbeschleunigung um einen Faktor 5 erwartet werden kann, sind in kritischen F{\"a}llen Faktoren bis zu 1000 m{\"o}glich.}, subject = {Stokes-Gleichung}, language = {de} } @phdthesis{Nahler2001, author = {Nahler, Michael}, title = {Isomorphism classes of almost completely decomposable groups}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-2817}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2001}, abstract = {In this thesis we investigate near-isomorphism classes and isomorphism classes of almost completely decomposable groups. In Chapter 2 we introduce the concept of almost completely decomposable groups and sum up their most important facts. A local group is an almost completely decomposable group with a primary regulator quotient. A uniform group is a rigid local group with a homocyclic regulator quotient. In Chapter 3 a weakening of isomorphism, called type-isomorphism, appears. 
It is shown that type-isomorphism agrees with Lady's near-isomorphism. By the Main Decomposition Theorem and the Primary Reduction Theorem we are allowed to restrict ourselves on clipped local groups, namely groups without a direct rank-one summand. In Chapter 4 we collect facts of matrices over commutative rings with an identity element. Matrices over the local ring (Z / p^e Z) of residue classes of the rational integers modulo a prime power play an important role. In Chapter 5 we introduce representing matrices of finite essential extensions. Here a normal form for local groups is found by the Gauß algorithm. Uniform groups have representing matrices in Hermite normal form. The classification problems for almost completely decomposable groups up to isomorphism and up to near-isomorphism can be rephrased as equivalence problems for the representing matrices. In Chapter 6 we derive a criterion for the representing matrices of local groups in Gauß normal form. In Chapter 7 we formulate the matrix criterion for uniform groups. Two representing matrices in Hermite normal form describe isomorphic groups if and only if the rest blocks of the representing matrices are T-diagonally equivalent. Starting from a fixed near-isomorphism class in Chapter 8 we investigate isomorphism classes of uniform groups. We count groups and isomorphism classes. 
In Chapter 9 we specialize on uniform groups of rank 2r with a regulator quotient of rank r such that the rest block of the representing matrix is invertible and normed.}, subject = {Fast vollst{\"a}ndig zerlegbare Gruppe}, language = {en} } @phdthesis{Kramer2004, author = {Kramer, Helmut}, title = {Inzidenzmatrizen endlicher projektiver Ebenen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-11215}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {Ziel dieser Arbeit ist eine computerunterst{\"u}tzte Suche nach, bis auf Isomorphie, allen projektiven Ebenen zu einer gegebenen Ordnung durch Berechnung ihrer Inzidenzmatrix. Dies gelingt durch geeignete Vorstrukturierung der Matrix mit Hilfe der Doppelordnung bis Ordnung 9 auf einem aktuellen PC. In diesem Zusammenhang ist insbesondere durch einen gen{\"u}gend schnellen Algorithmus das Problem zu l{\"o}sen, ob zwei Inzidenzmatrizen zu derselben projektiven Ebene geh{\"o}ren. Die besondere Struktur, die die berechneten Beispiele von doppelgeordneten Inzidenzmatrizen der desarguesschen Ebenen aufzeigen, wird zudem durch theoretische {\"U}berlegungen untermauert. In einem letzten Kapitel wird noch eine Verbindung der projektiven Ebenen zu besonderen Blockpl{\"a}nen geschaffen.}, subject = {Projektive Ebene}, language = {de} } @phdthesis{Flegel2005, author = {Flegel, Michael L.}, title = {Constraint qualifications and stationarity concepts for mathematical programs with equilibrium constraints}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-12453}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {An exhaustive discussion of constraint qualifications (CQ) and stationarity concepts for mathematical programs with equilibrium constraints (MPEC) is presented. It is demonstrated that all but the weakest CQ, Guignard CQ, are too strong for a discussion of MPECs. Therefore, MPEC variants of all the standard CQs are introduced and investigated. 
A strongly stationary point (which is simply a KKT-point) is seen to be a necessary first order optimality condition only under the strongest CQs, MPEC-LICQ, MPEC-SMFCQ and Guignard CQ. Therefore a whole set of KKT-type conditions is investigated. A simple approach is given to acquire A-stationarity to be a necessary first order condition under MPEC-Guignard CQ. Finally, a whole chapter is devoted to investigating M-stationarity, among the strongest stationarity concepts, second only to strong stationarity. It is shown to be a necessary first order condition under MPEC-Guignard CQ, the weakest known CQ for MPECs.}, subject = {Nichtlineare Optimierung}, language = {en} } @book{FalkMarohnMicheletal.2005, author = {Falk, Michael and Marohn, Frank and Michel, Ren{\'e} and Hofmann, Daniel and Macke, Maria and Tewes, Bernward and Dinges, Peter}, title = {A First Course on Time Series Analysis : Examples with SAS}, organization = {Universit{\"a}t W{\"u}rzburg / Lehrstuhl f{\"u}r Statistik, Universit{\"a}t Eichst{\"a}tt/Rechenzentrum}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-12593}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {The analysis of real data by means of statistical methods with the aid of a software package common in industry and administration usually is not an integral part of mathematics studies, but it will certainly be part of a future professional work. The present book links up elements from time series analysis with a selection of statistical procedures used in general practice including the statistical software package SAS (Statistical Analysis System). Consequently this book addresses students of statistics as well as students of other branches such as economics, demography and engineering, where lectures on statistics belong to their academic training. But it is also intended for the practician who, beyond the use of statistical tools, is interested in their mathematical background. 
Numerous problems illustrate the applicability of the presented statistical procedures, where SAS gives the solutions. The programs used are explicitly listed and explained. No previous experience is expected neither in SAS nor in a special computer system so that a short training period is guaranteed. This book is meant for a two semester course (lecture, seminar or practical training) where the first two chapters can be dealt with in the first semester. They provide the principal components of the analysis of a time series in the time domain. Chapters 3, 4 and 5 deal with its analysis in the frequency domain and can be worked through in the second term. In order to understand the mathematical background some terms are useful such as convergence in distribution, stochastic convergence, maximum likelihood estimator as well as a basic knowledge of the test theory, so that work on the book can start after an introductory lecture on stochastics. Each chapter includes exercises. An exhaustive treatment is recommended. This book is consecutively subdivided in a statistical part and an SAS-specific part. For better clearness the SAS-specific part, including the diagrams generated with SAS, always starts with a computer symbol, representing the beginning of a session at the computer, and ends with a printer symbol for the end of this session. This book is an open source project under the GNU Free Documentation License.}, subject = {Zeitreihenanalyse}, language = {en} } @phdthesis{Trumpf2002, author = {Trumpf, Jochen}, title = {On the geometry and parametrization of almost invariant subspaces and observer theory}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-5034}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2002}, abstract = {In my Ph.D. 
thesis "On the geometry and parametrization of almost invariant subspaces and observer theory" I consider the set of almost conditioned invariant subspaces of fixed dimension for a given fixed linear finite-dimensional time-invariant observable control system in state space form. Almost conditioned invariant subspaces were introduced by Willems. They generalize the concept of a conditioned invariant subspace requiring the invariance condition to hold only up to an arbitrarily small deviation in the metric of the state space. One of the goals of the theory of almost conditioned invariant subspaces was to identify the subspaces appearing as limits of sequences of conditioned invariant subspaces. An example due to {\"O}zveren, Verghese and Willsky, however, shows that the set of almost conditioned invariant subspaces is not big enough. I address this question in a joint paper with Helmke and Fuhrmann (Towards a compactification of the set of conditioned invariant subspaces, Systems and Control Letters, 48(2):101-111, 2003). Antoulas derived a description of conditioned invariant subspaces as kernels of permuted and truncated reachability matrices of controllable pairs of the appropriate size. This description was used by Helmke and Fuhrmann to construct a diffeomorphism from the set of similarity classes of certain controllable pairs onto the set of tight conditioned invariant subspaces. In my thesis I generalize this result to almost conditioned invariant subspaces describing them in terms of restricted system equivalence classes of controllable triples. Furthermore, I identify the controllable pairs appearing in the kernel representations of conditioned invariant subspaces as being induced by corestrictions of the original system to the subspace. Conditioned invariant subspaces are known to be closely related to partial observers. 
In fact, a tracking observer for a linear function of the state of the observed system exists if and only if the kernel of that function is conditioned invariant. In my thesis I show that the system matrices of the observers are in fact the corestrictions of the observed system to the kernels of the observed functions. They in turn are closely related to partial realizations. Exploring this connection further, I prove that the set of tracking observer parameters of fixed size, i.e. tracking observers of fixed order together with the functions they are tracking, is a smooth manifold. Furthermore, I construct a vector bundle structure for the set of conditioned invariant subspaces of fixed dimension together with their friends, i.e. the output injections making the subspaces invariant, over that manifold. Willems and Trentelman generalized the concept of a tracking observer by including derivatives of the output of the observed system in the observer equations (PID-observers). They showed that a PID-observer for a linear function of the state of the observed system exists if and only if the kernel of that function is almost conditioned invariant. In my thesis I replace PID-observers by singular systems, which has the advantage that the system matrices of the observers coincide with the matrices appearing in the kernel representations of the subspaces. In a second approach to the parametrization of conditioned invariant subspaces Hinrichsen, M{\"u}nzner and Pr{\"a}tzel-Wolters, Fuhrmann and Helmke and Ferrer, F. Puerta, X. Puerta and Zaballa derived a description of conditioned invariant subspaces in terms of images of block Toeplitz type matrices. They used this description to construct a stratification of the set of conditioned invariant subspaces of fixed dimension into smooth manifolds. These so called Brunovsky strata consist of all the subspaces with fixed restriction indices. 
They constructed a cell decomposition of the Brunovsky strata into so called Kronecker cells. In my thesis I show that in the tight case this cell decomposition is induced by a Bruhat decomposition of a generalized flag manifold. I identify the adherence order of the cell decomposition as being induced by the reverse Bruhat order.}, subject = {Invarianter Unterraum}, language = {en} } @phdthesis{Vodopivec2005, author = {Vodopivec, Andrija}, title = {Quasibasen abelscher, nichtseparabler p-Gruppen}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-15359}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {In dieser Arbeit wird der Bau der (abz{\"a}hlbaren) abelschen p-Gruppen untersucht, durch die Betrachtung der dazugeh{\"o}rigen Quasibasen, die als bestimmte erzeugende Systeme der gegebenen p-Gruppe definiert sind. Die Untersuchung wird insbesondere auf die nichtseparablen p-Gruppen und ihre induktiven Quasibasen bezogen.}, subject = {Abelsche p-Gruppe}, language = {de} } @misc{Forster2013, type = {Master Thesis}, author = {Forster, Johannes}, title = {Mathematical Modeling of Complex Fluids}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-83533}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {This thesis gives an overview over mathematical modeling of complex fluids with the discussion of underlying mechanical principles, the introduction of the energetic variational framework, and examples and applications. The purpose is to present a formal energetic variational treatment of energies corresponding to the models of physical phenomena and to derive PDEs for the complex fluid systems. The advantages of this approach over force-based modeling are, e.g., that for complex systems energy terms can be established in a relatively easy way, that force components within a system are not counted twice, and that this approach can naturally combine effects on different scales. We follow a lecture of Professor Dr. 
Chun Liu from Penn State University, USA, on complex fluids which he gave at the University of Wuerzburg during his Giovanni Prodi professorship in summer 2012. We elaborate on this lecture and consider also parts of his work and publications, and substantially extend the lecture by own calculations and arguments (for papers including an overview over the energetic variational treatment see [HKL10], [Liu11] and references therein).}, subject = {Variationsrechnung}, language = {en} } @phdthesis{Klug2006, author = {Klug, Andreas}, title = {Affine-Scaling Methods for Nonlinear Minimization Problems and Nonlinear Systems of Equations with Bound Constraints}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-18851}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {In this thesis affine-scaling-methods for two different types of mathematical problems are considered. The first type of problems are nonlinear optimization problems subject to bound constraints. A class of new affine-scaling Newton-type methods is introduced. The methods are shown to be locally quadratically convergent without assuming strict complementarity of the solution. The new methods differ from previous ones mainly in the choice of the scaling matrix. The second type of problems are semismooth systems of equations with bound constraints. A new affine-scaling trust-region method for these problems is developed. The method is shown to have strong global and local convergence properties under suitable assumptions. 
Numerical results are presented for a number of problems arising from different areas.}, subject = {Skalierungsfunktion}, language = {en} } @phdthesis{Michel2006, author = {Michel, Ren{\'e}}, title = {Simulation and Estimation in Multivariate Generalized Pareto Models}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-18489}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {The investigation of multivariate generalized Pareto distributions (GPDs) in the framework of extreme value theory has begun only lately. Recent results show that they can, as in the univariate case, be used in Peaks over Threshold approaches. In this manuscript we investigate the definition of GPDs from Section 5.1 of Falk et al. (2004), which does not differ in the area of interest from those of other authors. We first show some theoretical properties and introduce important examples of GPDs. For the further investigation of these distributions simulation methods are an important part. We describe several methods of simulating GPDs, beginning with an efficient method for the logistic GPD. This algorithm is based on the Shi transformation, which was introduced by Shi (1995) and was used in Stephenson (2003) for the simulation of multivariate extreme value distributions of logistic type. We also present nonparametric and parametric estimation methods in GPD models. We estimate the angular density nonparametrically in arbitrary dimension, where the bivariate case turns out to be a special case. The asymptotic normality of the corresponding estimators is shown. Also in the parametric estimations, which are mainly based on maximum likelihood methods, the asymptotic normality of the estimators is shown under certain regularity conditions. 
Finally the methods are applied to a real hydrological data set containing water discharges of the rivers Altm{\"u}hl and Danube in southern Bavaria.}, subject = {Pareto-Verteilung}, language = {en} } @phdthesis{Petra2006, author = {Petra, Stefania}, title = {Semismooth least squares methods for complementarity problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-18660}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {This thesis is concerned with numerical methods for solving nonlinear and mixed complementarity problems. Such problems arise from a variety of applications such as equilibria models of economics, contact and structural mechanics problems, obstacle problems, discrete-time optimal control problems etc. In this thesis we present a new formulation of nonlinear and mixed complementarity problems based on the Fischer-Burmeister function approach. Unlike traditional reformulations, our approach leads to an over-determined system of nonlinear equations. This has the advantage that certain drawbacks of the Fischer-Burmeister approach are avoided. Among other favorable properties of the new formulation, the natural merit function turns out to be differentiable. To solve the arising over-determined system we use a nonsmooth damped Levenberg-Marquardt-type method and investigate its convergence properties. Under mild assumptions, it can be shown that the global and local fast convergence results are similar to some of the better equation-based method. Moreover, the new method turns out to be significantly more robust than the corresponding equation-based method. For the case of large complementarity problems, however, the performance of this method suffers from the need for solving the arising linear least squares problem exactly at each iteration. Therefore, we suggest a modified version which allows inexact solutions of the least squares problems by using an appropriate iterative solver. 
Under certain assumptions, the favorable convergence properties of the original method are preserved. As an alternative method for mixed complementarity problems, we consider a box constrained least squares formulation along with a projected Levenberg-Marquardt-type method. To globalize this method, trust region strategies are proposed. Several ingredients are used to improve this approach: affine scaling matrices and multi-dimensional filter techniques. Global convergence results as well as local superlinear/quadratic convergence are shown under appropriate assumptions. Combining the advantages of the new methods, a new software for solving mixed complementarity problems is presented.}, subject = {Komplementarit{\"a}tsproblem}, language = {en} } @phdthesis{Kraus2004, author = {Kraus, Christiane}, title = {On some maximal convergence theorems for real analytic functions in R^N}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-9795}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {Ausgangspunkt dieser Arbeit war eine Publikation von D. Braess [Bra01], in der die Approximationsg{\"u}te der Funktionen \$\$ \frac{1}{((x-x_0)^2 + (y-y_0)^2)^s}, \qquad x_0^2 + y_0^2 \ge 1, \quad s \in (0,\infty),\$\$ auf der Einheitskreisscheibe \$x^2+y^2 \le 1\$ durch reelle Polynome untersucht wurde. Braess's Ergebnisse und insbesondere die von ihm angesprochenen offenen Probleme waren von besonderem Interesse, da sie Anlaß zu der Vermutung gaben, dass die klassische Theorie der ``Maximalen Konvergenz'' in Sinne von Walsh auf (zun{\"a}chst) die oben erw{\"a}hnten reell analytischen Funktionen erweitert werden kann. (Die Theorie der Maximalen Konvergenz bringt die Approximationsg{\"u}te einer Funktion auf einer kompakten Menge durch Polynome mit der Analyzit{\"a}t dieser Funktion in Verbindung.) \\ Hauptgegenstand der Arbeit ist die Erweiterung des klassischen ``Maximalen Konvergenz''--Konzeptes auf reell analytische Funktionen in h{\"o}heren Dimensionen. 
Es werden verschiedene maximale Konvergenzs{\"a}tze sowohl in einer als auch in mehreren Ver{\"a}nderlichen bewiesen. \\ Die Arbeit gliedert sich in drei Hauptteile. \\[2mm] Im ersten Teil wird der theoretische Hintergrund der ``Maximalen Konvergenz'' mit dem Problemkreis von Braess in Zusammenhang gebracht. Es wird gezeigt, dass f{\"u}r betrags-quadratisch holomorphe Funktionen folgender Satz gilt: \\ { \bf {Satz 1}}: Es sei \$g\$ eine holomorphe Funktion auf der abgeschlossenen Einheitskreisscheibe \$\overline{\mathbb{D}}:=\{ z \in \mathbb{C} : |z| \le 1\}\$ und \$F(x,y):= |g(x+iy)|^2\$, \$x,y \in \mathbb{R}\$. Dann gilt: \$\$ \limsup_{n \to \infty} \sqrt[n]{E_n ( \overline{\mathbb{D}},F)} = \frac{1}{\rho}\$\$ genau dann, wenn \$g\$ auf \$ \{ z \in \mathbb{C} : |z| < \rho \}\$ holomorph ist, aber auf keiner echt gr{\"o}{\ss}eren Kreisscheibe, wobei \$\$ E_n ( \overline{\mathbb{D}},F)= \inf \{ ||F -P_n||_{\overline{\mathbb{D}}}, \, P_n: \mathbb{R}^2 \to \mathbb{R} \mbox{ Polynom vom Grad } \le n \}.\$\$ Dieser Satz beinhaltet nicht nur die Ergebnisse von Braess [Bra01], sondern erweitert ihn, und beantwortet die von Braess aufgeworfenen Fragen vollst{\"a}ndig. Zudem zeigt der Satz die genaue Analogie des klassischen ``Maximalen Konvergenz''--Konzeptes f{\"u}r die Funktionenklasse der betrag--quadratisch holomorphen Funktionen im \$\mathbb{R}^2\$. \\[2mm] In der Literatur gibt es viele Verallgemeinerungen des ``Maximalen Konvergenz''--Begriffes f{\"u}r mehrere komplexe Ver{\"a}nderlichen. Im Hinblick auf die vorliegende Arbeit sind besonders die Artikel [Sic62] und [Sic81] zu erw{\"a}hnen. Diese bereits bekannten Ergebnisse werden im zweiten Teil der Arbeit herangezogen, um den ``Maximalen Konvergenz''--Begriff auf mehrere reelle Ver{\"a}nderlichen zu erweitern. Man beachte, dass der entscheidende Unterschied hier in der polynomialen Approximationsklasse liegt. \\[2mm] Der dritte Teil befaßt sich mit der Verallgemeinerung des Satzes 1 in mehreren Ver{\"a}nderlichen. 
Eng verbunden mit diesem Problemkreis ist die Charakterisierung einer gewissen Extremalfunktion. Diese Funktion wird zur Bestimmung des Analyzit{\"a}tsbereichs der zu approximierenden Funktion ben{\"o}tigt. Mittels geeigneter Darstellung der Extremalfunktion und Charakterisierung des Analyzit{\"a}tsbereichs gelingt es schließlich, den folgenden Hauptsatz der vorliegenden Arbeit zu beweisen:\\ { \bf { Satz 2}}: Es seien \$g,h\$ holomorphe Funktionen auf der abgeschlossenen Einheitskugel \$\overline{\mathbb{D}}_N:=\{ z \in \mathbb{C}^N : |z| \le 1\}\$ und \$F(x,y):= g(x+iy) \overline{h(x+iy)}\$, \$x,y \in \mathbb{R}^N\$. Dann gilt: \$\$ \limsup_{n \to \infty} \sqrt[n]{E_n ( \overline{\mathbb{D}}_N,F)} = \frac{1}{\rho}\$\$ genau dann, wenn \$g,h\$ auf \${\mathbb{D}}_{N,\rho}:= \{ z \in \mathbb{C}^N : |z| < \rho \}\$ holomorph sind, und mindestens eine der zwei Funktionen \$g,h\$ auf keinem echt gr{\"o}{\ss}eren Ball als \$\mathbb{D}_{N,\rho}\$ holomorph fortsetzbar ist. Hierbei bezeichnet \$\$ E_n ( \overline{\mathbb{D}}_N,F)= \inf \{ ||F -P_n||_{\overline{\mathbb{D}}_N}, \, P_n: \mathbb{R}^{2N} \to \mathbb{C} \mbox{ Polynom vom Grad } \le n \}.\$\$ \$[\$Bra01\$]\$ Braess, D., {\it Note on the Approximation of Powers of the Distance in Two-Dimensional Domains}, Constructive Approximation (2001), {\bf 17} No. 1, 147--151. \\ \$[\$Sic62\$]\$ Siciak, J., {\it On some extremal functions and their applications in the theory of analytic functions of several complex variables}, Trans. Amer. Math. Soc. (1962), {\bf 105}, 322--357. \\ \$[\$Sic81\$]\$ Siciak, J., {\it Extremal plurisubharmonic functions in \$\mathbb{C}^N\$}, Ann. Pol. Math. 
(1981), {\bf 39}, 175--211.}, subject = {Reelle Funktion}, language = {de} } @phdthesis{Seider2004, author = {Seider, David}, title = {Solving an eigenvalue problem in laser simulation}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-10057}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {In this thesis a new and powerful approach for modeling laser cavity eigenmodes is presented. This approach is based on an eigenvalue problem for singularly perturbed partial differential operators with complex coefficients; such operators have not been investigated in detail until now. The eigenvalue problem is discretized by finite elements, and convergence of the approximate solution is proved by using an abstract convergence theory also developed in this dissertation. This theory for the convergence of an approximate solution of a (quadratic) eigenvalue problem, which particularly can be applied to a finite element discretization, is interesting on its own, since the ideas can conceivably be used to handle equations with a more complex nonlinearity. The discretized eigenvalue problem essentially is solved by preconditioned GMRES, where the preconditioner is constructed according to the underlying physics of the problem. The power and correctness of the new approach for computing laser cavity eigenmodes is clearly demonstrated by successfully simulating a variety of different cavity configurations. The thesis is organized as follows: Chapter 1 contains a short overview on solving the so-called Helmholtz equation with the help of finite elements. The main part of Chapter 2 is dedicated to the analysis of a one-dimensional model problem containing the main idea of a new model for laser cavity eigenmodes which is derived in detail in Chapter 3. Chapter 4 comprises a convergence theory for the approximate solution of quadratic eigenvalue problems. 
In Chapter 5, a stabilized finite element discretization of the new model is described and its convergence is proved by applying the theory of Chapter 4. Chapter 6 contains computational aspects of solving the resulting system of equations and, finally, Chapter 7 presents numerical results for various configurations, demonstrating the practical relevance of our new approach.}, subject = {Laser}, language = {en} } @phdthesis{Kleinsteuber2005, author = {Kleinsteuber, Martin}, title = {Jacobi-type methods on semisimple Lie algebras : a Lie algebraic approach to numerical linear algebra}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-16454}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {Es wird eine Lie-algebraische Verallgemeinerung sowohl des klassischen als auch des Sortier-Jacobi-Verfahrens f{\"u}r das symmetrische Eigenwertproblem behandelt. Der koordinatenfreie Zugang erm{\"o}glicht durch eine neue Betrachtungsweise die Vereinheitlichung strukturierter Eigen- und Singul{\"a}rwertprobleme, darunter bis dato noch nicht betrachtete F{\"a}lle. F{\"u}r beide Verfahren wird lokal quadratische Konvergenz, sowohl f{\"u}r den regul{\"a}ren als auch f{\"u}r den irregul{\"a}ren Fall, gezeigt. Die Analyse und Verallgemeinerung der sog. 
speziellen Sweeps f{\"u}r das symmetrische Eigenwertproblem f{\"u}hrt zu neuen Sweep-Methoden f{\"u}r strukturierte Eigen- und Singul{\"a}rwertprobleme, die ein besseres Konvergenzverhalten als die bisher bekannten aufweisen.}, subject = {Eigenwert}, language = {en} } @book{FalkMarohnMicheletal.2006, author = {Falk, Michael and Marohn, Frank and Michel, Ren{\'e} and Hofmann, Daniel and Macke, Maria and Tewes, Bernward and Dinges, Peter}, title = {A First Course on Time Series Analysis : Examples with SAS}, organization = {Universit{\"a}t W{\"u}rzburg / Lehrstuhl f{\"u}r Statistik}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-16919}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {The analysis of real data by means of statistical methods with the aid of a software package common in industry and administration usually is not an integral part of mathematics studies, but it will certainly be part of a future professional work. The present book links up elements from time series analysis with a selection of statistical procedures used in general practice including the statistical software package SAS (Statistical Analysis System). Consequently this book addresses students of statistics as well as students of other branches such as economics, demography and engineering, where lectures on statistics belong to their academic training. But it is also intended for the practician who, beyond the use of statistical tools, is interested in their mathematical background. Numerous problems illustrate the applicability of the presented statistical procedures, where SAS gives the solutions. The programs used are explicitly listed and explained. No previous experience is expected neither in SAS nor in a special computer system so that a short training period is guaranteed. This book is meant for a two semester course (lecture, seminar or practical training) where the first two chapters can be dealt with in the first semester. 
They provide the principal components of the analysis of a time series in the time domain. Chapters 3, 4 and 5 deal with its analysis in the frequency domain and can be worked through in the second term. In order to understand the mathematical background some terms are useful such as convergence in distribution, stochastic convergence, maximum likelihood estimator as well as a basic knowledge of the test theory, so that work on the book can start after an introductory lecture on stochastics. Each chapter includes exercises. An exhaustive treatment is recommended. This book is consecutively subdivided in a statistical part and an SAS-specific part. For better clearness the SAS-specific part, including the diagrams generated with SAS, always starts with a computer symbol, representing the beginning of a session at the computer, and ends with a printer symbol for the end of this session. This book is an open source project under the GNU Free Documentation License.}, subject = {Zeitreihenanalyse}, language = {en} } @phdthesis{Lageman2007, author = {Lageman, Christian}, title = {Convergence of gradient-like dynamical systems and optimization algorithms}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-23948}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {This work studies the convergence of trajectories of gradient-like systems. In the first part of this work continuous-time gradient-like systems are examined. Results on the convergence of integral curves of gradient systems to single points of Lojasiewicz and Kurdyka are extended to a class of gradient-like vector fields and gradient-like differential inclusions. In the second part of this work discrete-time gradient-like optimization methods on manifolds are studied. Methods for smooth and for nonsmooth optimization problems are considered. For these methods some convergence results are proven. 
Additionally the optimization methods for nonsmooth cost functions are applied to sphere packing problems on adjoint orbits.}, subject = {Dynamisches System}, language = {en} }