@phdthesis{Wurst2015, author = {Wurst, Jan-Eric}, title = {Hp-Finite Elements for PDE-Constrained Optimization}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-024-5 (print)}, doi = {10.25972/WUP-978-3-95826-025-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115027}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {188}, year = {2015}, abstract = {This thesis treats the hp-finite element method (FEM) for linear-quadratic optimal control problems, in which an objective functional that penalizes the distance to a desired state and high control costs (as a regularization) is minimized subject to an elliptic partial differential equation as a constraint. In the presence of control constraints, the first-order necessary conditions, which are typically used in numerical solution methods, can be formulated as a semi-smooth projection formula. Consequently, optimal solutions are often non-smooth. The technique of hp-discretization accounts for this fact and approximates rough functions on fine meshes, while elements of higher polynomial degree are used in regions where the solution is smooth. The first achievement of this thesis is the successful application of the hp-FEM to two related classes of problems: Neumann and interface control problems. These are first solved with corresponding a priori refinement strategies, the boundary-concentrated (bc) FEM and the interface-concentrated (ic) FEM. These strategies generate meshes that are strongly refined towards the boundary and the interface, respectively. In order to prove an algebraic reduction of the approximation error for both techniques, an element-wise interpolating function is constructed. Moreover, the local and global regularity of solutions is discussed, since it is decisive for the speed of convergence. Since the bc- and ic-FEM use low polynomial degrees for elements touching the boundary and the interface, respectively, new L2- and L∞-error estimates can be derived. The latter forms the basis of an a priori strategy for updating the regularization parameter in the objective functional in order to solve problems with bang-bang character. Furthermore, the conventional hp-idea, which consists of geometrically grading the mesh towards the corners of the domain, is carried over to the solution of optimal control problems (vc-FEM). Regularity in countably normed spaces is established for the variables of the coupled optimality system, which results in exponential convergence with respect to the number of degrees of freedom. The second achievement of this thesis is the development of a fully adaptive hp-interior-point method that can solve problems with distributed or Neumann control. The underlying barrier problem has a nonlinear optimality system that poses a numerical challenge: the stable computation of integrals over functions with possible singularities in elements of higher order. This problem is solved by monitoring the control at the integration points. Feasibility at these points is guaranteed by a smoothing step.
In this thesis, both the convergence of an interior-point method with this smoothing step and a posteriori bounds for the discretization error are shown. This leads to an adaptive solution algorithm whose mesh refinement is based on the expansion of the solution into a Legendre series, where the decay behavior of the coefficients serves as a smoothness indicator and is used to decide between h- and p-refinement.}, subject = {Finite-Elemente-Methode}, language = {en} } @phdthesis{Wongkaew2015, author = {Wongkaew, Suttida}, title = {On the control through leadership of multi-agent systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-120914}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The investigation of interacting multi-agent models is a new field of mathematical research with applications to the study of behavior in groups of animals or communities of people. One interesting feature of multi-agent systems is collective behavior. From the mathematical point of view, one of the challenging issues connected with these dynamical models is the development of control mechanisms that are able to influence the time evolution of these systems. In this thesis, we focus on the study of controllability, stabilization and optimal control problems for multi-agent systems, considering the following three models: The first one is the Hegselmann-Krause opinion formation (HK) model. The HK dynamics describes how individuals' opinions are changed by interaction with others taking place in a bounded domain of confidence. The study of this model focuses on determining feedback controls that drive the agents' opinions to reach a desired agreement. The second model is the Heider social balance (HB) model. The HB dynamics explains the evolution of relationships in a social network. One purpose of studying this system is the construction of a control function in order to steer the relationships to reach a friendship state. The third model that we discuss is a flocking model describing collective motion observed in biological systems. The flocking model under consideration includes self-propelling, friction, attraction, repulsion, and alignment features. We investigate a control for steering the flocking system to track a desired trajectory. Common to all these systems is our strategy of adding a leader agent that interacts with all other members of the system and carries the control mechanism. Our control-through-leadership approach is developed using classical theoretical control methods and a model predictive control (MPC) scheme. To apply the former method, the stability of the corresponding linearized system near consensus is investigated for each model. Further, local controllability is examined. However, only for the Hegselmann-Krause opinion formation model is a feedback control determined that steers the agents' opinions to global convergence to a desired agreement. The MPC approach is an optimal control strategy based on numerical optimization. To apply the MPC scheme, optimal control problems are formulated for each model, with objective functions that differ depending on the desired goal of the problem. The first-order necessary optimality conditions for each problem are presented.
Moreover, for the numerical treatment, a sequence of open-loop discrete optimality systems is solved by accurate Runge-Kutta schemes, and a nonlinear conjugate gradient solver is implemented for the optimization procedure. Finally, numerical experiments are performed to investigate the properties of the multi-agent models and to demonstrate the ability of the proposed control strategies to drive multi-agent systems to attain a desired consensus and to track a given trajectory.}, subject = {Mehragentensystem}, language = {en} } @phdthesis{Srichan2015, author = {Srichan, Teerapat}, title = {Discrete Moments of Zeta-Functions with respect to random and ergodic transformations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-118395}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {In this thesis, discrete moments of the Riemann zeta-function and allied Dirichlet series are studied. In the first part, the asymptotic value-distribution of zeta-functions is studied, where the samples are taken from a Cauchy random walk on a vertical line inside the critical strip. Building on techniques of Lifshits and Weber, analogous results for the Hurwitz zeta-function are derived. Using Atkinson's dissection, this is even generalized to Dirichlet L-functions associated with a primitive character. Both results indicate that the expectation value equals one, which shows that the values of these zeta-functions are small on average. The second part deals with the logarithmic derivative of the Riemann zeta-function on vertical lines; here the samples are taken with respect to an explicit ergodic transformation. Extending work of Steuding, discrete moments are evaluated and an equivalent formulation of the Riemann Hypothesis in terms of ergodic theory is obtained. In the third and last part of the thesis, the phenomenon of universality with respect to stochastic processes is studied. It is shown that certain random shifts of the zeta-function can approximate non-vanishing analytic target functions as well as we please. This result relies on Voronin's universality theorem.}, subject = {Riemannsche Zetafunktion}, language = {en} } @phdthesis{Schaeffner2015, author = {Sch{\"a}ffner, Mathias}, title = {Multiscale analysis of non-convex discrete systems via \(\Gamma\)-convergence}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-122349}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The subject of this thesis is the rigorous passage from discrete systems to continuum models via variational methods. The first part of this work studies a discrete model describing a one-dimensional chain of atoms with finite-range interactions of Lennard-Jones type. We derive an expansion of the ground state energy using \(\Gamma\)-convergence. In particular, we show that a variant of the Cauchy-Born rule holds true for the model under consideration. We exploit this observation to derive boundary layer energies due to asymmetries of the lattice at the boundary or at cracks of the specimen. Hereby we extend several results obtained previously by Braides and Cicalese and by Scardia, Schl{\"o}merkemper and Zanini for models involving only nearest and next-to-nearest neighbour interactions. The second part of this thesis is devoted to the analysis of a quasi-continuum (QC) method. To this end, we consider the discrete model studied in the first part of this thesis as the fully atomistic model problem and construct an approximation based on a QC method.
We show that in an elastic setting the expansions by \(\Gamma\)-convergence of the fully atomistic energy and of its QC approximation coincide. In the case of fracture, we show that this is not true in general. In the case of only nearest and next-to-nearest neighbour interactions, we give sufficient conditions on the QC approximation such that, also in the case of fracture, the minimal energies of the fully atomistic energy and of its approximation coincide in the limit.}, subject = {Gamma-Konvergenz}, language = {en} } @phdthesis{Mohammadi2015, author = {Mohammadi, Masoumeh}, title = {Analysis of discretization schemes for Fokker-Planck equations and related optimality systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-111494}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The Fokker-Planck (FP) equation is a fundamental model in thermodynamic kinetic theories and statistical mechanics. In general, the FP equation appears in a number of different fields in the natural sciences, for instance in solid-state physics, quantum optics, chemical physics, theoretical biology, and circuit theory. These equations also provide a powerful means to define robust control strategies for random models. FP equations are partial differential equations (PDEs) describing the time evolution of the probability density function (PDF) of stochastic processes. These equations are of different types depending on the underlying stochastic process; in particular, they are parabolic PDEs for the PDF of Ito processes, and hyperbolic PDEs for piecewise deterministic processes (PDP). A fundamental axiom of probability calculus requires that the integral of the PDF over the allowable state space equal one, for all time. Therefore, for the purpose of accurate numerical simulation, a discretized FP equation must guarantee conservativeness of the total probability. Furthermore, since the solution of the FP equation represents a probability density, any numerical scheme that approximates the FP equation is required to guarantee the positivity of the solution. In addition, an approximation scheme must be accurate and stable. For these purposes, for parabolic FP equations on bounded domains, we investigate the Chang-Cooper (CC) scheme for space discretization together with first- and second-order backward time differencing. We prove that the resulting space-time discretization schemes are accurate, conditionally stable, conservative, and positivity-preserving. Further, we discuss a finite difference discretization for the FP system corresponding to a PDP process in a bounded domain. Next, we discuss FP equations in unbounded domains, where finite-difference or finite-element methods cannot be applied. By employing a suitable set of basis functions, spectral methods make it possible to treat unbounded domains. Since FP solutions decay exponentially at infinity, we consider Hermite functions as basis functions, i.e. Hermite polynomials multiplied by a Gaussian. To this end, the Hermite spectral discretization is applied to two different FP equations: the parabolic PDE corresponding to Ito processes, and the system of hyperbolic PDEs corresponding to a PDP process. The resulting discretized schemes are analyzed, and stability and spectral accuracy of the Hermite spectral discretization of the FP problems are proved. Furthermore, we investigate the conservativeness of the solutions of FP equations discretized with the Hermite spectral scheme.
In the last part of this thesis, we discuss optimal control problems governed by FP equations and the characterization of their solutions by optimality systems. We then investigate the Hermite spectral discretization of FP optimality systems in unbounded domains. Within the framework of Hermite discretization, we obtain sparse-band systems of ordinary differential equations. We analyze the accuracy of the discretization schemes by showing spectral convergence in approximating the state, the adjoint, and the control variables that appear in the FP optimality systems. To validate our theoretical estimates, we present results of numerical experiments.}, subject = {Fokker-Planck-Gleichung}, language = {en} } @article{MatlachDhillonHainetal.2015, author = {Matlach, Juliane and Dhillon, Christine and Hain, Johannes and Schlunck, G{\"u}nther and Grehn, Franz and Klink, Thomas}, title = {Trabeculectomy versus canaloplasty (TVC study) in the treatment of patients with open-angle glaucoma: a prospective randomized clinical trial}, series = {Acta Ophthalmologica}, volume = {93}, journal = {Acta Ophthalmologica}, doi = {10.1111/aos.12722}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-149263}, pages = {753-761}, year = {2015}, abstract = {Purpose: To compare the outcomes of canaloplasty and trabeculectomy in open-angle glaucoma. Methods: This prospective, randomized clinical trial included 62 patients who randomly received trabeculectomy (n = 32) or canaloplasty (n = 30) and were followed up prospectively for 2 years. The primary endpoint was complete success (without medication) and qualified success (with or without medication), defined as an intraocular pressure (IOP) of ≤18 mmHg (definition 1) or IOP ≤21 mmHg and ≥20\% IOP reduction (definition 2), IOP ≥5 mmHg, no vision loss and no further glaucoma surgery. Secondary endpoints were the absolute IOP reduction, visual acuity, medication, complications and second surgeries. Results: Surgical treatment significantly reduced IOP in both groups (p < 0.001). Complete success was achieved in 74.2\% and 39.1\% (definition 1, p = 0.01), and 67.7\% and 39.1\% (definition 2, p = 0.04) after 2 years in the trabeculectomy and canaloplasty groups, respectively. Mean absolute IOP reduction was 10.8 ± 6.9 mmHg in the trabeculectomy and 9.3 ± 5.7 mmHg in the canaloplasty group after 2 years (p = 0.47). Mean IOP was 11.5 ± 3.4 mmHg in the trabeculectomy and 14.4 ± 4.2 mmHg in the canaloplasty group after 2 years. Following trabeculectomy, complications were more frequent, including hypotony (37.5\%), choroidal detachment (12.5\%) and elevated IOP (25.0\%). Conclusions: Trabeculectomy is associated with a stronger IOP reduction and less need for medication, at the cost of a higher rate of complications. If the target pressure is attainable by moderate IOP reduction, canaloplasty may be considered for its relative ease of postoperative care and lack of complications.}, language = {en} } @phdthesis{Lurz2015, author = {Lurz, Kristina}, title = {Confidence and Prediction under Covariates and Prior Information}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-122748}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The purpose of confidence and prediction intervals is to provide an interval estimate of an unknown distribution parameter or of the future value of a phenomenon. In many applications, prior knowledge about the distribution parameter is available but rarely made use of, except in a Bayesian framework.
This thesis provides exact frequentist confidence intervals of minimal volume that exploit prior information. The scheme is applied to the distribution parameters of the binomial and the Poisson distribution. The Bayesian approach to obtaining intervals for a distribution parameter in the form of credibility intervals is considered, with particular emphasis on the binomial distribution. An application of interval estimation is found in auditing, where two-sided intervals of Stringer type are meant to contain the mean of a zero-inflated population. In the context of time series analysis, covariates are supposed to improve the prediction of future values. Exponential smoothing with covariates, an extension of the popular forecasting method exponential smoothing, is considered in this thesis. A double-seasonality version of it is applied to forecast hourly electricity load using meteorological covariates. Different kinds of prediction intervals for exponential smoothing with covariates are formulated.}, subject = {Konfidenzintervall}, language = {en} } @phdthesis{Karl2015, author = {Karl, Sabine}, title = {Firm Values and Systemic Stability in Financial Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115739}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Based on the work of Eisenberg and Noe [2001], Suzuki [2002], Elsinger [2009] and Fischer [2014], we consider a generalization of Merton's asset valuation approach in which n firms are linked by cross-ownership of equities and liabilities. Each firm is assumed to have a single outstanding liability, whereas its assets consist of one system-exogenous asset as well as system-endogenous assets comprising some fraction of other firms' equity and liability, respectively. Following Fischer [2014], one can obtain no-arbitrage prices of equity and of the recovery claims of liabilities as solutions of a fixed point problem, and hence no-arbitrage prices of the `firm value' of each firm, which is the value of the firm's liability plus the firm's equity. In a first step, we consider the two-firm case, where explicit formulae for the no-arbitrage prices of the firm values are available (cf. Suzuki [2002]). Since firm values are derivatives of exogenous asset values, the distribution of firm values at maturity can be determined from the distribution of exogenous asset values. The Merton model and most of its known extensions do not account for the cross-ownership structure of the assets owned by the firm. In such models, the assumption of lognormally distributed exogenous assets therefore leads to lognormally distributed firm values, as the values of the liability and the equity add up to the exogenous asset's value (which is lognormally distributed by assumption). Our work therefore starts from lognormally distributed exogenous assets and reveals how cross-ownership, when correctly accounted for in the valuation process, affects the distribution of the firm value, which is no longer lognormal. In a simulation study we examine the impact of several parameters (amount of cross-ownership of debt and equity, ratio of liabilities to expected exogenous asset value) on the differences between the distribution of firm values obtained from our model and correspondingly matched lognormal distributions. It becomes clear that the assumption of lognormally distributed firm values may lead to both over- and underestimation of the "true" firm values (within the cross-ownership model) and consequently of bankruptcy risk.
In a second step, the bankruptcy risk of one firm within the system is analyzed in more detail in a further simulation study, revealing that the correct incorporation of cross-ownership in the valuation procedure becomes the more important the tighter the cross-ownership structure between the two firms is. Furthermore, depending on the considered type of cross-ownership (debt or equity), the assumption of lognormally distributed firm values is likely to result in an over- or underestimation, respectively, of the actual probability of default. In a similar vein, we consider the Value-at-Risk (VaR) of a firm in the system, which we calculate as the negative α-quantile of the firm value at maturity minus the firm's risk-neutral price in t=0, i.e. we consider the (1-α)100\%-VaR of the change in firm value. If we let the cross-ownership fractions (i.e. the fraction that one firm holds of another firm's debt or equity) converge to 1 (the supremum of the possible values that cross-ownership fractions can take), we can prove that in a system of two firms the lognormal model will overestimate and underestimate, respectively, both univariate and bivariate probabilities of default under cross-ownership of debt only and under cross-ownership of equity only. Furthermore, we provide a formula that allows us to check, for an arbitrary scenario of cross-ownership and any non-negative distribution of exogenous assets, whether the approximating lognormal model will over- or underestimate the related probability of default of a firm. In particular, any given non-negative distribution of exogenous asset values (non-degenerate in a certain sense) can be transformed into a new, "extreme" distribution of exogenous assets yielding an actual probability of default so low or so high that the approximating lognormal model will overestimate or underestimate this risk, respectively. After this analysis of the univariate distribution of firm values under cross-ownership in a system of two firms with bivariately lognormally distributed exogenous asset values, we consider the copula of these firm values as a distribution-free measure of the dependency between them. Without cross-ownership, this copula would be the Gaussian copula. Under cross-ownership, we consider in particular the behaviour of the copula of firm values in the lower left and upper right corners of the unit square. Depending on the type of cross-ownership and the considered corner, we either obtain error bounds on how well the copula of firm values under cross-ownership can be approximated by the Gaussian copula, or we see that the copula of firm values can be written as the copula of two linear combinations of exogenous asset values (note that these linear combinations are not lognormally distributed). These insights serve as a basis for our analysis of the tail dependence coefficient of firm values under cross-ownership. Under cross-ownership of debt only, firm values remain upper tail independent, whereas they become perfectly lower tail dependent if the correlation between exogenous asset values exceeds a certain positive threshold, which does not depend on the exact level of cross-ownership. Under cross-ownership of equity only, the situation is reversed in that firm values always remain lower tail independent, but upper tail independence is preserved if and only if the right tail behaviour of both firms' values is determined by the right tail behaviour of the firms' own exogenous asset values instead of that of the respective other firm's exogenous asset value.
Next, we return to systems of n≥2 firms and analyze the sensitivities of the no-arbitrage prices of equity and of the recovery claims of liabilities with respect to the model parameters. In the literature, such sensitivities with respect to exogenous asset values are provided by Gouri{\´e}roux et al. [2012], and we extend the existing results by considering how these no-arbitrage prices depend on the cross-ownership fractions and on the level of liabilities. For the former, we show that all prices are non-decreasing in any cross-ownership fraction in the model, and by use of a version of the Implicit Function Theorem we can also determine exact derivatives. For the latter, we show that the recovery value of debt and the equity value of a firm are non-decreasing and non-increasing, respectively, in the firm's nominal level of liabilities, but that the firm value is in general not monotone in the firm's level of liabilities. Furthermore, no-arbitrage prices of equity and of the recovery claims of liabilities of a firm are in general non-monotone in the nominal level of liabilities of other firms in the system. If we confine ourselves to one type of cross-ownership (i.e. debt or equity), we can derive more precise relationships. All the results can be transferred to risk-neutral prices before maturity. Finally, following Gouri{\´e}roux et al. [2012] and as a kind of extension of the above sensitivity results, we consider how immediate changes in the exogenous asset values of one or more firms at maturity affect the financial health of a system of n initially solvent firms. We start with some theoretical considerations on what we call the contagion effect, namely the change in the endogenous asset value of a firm caused by shocks on the exogenous assets of firms within the system. For the two-firm case, an explicit formula is available, making clear that in general (and in particular under cross-ownership of equity only) the effect of contagion can be positive as well as negative, i.e. it can both mitigate and exacerbate the change in the exogenous asset value of a firm. On the other hand, we cannot generally say that a tighter cross-ownership structure leads to bigger absolute contagion effects. Under cross-ownership of debt only, firms cannot profit from positive shocks beyond the direct effect on exogenous assets, as the contagion effect is always non-positive. Next, we are concerned with spillover effects of negative shocks on a subset of firms to other firms in the system (which experience non-negative shocks themselves), driving them into default due to large losses in their endogenous asset values. Extending the results of Glasserman and Young [2015], we provide a necessary condition for the shock to cause such an event, which also yields an upper bound for the probability of such an event. We further investigate in a simulation study how the stability of a system of firms exposed to multiple shocks depends on the model parameters. In doing so, we consider three network types (incomplete, core-periphery and ring networks) with simultaneous shocks on some of the firms wiping out a certain percentage of their exogenous assets.
Then we analyze, for all three types of cross-ownership (debt only, equity only, both debt and equity), how the shock intensity, the shock size, and network parameters such as the number of links in the network and the proportion of a firm's debt or equity held within the system of firms influence several output quantities, among them the total number of defaults and the relative loss in the sum of firm values. Comparing our results to the studies of Nier et al. [2007], Gai and Kapadia [2010] and Elliott et al. [2014], we can only partly confirm their findings with respect to the number of defaults. We conclude our work with a theoretical comparison of the complete network (where each firm holds a part of any other firm) and the ring network with respect to the number of defaults caused by a shock on a single firm, as is done by Allen and Gale [2000]. In line with the literature, we find that under cross-ownership of debt only, complete networks are "robust yet fragile" [Gai and Kapadia, 2010]: moderate shocks can either be completely withstood or drive the firm directly hit by the shock into default, but as soon as the shock exceeds a certain size, all firms are simultaneously in default. In contrast, firms default one by one in the ring network, with the first "contagious default" (i.e. a default of a firm not directly hit by the shock) already occurring for smaller shock sizes than in the complete network.}, subject = {Finanzmathematik}, language = {en} } @phdthesis{Hain2015, author = {Hain, Johannes}, title = {Valuation Algorithms for Structural Models of Financial Networks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-128108}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The thesis focuses on the valuation of firms in a system context where cross-holdings of the firms in liabilities and equities are allowed and, therefore, systemic risk can be modeled on a structural level. A main property of such models is that a pricing equilibrium has to be found in order to determine the firm values. While there exists a small but growing amount of research on the existence and uniqueness of such price equilibria, the literature is still somewhat inconsistent; for example, different authors define the underlying financial system in differing ways. Moreover, only a few articles pay close attention to procedures for finding the pricing equilibria, and the algorithms provided in the existing publications mainly reflect the individual authors' particular approach to the problem. Additionally, all existing methods have the drawback of potentially infinite runtime. For these reasons, the objectives of this thesis are as follows. First, a definition of a financial system is introduced in its most general form in Chapter 2, and it is shown that under a fairly mild regularity condition the financial system has a unique payment equilibrium. In Chapter 3, some extensions and differing definitions of financial systems from the literature are presented, and it is shown how these models can be embedded into the general model of the preceding chapter. Second, an overview of existing valuation algorithms for finding the equilibrium is given in Chapter 4, where the existing methods are generalized and their mathematical properties are highlighted.
Third, a completely new class of valuation algorithms is developed in Chapter 4 that incorporates the additional information of whether a firm is in default or solvent under a current payment vector. This results in procedures that are able to find the solution of the system in a finite number of iteration steps. In Chapter 5, the concepts developed in Chapter 4 are applied to more general financial systems in which more than one seniority level of debt is present. Chapter 6 develops optimal starting vectors for non-finite algorithms, and Chapter 7 compares the existing and the newly developed algorithms with respect to their efficiency in an extensive simulation study covering a wide range of possible settings for financial systems.}, subject = {Risikomanagement}, language = {en} } @phdthesis{Geiselhart2015, author = {Geiselhart, Roman}, title = {Advances in the stability analysis of large-scale discrete-time systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112963}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Several aspects of the stability analysis of large-scale discrete-time systems are considered. An important feature is that the right-hand side does not have to be continuous. In particular, constructive approaches to compute Lyapunov functions are derived and applied to several system classes. For large-scale systems, which are considered as an interconnection of smaller subsystems, we derive a new class of small-gain results that do not require the subsystems to be robust in some sense. Moreover, we not only study sufficiency of the conditions, but also state an assumption under which these conditions are necessary. In addition, gain construction methods are derived for several types of aggregation, quantifying how large a prescribed set of interconnection gains can be in order that a small-gain condition holds.}, subject = {Ljapunov-Funktion}, language = {en} } @phdthesis{Ciaramella2015, author = {Ciaramella, Gabriele}, title = {Exact and non-smooth control of quantum spin systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-118386}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {An efficient and accurate computational framework for solving control problems governed by quantum spin systems is presented. Spin systems are extremely important in modern quantum technologies such as nuclear magnetic resonance spectroscopy, quantum imaging and quantum computing. In these applications, two classes of quantum control problems arise: optimal control problems and exact-controllability problems, both with a bilinear control structure. These models correspond to the Schr{\"o}dinger-Pauli equation, describing the time evolution of a spinor, and the Liouville-von Neumann master equation, describing the time evolution of a density operator. This thesis focuses on quantum control problems governed by these models. An appropriate definition of the optimization objectives and of the admissible set of control functions allows one to construct controls with specific properties, which are in general required by the physics and the technologies involved in quantum control applications. A main purpose of this work is to address non-differentiable quantum control problems. For this reason, a computational framework is developed to address optimal control problems, with a possible L1-penalization term in the cost functional, and exact-controllability problems.
In both cases the set of admissible control functions is a subset of a Hilbert space. The bilinear control structure of the quantum model, the L1-penalization term and the control constraints generate strong non-linearities that make the corresponding control problems difficult to solve and analyse. The first part of this thesis focuses on the physical description of the spin of particles and of the magnetic resonance phenomenon. Afterwards, the controlled Schr{\"o}dinger-Pauli equation and the Liouville-von Neumann master equation are discussed. These equations, like many other controlled quantum models, can be represented by dynamical systems with a bilinear control structure. In the second part of this thesis, theoretical investigations of optimal control problems, with a possible L1-penalization term in the objective and with control constraints, are carried out. In particular, existence of solutions, optimality conditions, and regularity properties of the optimal controls are discussed. In order to solve these optimal control problems, semi-smooth Newton methods are developed and proved to be superlinearly convergent. The main difficulty in the implementation of a Newton method for optimal control problems comes from the dimension of the Jacobian operator: in discrete form, the Jacobian is a very large matrix, which makes its construction infeasible from a practical point of view. For this reason, the focus of this work is on inexact Krylov-Newton methods, which combine the Newton method with Krylov iterative solvers for linear systems and thus avoid the construction of the discrete Jacobian. In the third part of this thesis, two methodologies for the exact controllability of quantum spin systems are presented. The first method consists of a continuation technique, while the second is based on a particular reformulation of the exact-control problem. Both methodologies address minimum-L2-norm exact-controllability problems. In the fourth part, the thesis focuses on the numerical analysis of quantum control problems. In particular, the modified Crank-Nicolson scheme is discussed as an adequate time discretization of the Schr{\"o}dinger equation, the first-discretize-then-optimize strategy is used to obtain a discrete reduced-gradient formula for the differentiable part of the optimization objective, and implementation details and globalization strategies that guarantee an adequate numerical behaviour of semi-smooth Newton methods are treated. In the last part of this work, several numerical experiments are performed to validate the theoretical results and demonstrate the ability of the proposed computational framework to solve quantum spin control problems.}, subject = {Spinsystem}, language = {en} } @phdthesis{Boehm2015, author = {B{\"o}hm, Christoph}, title = {Loewner equations in multiply connected domains}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-129903}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The first goal of this thesis is to generalize Loewner's famous differential equation to multiply connected domains. The resulting differential equations are known as Komatu--Loewner differential equations. We discuss Komatu--Loewner equations for canonical domains (circular slit disks, circular slit annuli and parallel slit half-planes). Additionally, we give a generalisation to several slits and discuss parametrisations that lead to constant coefficients.
Moreover, we compare Komatu--Loewner equations with several slits to single-slit Loewner equations. Finally, we generalise Komatu--Loewner equations to hulls satisfying a local growth property.}, subject = {Biholomorphe Abbildung}, language = {en} } @phdthesis{Bauer2015, author = {Bauer, Ulrich Josef}, title = {Conformal Mappings onto Simply and Multiply Connected Circular Arc Polygon Domains}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-123914}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The goal of this thesis is to investigate conformal mappings onto circular arc polygon domains, i.e. domains that are bounded by polygons consisting of circular arcs instead of line segments. Conformal mappings onto circular arc polygon domains contain parameters in addition to the classical parameters of the Schwarz-Christoffel transformation. To contribute to the parameter problem for conformal mappings from the unit disk onto circular arc polygon domains, we investigate two special cases of these mappings. In the first case, we can describe the additional parameters if the bounding circular arc polygon is a polygon with straight sides. In the second case, we provide an approximation of the additional parameters if the circular arc polygon domain satisfies some symmetry conditions. These results allow us to draw conclusions about the connection between the additional parameters and the classical parameters of the mapping. For conformal mappings onto multiply connected circular arc polygon domains, we provide an alternative construction of the mapping formula that does not use the Schottky-Klein prime function. In the process of constructing our main result, mappings for domains of connectivity three or greater, we also provide a formula for conformal mappings onto doubly connected circular arc polygon domains. Comparing these mapping formulas with already known mappings allows us to provide values for some of the parameters of the mappings onto doubly connected circular arc polygon domains if the image domain is a polygonal domain. The different components of the mapping formula are constructed by using a slightly modified variant of the Poincar{\´e} theta series. This construction includes the design of a function to remove unwanted poles and of different versions of functions that are analytic on the domain of definition of the mapping functions and satisfy some special functional equations. We also provide the necessary concepts for numerically evaluating the conformal mappings onto multiply connected circular arc polygon domains. As the evaluation of such a map requires the solution of a differential equation, we provide a possible configuration of curves inside the preimage domain along which the equation can be solved, together with a description of the procedure for computing either the formula for the doubly connected case or that for connectivity three or greater.
We also describe the procedure for solving the parameter problem for multiply connected circular arc polygon domains.}, subject = {Konforme Abbildungen}, language = {en} } @phdthesis{Bauer2015a, author = {Bauer, Andreas}, title = {Argumentieren mit multiplen und dynamischen Repr{\"a}sentationen}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-022-1 (print)}, doi = {10.25972/WUP-978-3-95826-023-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112114}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {132}, year = {2015}, abstract = {The advent of the computer in mathematics classrooms has brought along a multitude of new possibilities of representation, among them multiple, dynamically linked representations of mathematical problems. This thesis answers the question of whether and how these kinds of representation are used by students in argumentations. In the empirical study, it was investigated quantitatively how strongly the form of representation given in the task influences students' written argumentations. In addition, a qualitative analysis identified specific patterns of use and described them by means of Toulmin's model of argumentation. These findings were used to formulate consequences regarding the use of multiple and/or dynamic representations in secondary school mathematics teaching.}, subject = {Argumentation}, language = {de} } @phdthesis{Aulbach2015, author = {Aulbach, Stefan}, title = {Contributions to Extreme Value Theory in Finite and Infinite Dimensions: With a Focus on Testing for Generalized Pareto Models}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-127162}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {Extreme value theory aims at modeling extreme but rare events from a probabilistic point of view. It is well known that so-called generalized Pareto distributions, which are briefly reviewed in Chapter 1, are the only reasonable probability distributions suited for modeling observations above a high threshold, such as waves exceeding the height of a certain dike, earthquakes having at least a certain intensity, and, after applying a simple transformation, share prices falling below some low threshold. However, there are cases for which a generalized Pareto model might fail. Therefore, Chapter 2 derives certain neighborhoods of a generalized Pareto distribution and provides several statistical tests for these neighborhoods, where the cases of observing finite-dimensional data and of observing continuous functions on [0,1] are considered. By using a notation based on so-called D-norms, it is shown that these tests consistently link both frameworks, the finite-dimensional and the functional one. Since the derivation of the asymptotic distributions of the test statistics requires certain technical restrictions, Chapter 3 analyzes these assumptions in more detail. In particular, it provides some examples of distributions that satisfy the null hypothesis and of those that do not. Since continuous copula processes are crucial tools for the functional versions of the proposed tests, it is also discussed whether those copula processes actually exist for a given set of data. Moreover, some practical advice is given on how to choose the free parameters incorporated in the test statistics.
Finally, a simulation study in Chapter 4 compares the three proposed test statistics with another test found in the literature that has a similar null hypothesis. This thesis ends with a short summary of the results and an outlook on further open questions.}, subject = {Extremwertstatistik}, language = {en} }