@article{HellmuthKlingenberg2022, author = {Hellmuth, Kathrin and Klingenberg, Christian}, title = {Computing Black Scholes with uncertain volatility — a machine learning approach}, series = {Mathematics}, volume = {10}, journal = {Mathematics}, number = {3}, issn = {2227-7390}, doi = {10.3390/math10030489}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-262280}, year = {2022}, abstract = {In financial mathematics, it is a typical approach to approximate financial markets operating in discrete time by continuous-time models such as the Black Scholes model. Fitting this model gives rise to difficulties due to the discrete nature of market data. We thus model the pricing process of financial derivatives by the Black Scholes equation, where the volatility is a function of a finite number of random variables. This reflects the influence of uncertain factors when determining volatility. The aim is to quantify the effect of this uncertainty when computing the price of derivatives. Our underlying method is the generalized Polynomial Chaos (gPC) method: we numerically compute the uncertainty of the solution by a stochastic Galerkin approach combined with a finite difference method. We present an efficient numerical variation of this method, which is based on a machine learning technique, the so-called Bi-Fidelity approach. This is illustrated with numerical examples.}, language = {en} }
@phdthesis{Mungenast2022, author = {Mungenast, Sebastian}, title = {Zur Bedeutung von Metakognition beim Umgang mit Mathematik - Dokumentation metakognitiver Aktivit{\"a}ten bei Studienanf{\"a}nger_innen, Entwicklung eines Kategoriensystems f{\"u}r Metakognition beim Umgang mit Mathematik und Er{\"o}rterung von Ansatzpunkten f{\"u}r Metakognition in der Analysis}, doi = {10.25972/OPUS-29311}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-293114}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {This thesis is an exploratory study of metacognition in dealing with mathematics. Building on the research literature presented, the use of metacognition is documented in a qualitative study of first-year students from various mathematics (teacher training) degree programmes. Using Mayring's qualitative content analysis, a category system for the concept of metacognition in mathematics is established that extends previous systematizations. Finally, the use of the corresponding metacognitive aspects is demonstrated with examples of various concepts and procedures from calculus classes.}, subject = {Metakognition}, language = {de} }
@phdthesis{Nedrenco2022, author = {Nedrenco, Dmitri}, title = {Axiomatisieren lernen mit Papierfalten : Entwicklung, Durchf{\"u}hrung und Auswertung eines Hochschulkurses f{\"u}r gymnasiale Lehramtsstudierende}, doi = {10.25972/OPUS-27938}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-279383}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {This thesis investigates mathematical paper folding, specifically single-fold origami (1-fach-Origami), in a university context. The thesis consists of three parts. The first part is essentially devoted to a subject-matter analysis of 1-fach-Origami. In the first chapter, we discuss the historical context of 1-fach-Origami, consider its axiomatic foundations and examine how axiomatizing 1-fach-Origami could contribute to an understanding of the concept of an axiom.
In the second chapter, we describe the design of the associated exploratory study and state our research goals and questions. In the third chapter, 1-fach-Origami is mathematized, defined and examined in depth. The second part deals with the courses »Axiomatisieren lernen mit Papierfalten« that we designed and taught. In the fourth chapter, we describe the teaching methodology and the design of the courses; the fifth chapter contains an excerpt of the courses. The third part describes the associated tests. In the sixth chapter, we explain the design of the tests as well as the testing methodology. In the seventh chapter, these tests are evaluated.}, subject = {Mathematikunterricht}, language = {de} }
@phdthesis{Karl2020, author = {Karl, Veronika}, title = {Augmented Lagrangian Methods for State Constrained Optimal Control Problems}, doi = {10.25972/OPUS-21384}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-213846}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {This thesis is concerned with the solution of control and state constrained optimal control problems, which are governed by elliptic partial differential equations. Problems of this type are challenging since they suffer from the low regularity of the multiplier corresponding to the state constraint. Applying an augmented Lagrangian method, we overcome these difficulties by working with multiplier approximations in \$L^2(\Omega)\$. For each problem class, we introduce the solution algorithm, carry out a thorough convergence analysis and illustrate our theoretical findings with numerical examples. The thesis is divided into two parts. The first part focuses on classical PDE-constrained optimal control problems. We start by studying linear-quadratic objective functionals, which include the standard tracking-type term and an additional regularization term, as well as the case where the regularization term is replaced by an \$L^1(\Omega)\$-norm term, which makes the problem ill-posed. We deepen our study of the augmented Lagrangian algorithm by examining the more complicated class of optimal control problems that are governed by a semilinear partial differential equation. The second part investigates the broader class of multi-player control problems. While the examination of jointly convex generalized Nash equilibrium problems (GNEPs) is a simple extension of the linear elliptic optimal control case, the complexity is increased significantly for pure GNEPs. The existence of solutions of jointly convex GNEPs is well-studied. However, solution algorithms may suffer from non-uniqueness of solutions. Therefore, the last part of this thesis is devoted to the analysis of the uniqueness of normalized equilibria.}, subject = {Optimale Kontrolle}, language = {en} }
@phdthesis{Lauerbach2020, author = {Lauerbach, Laura}, title = {Stochastic Homogenization in the Passage from Discrete to Continuous Systems - Fracture in Composite Materials}, doi = {10.25972/OPUS-21453}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-214534}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {This thesis treats three main topics: the passage from discrete to continuous models by means of \$\Gamma\$-convergence, random as well as periodic homogenization, and fracture enabled by non-convex Lennard-Jones type interaction potentials. Each of them is discussed in the following.
We consider a discrete model given by a one-dimensional chain of particles with randomly distributed interaction potentials. Our interest lies in the continuum limit, which yields the effective behaviour of the system. This limit is achieved as the number of atoms tends to infinity, which corresponds to a vanishing distance between the particles. The starting point of our analysis is an energy functional in a discrete system; its continuum limit is obtained by variational \$\Gamma\$-convergence. The \$\Gamma\$-convergence methods are combined with a homogenization process in the framework of ergodic theory, which allows us to focus on heterogeneous systems. On the one hand, composite materials or materials with impurities are modelled by a stochastic or periodic distribution of particles or interaction potentials. On the other hand, systems of one species of particles can be considered as random in cases where the orientation of particles matters. Nanomaterials, like chains of atoms, molecules or polymers, are an application of the heterogeneous chains in experimental sciences. A special interest is in fracture in such heterogeneous systems. We consider interaction potentials of Lennard-Jones type. The non-standard growth conditions and the convex-concave structure of the Lennard-Jones type interactions yield mathematical difficulties, but allow for fracture. The interaction potentials are long-range in the sense that their modulus decays more slowly than exponentially. Further, we allow for interactions beyond nearest neighbours, which is also referred to as long-range. The main mathematical issue is to bring together the Lennard-Jones type interactions with ergodic theorems in the limiting process as the number of particles tends to infinity. The blow-up of the potentials at zero prevents the use of standard extensions of the Akcoglu-Krengel subadditive ergodic theorem. We overcome this difficulty by an approximation of the interaction potentials which shows suitable Lipschitz and H{\"o}lder regularity. Beyond that, allowing for continuous probability distributions instead of only finitely many different potentials leads to a further challenge. The limiting integral functional of the energy, obtained by means of \$\Gamma\$-convergence, involves a homogenized energy density and allows for fracture, but without a fracture contribution in the energy. In order to refine this result, we rescale our model and consider its \$\Gamma\$-limit, which is of Griffith's type, consisting of an elastic part and a jump contribution. In a further approach, we study fracture at the level of the discrete energies. With an appropriate definition of fracture in the discrete setting, we define a fracture threshold separating the region of elasticity from that of fracture and consider the pointwise convergence of this threshold. This limit turns out to coincide with the one obtained in the variational \$\Gamma\$-convergence approach.}, subject = {Homogenisierung}, language = {en} }
@article{delAlamoLiMunketal.2020, author = {del Alamo, Miguel and Li, Housen and Munk, Axel and Werner, Frank}, title = {Variational Multiscale Nonparametric Regression: Algorithms and Implementation}, series = {Algorithms}, volume = {13}, journal = {Algorithms}, number = {11}, issn = {1999-4893}, doi = {10.3390/a13110296}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-219332}, year = {2020}, abstract = {Many modern statistically efficient methods come with tremendous computational challenges, often leading to large-scale optimisation problems.
In this work, we examine such computational issues for recently developed estimation methods in nonparametric regression with a specific view on image denoising. We consider in particular certain variational multiscale estimators which are statistically optimal in the minimax sense, yet computationally intensive. Such an estimator is computed as the minimiser of a smoothness functional (e.g., the TV norm) over the class of all estimators such that none of its coefficients with respect to a given multiscale dictionary is statistically significant. The resulting multiscale Nemirovski-Dantzig estimator (MIND) can incorporate any convex smoothness functional and combine it with a suitable dictionary, including wavelets, curvelets and shearlets. The computation of MIND in general requires solving a high-dimensional constrained convex optimisation problem with a specific structure of the constraints induced by the statistical multiscale testing criterion. To solve this explicitly, we discuss three different algorithmic approaches: the Chambolle-Pock, ADMM and semismooth Newton algorithms. Algorithmic details and an explicit implementation are presented, and the solutions are then compared numerically in a simulation study and on various test images. We thereby recommend the Chambolle-Pock algorithm in most cases for its fast convergence. We stress that our analysis can also be transferred to signal recovery and other denoising problems to recover more general objects whenever it is possible to borrow statistical strength from data patches of similar object structure.}, language = {en} }
@phdthesis{Boergens2020, author = {B{\"o}rgens, Eike Alexander Lars Guido}, title = {ADMM-Type Methods for Optimization and Generalized Nash Equilibrium Problems in Hilbert Spaces}, doi = {10.25972/OPUS-21877}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-218777}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {This thesis is concerned with a certain class of algorithms for the solution of constrained optimization problems and generalized Nash equilibrium problems in Hilbert spaces. This class of algorithms is inspired by the alternating direction method of multipliers (ADMM) and eliminates the constraints using an augmented Lagrangian approach. The alternating direction method consists of splitting the augmented Lagrangian subproblem into smaller and more easily manageable parts. Before the algorithms are discussed, a substantial amount of background material, including the theory of Banach and Hilbert spaces, fixed-point iterations as well as convex and monotone set-valued analysis, is presented. Thereafter, certain optimization problems and generalized Nash equilibrium problems are reformulated and analyzed using variational inequalities and set-valued mappings. The analysis of the algorithms developed in the course of this thesis is rooted in these reformulations as variational inequalities and set-valued mappings. The first algorithms discussed and analyzed are a weakly and a strongly convergent ADMM-type algorithm for convex, linearly constrained optimization. By equipping the associated Hilbert space with a suitable weighted scalar product, the analysis of these two methods is accomplished using the proximal point method and the Halpern method. The rest of the thesis is concerned with the development and analysis of ADMM-type algorithms for generalized Nash equilibrium problems that jointly share a linear equality constraint.
The first class of these algorithms is completely parallelizable and uses a forward-backward idea in its analysis, whereas the second class can be interpreted as a direct extension of the classical ADMM method to generalized Nash equilibrium problems. At the end of this thesis, the numerical behavior of the discussed algorithms is demonstrated on a collection of examples.}, subject = {Constrained optimization}, language = {en} }
@phdthesis{Kann2020, author = {Kann, Lennart}, title = {Statistical Failure Prediction with an Account for Prior Information}, doi = {10.25972/OPUS-20504}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-205049}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Prediction intervals are needed in many industrial applications. In mass production, small subgroups of unknown size frequently exist whose lifetime behavior differs from that of the remainder of the population. A risk assessment for such a subgroup consists of two steps: i) the estimation of the subgroup size, and ii) the estimation of the lifetime behavior of this subgroup. This thesis covers both steps. An efficient practical method to estimate the size of a subgroup is presented and benchmarked against other methods. A prediction interval procedure which includes prior information in the form of a Beta distribution is provided. This scheme is applied to the prediction of binomial and negative binomial counts. The effect of the population size on the prediction of the future number of failures is considered for a Weibull lifetime distribution, whose parameters are estimated from censored field data. Methods to obtain a prediction interval for the future number of failures with unknown sample size are presented. In many applications, failures are reported with a delay. The effects of such a reporting delay on the coverage properties of prediction intervals for the future number of failures are studied. The total failure probability of the two steps can be decomposed as a product probability. One-sided confidence intervals for such a product probability are presented.}, subject = {Konfidenzintervall}, language = {en} }
@misc{Breitenbach2018, author = {Breitenbach, Tim}, title = {Codes of examples for SQH method}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-167587}, year = {2018}, abstract = {Code examples for the paper "On the SQH Scheme to Solve Nonsmooth PDE Optimal Control Problems" by Tim Breitenbach and Alfio Borz{\`i}, published in the journal "Numerical Functional Analysis and Optimization" in 2019, DOI: 10.1080/01630563.2019.1599911.}, language = {en} }
@phdthesis{Meyer2021, author = {Meyer, Michael}, title = {Practical isogeny-based cryptography}, doi = {10.25972/OPUS-24682}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-246821}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {This thesis aims at providing efficient and side-channel protected implementations of isogeny-based primitives, and at their application in threshold protocols. It is based on a sequence of academic papers. Chapter 3 reviews the original variable-time implementation of CSIDH and introduces several optimizations, e.g., a significant improvement of isogeny computations by using both Montgomery and Edwards curves. In total, our improvements yield a speedup of 25\% compared to the original implementation. Chapter 4 presents the first practical constant-time implementation of CSIDH.
We describe how variable-time implementations of CSIDH leak information about private keys, and present ways to mitigate this. Further, we present several techniques to speed up the implementation. In total, our constant-time implementation achieves a rather small slowdown by a factor of 3.03. Chapter 5 reviews practical fault injection attacks on CSIDH and presents countermeasures. We evaluate different attack models theoretically and practically, using low-budget equipment. The proposed countermeasures mitigate the fault injection attacks while leading to only a small performance overhead of 7\%. Chapter 6 initiates the study of threshold schemes based on the Hard Homogeneous Spaces (HHS) framework of Couveignes. Using the HHS equivalent of Shamir's secret sharing in the exponents, we adapt isogeny-based schemes to the threshold setting. In particular, we present threshold versions of the CSIDH public key encryption and the CSI-FiSh signature scheme. Chapter 7 gives a sieving algorithm for finding pairs of consecutive smooth numbers that utilizes solutions to the Prouhet-Tarry-Escott (PTE) problem. Recent compact isogeny-based protocols, namely B-SIDH and SQISign, both require large primes that lie between two smooth integers. Finding such a prime can be seen as a special case of finding twin smooth integers under the additional stipulation that their sum is a prime.}, subject = {Kryptologie}, language = {en} }
@phdthesis{CalaCampana2021, author = {Cal{\`a} Campana, Francesca}, title = {Numerical methods for solving open-loop non zero-sum differential Nash games}, doi = {10.25972/OPUS-24590}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-245900}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {This thesis is devoted to a theoretical and numerical investigation of methods to solve open-loop non zero-sum differential Nash games. These problems arise in many applications, e.g., in biology, economics, and physics, where competition between different agents appears. In this case, the goal of each agent conflicts with those of the others, and a competition game can be interpreted as a coupled optimization problem for which, in general, an optimal solution does not exist. In fact, an optimal strategy for one player may be unsatisfactory for the others. For this reason, a solution of a game is sought as an equilibrium, and among the solution concepts proposed in the literature, that of a Nash equilibrium (NE) is the focus of this thesis. The building blocks of the resulting differential Nash games are a dynamical model with different control functions associated with different players that pursue non-cooperative objectives. In particular, the focus of this thesis is on differential models having linear or bilinear state-strategy structures. In this framework, in the first chapter, some well-known results are recalled, especially for non-cooperative linear-quadratic differential Nash games. Then, a bilinear Nash game is formulated and analysed. The main achievement in this chapter is Theorem 1.4.2 concerning the existence of Nash equilibria for non-cooperative differential bilinear games. This result is obtained assuming a sufficiently small time horizon T, and an estimate of T is provided in Lemma 1.4.8 using specific properties of the regularized Nikaido-Isoda function.
In Chapter 2, in order to solve a bilinear Nash game, a semi-smooth Newton (SSN) scheme combined with a relaxation method is investigated, where the choice of an SSN scheme is motivated by the presence of constraints on the players' actions that make the problem non-smooth. The resulting method is proved to be locally convergent in Theorem 2.1, and an estimate on the relaxation parameter is also obtained that relates the relaxation factor to the time horizon of a Nash equilibrium and to the other parameters of the game. For the bilinear Nash game, a Nash bargaining problem is also introduced and discussed, aiming at determining an improvement of all players' objectives with respect to the Nash equilibrium. A characterization of a bargaining solution is given in Theorem 2.2.1, and a numerical scheme based on this result is presented that allows this solution to be computed on the Pareto frontier. Results of numerical experiments based on a quantum model of two spin particles and on a population dynamics model with two competing species are presented that successfully validate the proposed algorithms. In Chapter 3, a functional formulation of the classical homicidal chauffeur (HC) Nash game is introduced and a new numerical framework for its solution in a time-optimal formulation is discussed. This methodology combines a Hamiltonian-based scheme with proximal penalty, used to determine the time horizon where the game takes place, with a Lagrangian optimal control approach and relaxation to solve the Nash game at a fixed end-time. The resulting numerical optimization scheme has a bilevel structure, which aims at decoupling the computation of the end-time from the solution of the pursuit-evasion game. Several numerical experiments are performed to show the ability of the proposed algorithm to solve the HC game. Focusing on the case where a collision may occur, the time for this event is determined. The last part of this thesis deals with the analysis of a novel sequential quadratic Hamiltonian (SQH) scheme for solving open-loop differential Nash games. This method is formulated in the framework of Pontryagin's maximum principle and represents an efficient and robust extension of the successive approximations strategy in the realm of Nash games. In the SQH method, the Hamilton-Pontryagin functions are augmented by a quadratic penalty term, and the Nikaido-Isoda function is used as a selection criterion. The key idea of this SQH scheme is that the PMP characterization of Nash games leads to a finite-dimensional Nash game for any fixed time. A class of problems for which this finite-dimensional game admits a unique solution is identified, and for this class of games, theoretical results are presented that prove the well-posedness of the proposed scheme. In particular, Proposition 4.2.1 is proved to show that the selection criterion on the Nikaido-Isoda function is fulfilled. A comparison of the computational performance of the SQH scheme and the previously discussed SSN-relaxation method is shown.
Applications to linear-quadratic Nash games and to variants with control constraints, weighted \$L^1\$ costs of the players' actions and tracking objectives are presented that corroborate the theoretical statements.}, subject = {Differential Games}, language = {en} }
@article{BreitenbachBorzi2020, author = {Breitenbach, Tim and Borz{\`i}, Alfio}, title = {The Pontryagin maximum principle for solving Fokker-Planck optimal control problems}, series = {Computational Optimization and Applications}, volume = {76}, journal = {Computational Optimization and Applications}, issn = {0926-6003}, doi = {10.1007/s10589-020-00187-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-232665}, pages = {499-533}, year = {2020}, abstract = {The characterization and numerical solution of two non-smooth optimal control problems governed by a Fokker-Planck (FP) equation are investigated in the framework of the Pontryagin maximum principle (PMP). The two FP control problems are related to the problem of determining open- and closed-loop controls for a stochastic process whose probability density function is modelled by the FP equation. In both cases, existence and PMP characterization of optimal controls are proved, and PMP-based numerical optimization schemes are implemented that solve the PMP optimality conditions to determine the controls sought. Results of experiments are presented that successfully validate the proposed computational framework and allow a comparison of the two control strategies.}, language = {en} }
@phdthesis{Raharja2021, author = {Raharja, Andreas Budi}, title = {Optimisation Problems with Sparsity Terms: Theory and Algorithms}, doi = {10.25972/OPUS-24195}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-241955}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {The present thesis deals with optimisation problems with sparsity terms, either in the constraints, which leads to cardinality-constrained problems, or in the objective function, which in turn leads to sparse optimisation problems. One of the primary aims of this work is to extend the so-called sequential optimality conditions to these two classes of problems. In recent years, sequential optimality conditions have become increasingly popular in the realm of standard nonlinear programming. In contrast to the better known Karush-Kuhn-Tucker conditions, they are genuine optimality conditions in the sense that every local minimiser satisfies these conditions without any further assumption. Lately they have also been extended to mathematical programmes with complementarity constraints. At around the same time, it was also shown that optimisation problems with sparsity terms can be reformulated into problems which possess similar structures to mathematical programmes with complementarity constraints. These recent developments are the impetus for the present work. Rather than working with the aforementioned reformulations, which involve an artificial variable, we shall first look directly at the problems themselves and derive sequential optimality conditions which are independent of any artificial variable. Afterwards we shall derive the weakest constraint qualifications associated with these conditions which relate them to the Karush-Kuhn-Tucker-type conditions. Another equally important aim of this work is to then consider the practicability of the derived sequential optimality conditions.
The previously mentioned reformulations open up the possibility of adapting methods which have proven successful in handling mathematical programmes with complementarity constraints. We will show that the safeguarded augmented Lagrangian method and some regularisation methods may generate a point satisfying the derived conditions.}, subject = {Optimierungsproblem}, language = {en} }
@article{HomburgWeissFrahmetal.2021, author = {Homburg, Annika and Weiß, Christian H. and Frahm, Gabriel and Alwan, Layth C. and G{\"o}b, Rainer}, title = {Analysis and forecasting of risk in count processes}, series = {Journal of Risk and Financial Management}, volume = {14}, journal = {Journal of Risk and Financial Management}, number = {4}, issn = {1911-8074}, doi = {10.3390/jrfm14040182}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-236692}, year = {2021}, abstract = {Risk measures are commonly used to prepare for a prospective occurrence of an adverse event. If we are concerned with discrete risk phenomena such as counts of natural disasters, counts of infections by a serious disease, or counts of certain economic events, then the required risk forecasts are to be computed for an underlying count process. In practice, however, the discrete nature of count data is sometimes ignored and risk forecasts are calculated based on Gaussian time series models. But even if methods from count time series analysis are used in an adequate manner, the performance of risk forecasting is affected by estimation uncertainty as well as certain discreteness phenomena. To get a thorough overview of the aforementioned issues in risk forecasting of count processes, a comprehensive simulation study was conducted, considering a broad variety of risk measures and count time series models. It becomes clear that Gaussian approximate risk forecasts substantially distort risk assessment and, thus, should be avoided. In order to account for the apparent estimation uncertainty in risk forecasting, we use bootstrap approaches for count time series. The relevance and the application of the proposed approaches are illustrated by real-data examples about counts of storm surges and counts of financial transactions.}, language = {en} }
@article{Pirner2021, author = {Pirner, Marlies}, title = {A review on BGK models for gas mixtures of mono and polyatomic molecules}, series = {Fluids}, volume = {6}, journal = {Fluids}, number = {11}, issn = {2311-5521}, doi = {10.3390/fluids6110393}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-250161}, year = {2021}, abstract = {We consider the Bhatnagar-Gross-Krook (BGK) model, an approximation of the Boltzmann equation describing the time evolution of a single monoatomic rarefied gas, which satisfies the same two main properties (conservation properties and an entropy inequality). However, in practical applications, one often has to deal with two additional physical issues. First, a gas often does not consist of only one species, but is a mixture of different species. Second, the particles can store energy not only in translational degrees of freedom but also in internal degrees of freedom such as rotations or vibrations (polyatomic molecules).
Therefore, we present here recent BGK models for gas mixtures of mono- and polyatomic particles and the existing mathematical theory for these models.}, language = {en} }
@article{SteudingSuriajaya2020, author = {Steuding, J{\"o}rn and Suriajaya, Ade Irma}, title = {Value-Distribution of the Riemann Zeta-Function Along Its Julia Lines}, series = {Computational Methods and Function Theory}, volume = {20}, journal = {Computational Methods and Function Theory}, issn = {1617-9447}, doi = {10.1007/s40315-020-00316-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-232621}, pages = {389-401}, year = {2020}, abstract = {For an arbitrary complex number \$a \neq 0\$ we consider the distribution of values of the Riemann zeta-function \$\zeta\$ at the \$a\$-points of the function \$\Delta\$ which appears in the functional equation \$\zeta(s) = \Delta(s)\zeta(1-s)\$. These \$a\$-points \$\delta_a\$ are clustered around the critical line \$1/2 + i\mathbb{R}\$, which happens to be a Julia line for the essential singularity of \$\zeta\$ at infinity. We observe a remarkable average behaviour for the sequence of values \$\zeta(\delta_a)\$.}, language = {en} }
@phdthesis{Biersack2024, author = {Biersack, Florian}, title = {Topological Properties of Quasiconformal Automorphism Groups}, doi = {10.25972/OPUS-35917}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-359177}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {The goal of this thesis is to study the topological and algebraic properties of the quasiconformal automorphism groups of simply and multiply connected domains in the complex plane, where the quasiconformal automorphism groups are endowed with the supremum metric on the underlying domain. More precisely, questions concerning central topological properties such as (local) compactness, (path-)connectedness and separability are studied, together with their dependence on the boundary of the corresponding domains, as well as completeness with respect to the supremum metric. Moreover, special subsets of the quasiconformal automorphism group of the unit disk are investigated, and concrete quasiconformal automorphisms are constructed. Finally, a possible application of quasiconformal unit disk automorphisms to symmetric cryptography is presented, in which a quasiconformal cryptosystem is defined and studied.}, subject = {Quasikonforme Abbildung}, language = {en} }
@phdthesis{Bossert2024, author = {Bossert, Patrick}, title = {Statistical structure and inference methods for discrete high-frequency observations of SPDEs in one and multiple space dimensions}, doi = {10.25972/OPUS-36113}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-361130}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {The focus of this thesis is on analysing a linear stochastic partial differential equation (SPDE) on a bounded domain. The first part of the thesis commences with an examination of a one-dimensional SPDE. In this context, we construct estimators for the parameters of a parabolic SPDE based on discrete observations of a solution in time and space on a bounded domain. We establish central limit theorems for a high-frequency asymptotic regime, showing substantially smaller asymptotic variances compared to existing estimation methods. Moreover, asymptotic confidence intervals are directly feasible. Our approach builds upon realized volatilities and their asymptotic illustration as the response of a log-linear model with a spatial explanatory variable.
This yields efficient estimators based on realized volatilities with optimal rates of convergence and minimal variances. We demonstrate our results by Monte Carlo simulations. Extending this framework, we analyse a second-order SPDE model in multiple space dimensions in the second part of this thesis and develop estimators for the parameters of this model based on discrete observations in time and space on a bounded domain. While parameter estimation for one and two spatial dimensions was established in recent literature, this is the first work that generalizes the theory to a general, multi-dimensional framework. Our methodology enables the construction of an oracle estimator for volatility within the underlying model. For proving central limit theorems, we use a high-frequency observation scheme. To showcase our results, we conduct a Monte Carlo simulation, highlighting the advantages of our novel approach in a multi-dimensional context.}, subject = {Stochastische partielle Differentialgleichung}, language = {en} }
@phdthesis{Koerner2024, author = {K{\"o}rner, Jacob}, title = {Theoretical and numerical analysis of Fokker-Planck optimal control problems by first- and second-order optimality conditions}, doi = {10.25972/OPUS-36299}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-362997}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {In this thesis, a variety of Fokker--Planck (FP) optimal control problems are investigated. Main emphasis is put on a first-- and second--order analysis of different optimal control problems, characterizing optimal controls, establishing regularity results for optimal controls, and providing a numerical analysis for a Galerkin--based numerical scheme. The Fokker--Planck equation is a partial differential equation (PDE) of linear parabolic type, deeply connected to the theory of stochastic processes and stochastic differential equations. In essence, it describes the evolution over time of the probability distribution of the state of an object or system of objects under the influence of both deterministic and stochastic forces. The FP equation is a cornerstone in understanding and modeling phenomena ranging from the diffusion and motion of molecules in a fluid to the fluctuations in financial markets. Two different types of optimal control problems are analyzed in this thesis. On the one hand, Fokker--Planck ensemble optimal control problems are considered that have a wide range of applications in controlling a system of multiple non--interacting objects. In this framework, the goal is to collectively drive each object into a desired state. On the other hand, tracking--type control problems are investigated, which are commonly used in parameter identification problems or stem from the field of inverse problems. Here, the aim is to determine certain parameters or functions of the FP equation such that the resulting probability distribution function takes a desired form, possibly observed by measurements. In both cases, we consider FP models where the control functions are part of the drift, arising only from the deterministic forces of the system. Therefore, the FP optimal control problem has a bilinear control structure. Box constraints on the controls may be present, and the focus is on time--space dependent controls for ensemble--type problems and on only time--dependent controls for tracking--type optimal control problems.
In the first chapter of the thesis, a proof of the connection between the FP equation and stochastic differential equations is provided. Additionally, stochastic optimal control problems, aiming to minimize an expected cost value, are introduced, and the corresponding formulation within a deterministic FP control framework is established. For the analysis of this PDE--constrained optimal control problem, the existence and regularity of solutions to the FP problem are investigated. New \$L^\infty\$--estimates for solutions are established for low space dimensions under mild assumptions on the drift. Furthermore, based on the theory of Bessel potential spaces, new smoothness properties are derived for solutions to the FP problem in the case of only time--dependent controls. Due to these properties, the control--to--state map, which associates the control functions with the corresponding solution of the FP problem, is well--defined, Fr{\'e}chet differentiable and compact for suitable Lebesgue spaces or Sobolev spaces. The existence of optimal controls is proven under various assumptions on the space of admissible controls and objective functionals. First--order optimality conditions are derived using the adjoint system. The resulting characterization of optimal controls is exploited to achieve higher regularity of optimal controls, as well as their state and co--state functions. Since the FP optimal control problem is non--convex due to its bilinear structure, a first--order analysis should be complemented by a second--order analysis. Therefore, a second--order analysis for the ensemble--type control problem in the case of \$H^1\$--controls in time and space is performed, and sufficient second--order conditions are provided. Analogous results are obtained for the tracking--type problem for only time--dependent controls. The developed theory on the control problem and the first-- and second--order optimality conditions is applied to perform a numerical analysis for a Galerkin discretization of the FP optimal control problem. The main focus is on tracking--type problems with only time--dependent controls. The idea of the presented Galerkin scheme is to first approximate the PDE--constrained optimization problem by a system of ODE--constrained optimization problems. Then, conditions on the problem are presented such that the convergence of optimal controls from one problem to the other can be guaranteed. For this purpose, a class of bilinear ODE--constrained optimal control problems arising from the Galerkin discretization of the FP problem is analyzed. First-- and second--order optimality conditions are established, and a numerical analysis is performed. A discretization with linear finite elements for the state and co--state problem is investigated, while the control functions are approximated by piecewise constant or piecewise quadratic continuous polynomials. The latter choice is motivated by the bilinear structure of the optimal control problem, allowing one to overcome the discrepancies between a discretize--then--optimize and an optimize--then--discretize approach. Moreover, second--order accuracy results are shown using the space of continuous, piecewise quadratic polynomials as the discrete space of controls.
Lastly, the theoretical results and the second--order convergence rates are numerically verified.}, subject = {Parabolische Differentialgleichung}, language = {en} }
@phdthesis{Birke2024, author = {Birke, Claudius B.}, title = {Low Mach and Well-Balanced Numerical Methods for Compressible Euler and Ideal MHD Equations with Gravity}, doi = {10.25972/OPUS-36330}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-363303}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Physical regimes characterized by low Mach numbers and steep stratifications pose severe challenges to standard finite volume methods. We present three new methods specifically designed to navigate these challenges by being both low Mach compliant and well-balanced. These properties are crucial for numerical methods to efficiently and accurately compute solutions in the regimes considered. First, we concentrate on the construction of an approximate Riemann solver within Godunov-type finite volume methods. A new relaxation system gives rise to a two-speed relaxation solver for the Euler equations with gravity. Derived from fundamental mathematical principles, this solver reduces the artificial dissipation in the subsonic regime and preserves hydrostatic equilibria. The solver is particularly stable as it satisfies a discrete entropy inequality, preserves positivity of density and internal energy, and suppresses checkerboard modes. The second scheme is designed to solve the equations of ideal MHD and combines different approaches. In order to deal with low Mach numbers, it makes use of a low-dissipation version of the HLLD solver and a partially implicit time discretization to relax the CFL time step constraint. A Deviation Well-Balancing method is employed to preserve a priori known magnetohydrostatic equilibria, thereby reducing the magnitude of spatial discretization errors in strongly stratified setups. The third scheme relies on an IMEX approach based on a splitting of the MHD equations. The slow scale part of the system is discretized by a time-explicit Godunov-type method, whereas the fast scale part is discretized implicitly by central finite differences. The numerical dissipation terms and the CFL time step restriction of the method depend solely on the slow waves of the explicit part, making the method particularly suited for subsonic regimes. Deviation Well-Balancing ensures the preservation of a priori known magnetohydrostatic equilibria. The three schemes are applied to various numerical experiments for the compressible Euler and ideal MHD equations, demonstrating their ability to accurately simulate flows in regimes with low Mach numbers and strong stratification even on coarse grids.}, subject = {Magnetohydrodynamik}, language = {en} }
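The well-balanced property emphasized in the Birke2024 abstract above can be made concrete with a worked equation. The following LaTeX snippet states the hydrostatic equilibrium of the compressible Euler equations with gravity, the family of steady states that a well-balanced scheme is designed to reproduce exactly at the discrete level; it is a minimal sketch using generic textbook notation (density \rho, pressure p, gravitational potential \phi) that is not taken from the thesis itself.

\documentclass{article}
\usepackage{amsmath}
\begin{document}
% Hydrostatic equilibrium of the compressible Euler equations with gravity:
% zero velocity, with the pressure gradient balancing the gravitational force.
% Notation (\rho, p, \phi) is generic and not taken from the thesis.
\[
  \mathbf{u} = \mathbf{0}, \qquad \nabla p = -\rho \, \nabla \phi .
\]
% A well-balanced scheme preserves such states up to machine precision, so
% small perturbations on top of a steep stratification are not swamped by
% the truncation error of the background equilibrium.
\end{document}

As the name suggests, the Deviation Well-Balancing technique mentioned in the abstract achieves this by discretizing deviations from such an a priori known equilibrium rather than the full state variables.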