@phdthesis{Gaviraghi2017, author = {Gaviraghi, Beatrice}, title = {Theoretical and numerical analysis of Fokker-Planck optimal control problems for jump-diffusion processes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-145645}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {The topic of this thesis is the theoretical and numerical analysis of optimal control problems whose differential constraints are given by Fokker-Planck models related to jump-diffusion processes. We tackle the issue of controlling a stochastic process by formulating a deterministic optimization problem. The key idea of our approach is to focus on the probability density function of the process, whose time evolution is modeled by the Fokker-Planck equation. Our control framework is advantageous since it allows one to model the action of the control over the entire range of the process, whose statistics are characterized by the shape of its probability density function. We first investigate jump-diffusion processes, illustrating their main properties. We define stochastic initial-value problems and present results on the existence and uniqueness of their solutions. We then discuss how numerical solutions of stochastic problems are computed, focusing on the Euler-Maruyama method. We focus our attention on jump-diffusion models with time- and space-dependent coefficients and jumps given by a compound Poisson process. We derive the related Fokker-Planck equations, which take the form of partial integro-differential equations. Their differential term is governed by a parabolic operator, while the nonlocal integral operator is due to the presence of the jumps. The derivation is carried out in two cases. On the one hand, we consider a process with unbounded range. On the other hand, we confine the dynamics of the sample paths to a bounded domain, and thus the behavior of the process near the boundaries has to be specified. Throughout this thesis, we set the barriers of the domain to be reflecting. The Fokker-Planck equation, endowed with initial and boundary conditions, gives rise to Fokker-Planck problems. Their solvability is discussed in suitable functional spaces. The properties of their solutions are examined, namely their regularity, positivity and probability mass conservation. Since closed-form solutions to Fokker-Planck problems are usually not available, one has to resort to numerical methods. The first main achievement of this thesis is the definition and analysis of conservative and positivity-preserving numerical methods for Fokker-Planck problems. Our SIMEX1 and SIMEX2 (Splitting-Implicit-Explicit) schemes are defined within the framework given by the method of lines. The differential operator is discretized by a finite volume scheme given by the Chang-Cooper method, while the integral operator is approximated by a mid-point rule. This leads to a large system of ordinary differential equations, which we approximate with the Strang-Marchuk splitting method. This technique decomposes the original problem into a sequence of subproblems with simpler structure, which are separately solved and linked to each other through initial conditions and final solutions. After performing the splitting step, we carry out the time integration with first- and second-order time-differencing methods. These steps give rise to the SIMEX1 and SIMEX2 methods, respectively. A full convergence and stability analysis of our schemes is included. 
Moreover, we prove that the positivity and the mass conservation of the solution to Fokker-Planck problems are satisfied at the discrete level by the numerical solutions computed with the SIMEX schemes. The second main achievement of this thesis is the theoretical analysis and the numerical solution of optimal control problems governed by Fokker-Planck models. The field of optimal control deals with finding control functions in such a way that given cost functionals are minimized. Our framework aims at the minimization of the difference between a known sequence of values and the first moment of a jump-diffusion process; therefore, this formulation can also be considered as a parameter estimation problem for stochastic processes. Two cases are discussed, in which the form of the cost functional is continuous-in-time and discrete-in-time, respectively. The control variable enters the state equation as a coefficient of the Fokker-Planck partial integro-differential operator. We also include in the cost functional an \$L^1\$-penalization term, which enhances the sparsity of the solution. Therefore, the resulting optimization problem is nonconvex and nonsmooth. We derive the first-order optimality systems satisfied by the optimal solution. The computation of the optimal solution is carried out by means of proximal iterative schemes in an infinite-dimensional framework.}, subject = {Fokker-Planck-Gleichung}, language = {en} } @phdthesis{Mauder2012, author = {Mauder, Markus}, title = {Time-Optimal Control of the Bi-Steerable Robot: A Case Study in Optimal Control of Nonholonomic Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-75036}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {In this thesis, time-optimal control of the bi-steerable robot is addressed. The bi-steerable robot, a vehicle with two independently steerable axles, is a complex nonholonomic system with applications in many areas of land-based robotics. Motion planning and optimal control are challenging tasks for this system, since standard control schemes do not apply. The model of the bi-steerable robot considered here is a reduced kinematic model with the driving velocity and the steering angles of the front and rear axle as inputs. The steering angles of the two axles can be set independently of each other. The reduced kinematic model is a control system with affine and non-affine inputs, as the driving velocity enters the system linearly, whereas the steering angles enter nonlinearly. In this work, a new approach to solve the time-optimal control problem for the bi-steerable robot is presented. In contrast to most standard methods for time-optimal control, our approach does not exclusively rely on discretization and purely numerical methods. Instead, the Pontryagin Maximum Principle is used to characterize candidates for time-optimal solutions. The resultant boundary value problem is solved by optimization to obtain solutions to the path planning problem over a given time horizon. The time horizon is decreased and the path planning is iterated to approximate a time-optimal solution. An optimality condition is introduced which depends on the number of cusps, i.e., reversals of the driving direction of the robot. This optimality condition allows one to single out non-optimal solutions with too many cusps. 
In general, our approach only gives approximations of time-optimal solutions, since only normal regular extremals are considered as solutions to the path planning problem, and the path planning is terminated when an extremal with a minimal number of cusps is found. However, for most desired configurations, normal regular extremals with the minimal number of cusps provide time-optimal solutions for the bi-steerable robot. The convergence of the approach is analyzed and its probabilistic completeness is shown. Moreover, simulation results on time-optimal solutions for the bi-steerable robot are presented.}, subject = {Mobiler Roboter}, language = {en} } @phdthesis{Akindeinde2012, author = {Akindeinde, Saheed Ojo}, title = {Numerical Verification of Optimality Conditions in Optimal Control Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76065}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {This thesis is devoted to numerical verification of optimality conditions for non-convex optimal control problems. In the first part, we are concerned with a-posteriori verification of sufficient optimality conditions. It is common knowledge that verification of such conditions for general non-convex PDE-constrained optimization problems is very challenging. We propose a method to verify second-order sufficient conditions for a general class of optimal control problems. If the proposed verification method confirms the fulfillment of the sufficient condition, then a-posteriori error estimates can be computed. A special ingredient of our method is an error analysis for the Hessian of the underlying optimization problem. We derive conditions under which positive definiteness of the Hessian of the discrete problem implies positive definiteness of the Hessian of the continuous problem. The results are complemented with numerical experiments. In the second part, we investigate adaptive methods for optimal control problems with finitely many control parameters. We analyze a-posteriori error estimates based on verification of second-order sufficient optimality conditions using the method developed in the first part. Reliability and efficiency of the error estimator are shown. We illustrate, through numerical experiments, the use of the estimator in guiding adaptive mesh refinement.}, subject = {Optimale Kontrolle}, language = {en} } @phdthesis{Wongkaew2015, author = {Wongkaew, Suttida}, title = {On the control through leadership of multi-agent systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-120914}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The investigation of interacting multi-agent models is a new field of mathematical research with applications to the study of behavior in groups of animals or communities of people. One interesting feature of multi-agent systems is collective behavior. From the mathematical point of view, one of the challenging issues concerning these dynamical models is the development of control mechanisms that are able to influence the time evolution of these systems. In this thesis, we focus on the study of controllability, stabilization and optimal control problems for multi-agent systems, considering the following three models: The first one is the Hegselmann-Krause opinion formation (HK) model. The HK dynamics describes how individuals' opinions are changed by the interaction with others taking place in a bounded domain of confidence. 
The study of this model focuses on determining feedback controls in order to drive the agents' opinions to reach a desired agreement. The second model is the Heider social balance (HB) model. The HB dynamics explains the evolution of relationships in a social network. One purpose of studying this system is the construction of a control function in order to steer the relationships to reach a friendship state. The third model that we discuss is a flocking model describing collective motion observed in biological systems. The flocking model under consideration includes self-propelling, friction, attraction, repulsion, and alignment features. We investigate a control for steering the flocking system to track a desired trajectory. Common to all these systems is our strategy to add a leader agent that interacts with all other members of the system and includes the control mechanism. Our control through leadership approach is developed using classical theoretical control methods and a model predictive control (MPC) scheme. To apply the former method, for each model the stability of the corresponding linearized system near consensus is investigated. Further, local controllability is examined. However, only for the Hegselmann-Krause opinion formation model is a feedback control determined that steers the agents' opinions to converge globally to a desired agreement. The MPC approach is an optimal control strategy based on numerical optimization. To apply the MPC scheme, optimal control problems for each model are formulated, with objective functions that differ depending on the desired goal of the problem. The first-order necessary optimality conditions for each problem are presented. Moreover, for the numerical treatment, a sequence of open-loop discrete optimality systems is solved by accurate Runge-Kutta schemes, and in the optimization procedure, a nonlinear conjugate gradient solver is implemented. Finally, numerical experiments are performed to investigate the properties of the multi-agent models and demonstrate the ability of the proposed control strategies to drive multi-agent systems to attain a desired consensus and to track a given trajectory.}, subject = {Mehragentensystem}, language = {en} } @phdthesis{Schindele2016, author = {Schindele, Andreas}, title = {Proximal methods in medical image reconstruction and in nonsmooth optimal control of partial differential equations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-136569}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Proximal methods are iterative optimization techniques for functionals \$J = J_1 + J_2\$ consisting of a differentiable part \$J_2\$ and a possibly nondifferentiable part \$J_1\$. In this thesis, proximal methods for finite- and infinite-dimensional optimization problems are discussed. In finite dimensions, they solve \$\ell^1\$- and TV-minimization problems that are effectively applied to image reconstruction in magnetic resonance imaging (MRI). Convergence of these methods in this setting is proved. The proposed proximal scheme is compared to a split proximal scheme and it achieves a better signal-to-noise ratio. In addition, an application that uses parallel imaging is presented. In infinite dimensions, these methods are discussed for solving nonsmooth linear and bilinear elliptic and parabolic optimal control problems. In particular, fast convergence of these methods is proved. Furthermore, for benchmarking purposes, truncated proximal schemes are compared to an inexact semismooth Newton method. 
Results of numerical experiments are presented that demonstrate the computational effectiveness of our proximal schemes, which need less computation time than the semismooth Newton method in most cases, and that successfully validate the theoretical estimates.}, subject = {Optimale Kontrolle}, language = {en} } @phdthesis{Merger2016, author = {Merger, Juri}, title = {Optimal Control and Function Identification in Biological Processes}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-138900}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Mathematical modelling, simulation, and optimisation are core methodologies for future developments in engineering, natural, and life sciences. This work aims at applying these mathematical techniques in the field of biological processes, with a focus on the wine fermentation process, which is chosen as a representative model. In the literature, basic models for the wine fermentation process consist of a system of ordinary differential equations. They model the evolution of the yeast population number as well as the concentrations of assimilable nitrogen, sugar, and ethanol. In this thesis, the concentration of molecular oxygen is also included in order to model the change of the metabolism of the yeast from an aerobic to an anaerobic one. Further, a more sophisticated toxicity function is used. It provides simulation results that match experimental measurements better than a linear toxicity model. Moreover, a further equation for the temperature plays a crucial role in this work, as it makes it possible to influence the fermentation process in a desired way by changing the temperature of the system via a cooling mechanism. From the point of view of the wine industry, it is necessary to cope with large-scale fermentation vessels, where spatial inhomogeneities of concentrations and temperature are likely to arise. Therefore, a system of reaction-diffusion equations is formulated in this work, which acts as an approximation for a model including computationally very expensive fluid dynamics. In addition to the modelling issues, an optimal control problem for the proposed reaction-diffusion fermentation model with temperature boundary control is presented and analysed. Variational methods are used to prove the existence of unique weak solutions to this non-linear problem. In this framework, it is possible to exploit the Hilbert space structure of state and control spaces to prove the existence of optimal controls. Additionally, first-order necessary optimality conditions are presented. They characterise controls that minimise an objective functional formulated with the purpose of minimising the final sugar concentration. A numerical experiment shows that the final concentration of sugar can be reduced by a suitably chosen temperature control. The second part of this thesis deals with the identification of an unknown function that participates in a dynamical model. For models with ordinary differential equations, where parts of the dynamics cannot be deduced due to the complexity of the underlying phenomena, a minimisation problem is formulated. By minimising the deviations between simulation results and measurements, the best possible function from a trial function space is found. The analysis of this function identification problem covers the proof of the differentiability of the function-to-state operator, the existence of minimisers, and the sensitivity analysis by means of the data-to-function mapping. 
Moreover, the presented function identification method is extended to stochastic differential equations. Here, the objective functional consists of the difference between measured values and the expected value of the stochastic process solving the stochastic differential equation. Using a Fokker-Planck equation that governs the probability density function of the process, the probabilistic problem of simulating a stochastic process is cast into a deterministic partial differential equation problem. Proofs of unique solvability of the forward equation, the existence of minimisers, and first-order necessary optimality conditions are presented. The application of the function identification framework to the wine fermentation model aims at finding the shape of the toxicity function and is carried out for the deterministic as well as the stochastic case.}, subject = {Optimale Kontrolle}, language = {en} } @phdthesis{Wurst2015, author = {Wurst, Jan-Eric}, title = {Hp-Finite Elements for PDE-Constrained Optimization}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-024-5 (print)}, doi = {10.25972/WUP-978-3-95826-025-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-115027}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {188}, year = {2015}, abstract = {This thesis deals with the hp-finite element method (FEM) for linear-quadratic optimal control problems. Here, an objective functional that penalizes the distance to a desired state and high control costs (as a regularization) is minimized subject to an elliptic partial differential equation as a constraint. In the presence of control constraints, the first-order necessary conditions, which are typically used by numerical solution methods, can be formulated as a semismooth projection formula. Consequently, optimal solutions are often non-smooth as well. The technique of hp-discretization accounts for this fact and approximates rough functions on fine meshes, while elements of higher order are used on regions where the solution is smooth. The first achievement of this thesis is the successful application of the hp-FEM to two related problem classes: Neumann and interface control problems. These are first solved with corresponding a-priori refinement strategies, namely the boundary-concentrated (bc) FEM or the interface-concentrated (ic) FEM. These strategies generate meshes that are strongly refined towards the boundary or the interface, respectively. To prove an algebraic reduction of the approximation error for both techniques, an elementwise interpolating function is constructed. Moreover, the local and global regularity of solutions is treated, because it is decisive for the speed of convergence. Since the bc- and ic-FEM use low polynomial degrees for elements touching the boundary or the interface, respectively, new L2- and L∞-error estimates can be derived. The latter forms the basis of an a-priori strategy for updating the regularization parameter in the objective functional in order to solve problems with bang-bang character. Furthermore, the conventional hp-idea, which consists of geometrically grading the mesh towards the corners of the domain, is transferred to the solution of optimal control problems (vc-FEM). 
It is shown that the variables of the coupled optimality system possess regularity in countably normed spaces. This results in exponential convergence with respect to the number of degrees of freedom. The second achievement of this thesis is the development of a fully adaptive hp-interior-point method that can solve problems with distributed or Neumann control. The underlying barrier problem has a nonlinear optimality system, which poses a numerical challenge: the stable computation of integrals over functions with possible singularities in elements of higher order. This problem is solved by monitoring the control at the integration points. Feasibility at these points is guaranteed by a smoothing step. In this work, both the convergence of an interior-point method with a smoothing step and a-posteriori bounds for the discretization error are shown. This leads to an adaptive solution algorithm whose mesh refinement is based on the expansion of the solution into a Legendre series. Here, the decay behavior of the coefficients serves as a smoothness indicator and is used to decide between h- and p-refinement.}, subject = {Finite-Elemente-Methode}, language = {en} } @phdthesis{Sprengel2017, author = {Sprengel, Martin}, title = {A Theoretical and Numerical Analysis of a Kohn-Sham Equation and Related Control Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153545}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {In this work, multi-particle quantum optimal control problems are studied in the framework of time-dependent density functional theory (TDDFT). Quantum control problems are of great importance in both fundamental research and applications of atomic and molecular systems. Typical applications are laser-induced chemical reactions, nuclear magnetic resonance experiments, and quantum computing. Theoretically, the problem of how to describe a non-relativistic system of multiple particles is solved by the Schr{\"o}dinger equation (SE). However, due to the exponential increase in numerical complexity with the number of particles, it is impossible to directly solve the Schr{\"o}dinger equation for large systems of interest. An efficient and successful approach to overcome this difficulty is the framework of TDDFT and the use of the time-dependent Kohn-Sham (TDKS) equations therein. This is done by replacing the multi-particle SE with a set of nonlinear single-particle Schr{\"o}dinger equations that are coupled through an additional potential. Despite the fact that TDDFT is widely used for physical and quantum chemical calculations and software packages for its use are readily available, its mathematical foundation is still under active development and even fundamental issues remain unproven today. The main purpose of this thesis is to provide a consistent and rigorous setting for the TDKS equations and for the related optimal control problems. In the first part of the thesis, the frameworks of density functional theory (DFT) and TDDFT are introduced. This includes a detailed presentation of the different functional sets forming DFT. Furthermore, the known equivalence of the TDKS system to the original SE problem is discussed. 
To implement the TDDFT framework for multi-particle computations, the TDKS equations provide one of the most successful approaches nowadays. However, only a few mathematical results concerning these equations are available, and these results do not cover all issues that arise in the formulation of optimal control problems governed by the TDKS model. It is the purpose of the second part of this thesis to address these issues, such as the higher regularity of TDKS solutions and the case of weaker requirements on external (control) potentials, which are instrumental for the formulation of well-posed TDKS control problems. For this purpose, in this work, existence and uniqueness of TDKS solutions are investigated in the Galerkin framework and using energy estimates for the nonlinear TDKS equations. In the third part of this thesis, optimal control problems governed by the TDKS model are formulated and investigated. For this purpose, relevant cost functionals that model the purpose of the control are discussed. Hence, TDKS control problems result from the requirement of optimising the given cost functionals subject to the differential constraint given by the TDKS equations. The analysis of these problems is novel and represents one of the main contributions of the present thesis. In particular, existence of minimizers is proved and their characterization by TDKS optimality systems is discussed in detail. To this end, Fr{\'e}chet differentiability of the TDKS model and of the cost functionals is addressed, considering an \(H^1\) cost of the control. This part is concluded by deriving the reduced gradient in the \(L^2\) and \(H^1\) inner products. While the \(L^2\) optimization is widespread in the literature, the choice of the \(H^1\) gradient is motivated in this work by theoretical considerations and by the resulting numerical advantages. The last part of the thesis is devoted to the numerical approximation of the TDKS optimality systems and to their solution by gradient-based optimization techniques. For the former purpose, Strang time-splitting pseudo-spectral schemes are discussed, including a review of some recent theoretical estimates for these schemes and a numerical validation of these estimates. For the latter purpose, nonlinear (projected) conjugate gradient methods are implemented and are used to validate the theoretical analysis of this thesis with results of numerical experiments with different cost functional settings.}, subject = {Optimale Kontrolle}, language = {en} } @phdthesis{Breitenbach2019, author = {Breitenbach, Tim}, title = {A sequential quadratic Hamiltonian scheme for solving optimal control problems with non-smooth cost functionals}, doi = {10.25972/OPUS-18217}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-182170}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {This thesis deals with a new so-called sequential quadratic Hamiltonian (SQH) iterative scheme to solve optimal control problems with differential models and cost functionals ranging from smooth to discontinuous and non-convex. This scheme is based on the Pontryagin maximum principle (PMP), which provides necessary optimality conditions for an optimal solution. In this framework, a Hamiltonian function is defined that attains its minimum pointwise at the optimal solution of the corresponding optimal control problem. In the SQH scheme, this Hamiltonian function is augmented by a quadratic penalty term involving the current control function and the control function from the previous iteration. 
The heart of the SQH scheme is to minimize this augmented Hamiltonian function pointwise in order to determine a control update. Since the PMP does not require any differentiability with respect to the control argument, the SQH scheme can be used to solve optimal control problems with smooth as well as non-convex or even discontinuous cost functionals. The main achievement of the thesis is the formulation of a robust and efficient SQH scheme and a framework in which the convergence analysis of the SQH scheme can be carried out. In this framework, convergence of the scheme means that the calculated solution fulfills the PMP condition. The governing differential models of the considered optimal control problems are ordinary differential equations (ODEs) and partial differential equations (PDEs). In the PDE case, elliptic and parabolic equations as well as the Fokker-Planck (FP) equation are considered. For both the ODE and the PDE cases, assumptions are formulated for which it can be proved that a solution to an optimal control problem has to fulfill the PMP. The obtained results are essential for the discussion of the convergence analysis of the SQH scheme. This analysis has two parts. The first one is the well-posedness of the scheme, which means that all steps of the scheme can be carried out and provide a result in finite time. The second part is the PMP consistency of the solution. This means that the solution of the SQH scheme fulfills the PMP conditions. In the ODE case, the following results are obtained that state well-posedness of the SQH scheme and the PMP consistency of the corresponding solution. Lemma 7 states the existence of a pointwise minimum of the augmented Hamiltonian. Lemma 11 proves the existence of a weight of the quadratic penalty term such that the minimization of the corresponding augmented Hamiltonian results in a control update that reduces the value of the cost functional. Lemma 12 states that the SQH scheme stops if an iterate is PMP optimal. Theorem 13 proves the cost-functional-reducing properties of the SQH control updates. The main result is given in Theorem 14, which states the pointwise convergence of the SQH scheme towards a PMP-consistent solution. In this ODE framework, the SQH method is applied to two optimal control problems. The first one is an optimal quantum control problem, where it is shown that the SQH method converges much faster to an optimal solution than a globalized Newton method. The second optimal control problem is an optimal tumor treatment problem with a system of coupled, highly non-linear state equations that describe the tumor growth. It is shown that the framework in which the convergence of the SQH scheme is proved is applicable to this highly non-linear case. Next, the case of PDE control problems is considered. First, a general framework is discussed in which a solution to the corresponding optimal control problem fulfills the PMP conditions. In this case, many theoretical estimates are presented in Theorem 59 and Theorem 64 to prove, in particular, the essential boundedness of the state and adjoint variables. The steps for the convergence analysis of the SQH scheme are analogous to those of the ODE case and result in Theorem 27, which states the PMP consistency of the solution obtained with the SQH scheme. This framework is applied to different elliptic and parabolic optimal control problems, including linear and bilinear control mechanisms, as well as non-linear state equations. 
Moreover, the SQH method is discussed for solving a state-constrained optimal control problem in an augmented formulation. In this case, it is shown in Theorem 30 that, as the weight of the augmentation term penalizing the violation of the state constraint is increased, the measure of this state constraint violation by the corresponding solution converges to zero. Furthermore, an optimal control problem with a non-smooth \(L^1\)-tracking term and a non-smooth state equation is investigated. For this purpose, an adjoint equation is defined and the SQH method is used to solve the corresponding optimal control problem. The final part of this thesis is devoted to a class of FP models related to specific stochastic processes. The discussion starts with a focus on random walks that also include jumps. This framework allows the derivation of a discrete FP model corresponding to a continuous FP model with jumps and boundary conditions ranging from absorbing to totally reflecting. This discussion allows the consideration of a drift control resulting from an anisotropic probability of the steps of the random walk. Thereafter, in the PMP framework, two drift-diffusion processes and the corresponding FP models with two different control strategies for an optimal control problem with an expectation functional are considered. In the first strategy, the controls depend on time, and in the second one, the controls depend on space and time. In both cases, a solution to the corresponding optimal control problem is characterized by the PMP conditions, stated in Theorem 48 and Theorem 49. The well-posedness of the SQH scheme is shown in both cases, and further conditions are discussed that ensure the convergence of the SQH scheme to a PMP-consistent solution. The case of a space- and time-dependent control strategy results in a special structure of the corresponding PMP conditions that is exploited in another solution method, the so-called direct Hamiltonian (DH) method.}, subject = {Optimale Kontrolle}, language = {en} } @phdthesis{Bartsch2021, author = {Bartsch, Jan}, title = {Theoretical and numerical investigation of optimal control problems governed by kinetic models}, doi = {10.25972/OPUS-24906}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249066}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {This thesis is devoted to the numerical and theoretical analysis of ensemble optimal control problems governed by kinetic models. The formulation and study of these problems have been put forward in recent years by R.W. Brockett with the motivation that ensemble control may provide a more general and robust control framework for dynamical systems. Following this formulation, a Liouville (or continuity) equation with an unbounded drift function is considered together with a class of cost functionals that include tracking of ensembles of trajectories of dynamical systems and different control costs. Specifically, \$L^2\$, \$H^1\$ and \$L^1\$ control costs are taken into account, which leads to non-smooth optimization problems. For the theoretical investigation of the resulting optimal control problems, a well-posedness theory in weighted Sobolev spaces is presented for Liouville and related transport equations. Specifically, existence and uniqueness results for these equations and energy estimates in suitable norms are provided, in particular in norms of weighted Sobolev spaces. 
Then, non-smooth optimal control problems governed by the Liouville equation are formulated with a control mechanism in the drift function. Further, box constraints on the control are imposed. The control-to-state map is introduced, which associates with any control the unique solution of the corresponding Liouville equation. Important properties of this map are investigated, specifically that it is well-defined, continuous and Fr{\'e}chet differentiable. Using the first two properties, the existence of solutions to the optimal control problems is shown. While proving the differentiability, a loss of regularity is encountered, which is natural for hyperbolic equations. This leads to the need to investigate the control-to-state map in the topology of weighted Sobolev spaces. Exploiting the Fr{\'e}chet differentiability, it is possible to characterize solutions to the optimal control problem as solutions to an optimality system. This system consists of the Liouville equation, its optimization adjoint in the form of a transport equation, and a gradient inequality. Numerical methodologies for solving Liouville and transport equations are presented that are based on a non-smooth Lagrange optimization framework. For this purpose, approximation and solution schemes for such equations are developed and analyzed. For the approximation of the Liouville model and its optimization adjoint, a combination of a Kurganov-Tadmor method, a Runge-Kutta scheme, and a Strang splitting method is discussed. Stability and second-order accuracy of these resulting schemes are proven in the discrete \$L^1\$ norm. In addition, conservation of mass and positivity preservation are confirmed for the solution method of the Liouville model. As a numerical optimization strategy, an adapted Krylov-Newton method is applied. Since the control is considered to be an element of \$H^1\$ and to obey certain box constraints, a method for calculating an \$H^1\$ projection is presented. Since the optimal control problem is non-smooth, a semi-smooth adaptation of Newton's method is taken into account. Results of numerical experiments are presented that successfully validate the proposed deterministic framework. After the discussion of deterministic schemes, the linear space-homogeneous Keilson-Storer master equation is investigated. This equation was originally developed for the modelling of Brownian motion of particles immersed in a fluid and is a representative model of the class of linear Boltzmann equations. The well-posedness of the Keilson-Storer master equation is investigated and energy estimates in different topologies are derived. To solve this equation numerically, Monte Carlo methods are considered. Such methods take advantage of the kinetic formulation of the Liouville equation and directly implement the behaviour of the system of particles under consideration. This includes the probabilistic behaviour of the collisions between particles. Optimal control problems are formulated with an objective that is composed of certain expected values in velocity space and the \$L^2\$ and \$H^1\$ costs of the control. The problems are governed by the Keilson-Storer master equation, and the control mechanism is considered to be within the collision kernel. The objective of the optimal control of this model is to drive an ensemble of particles to acquire a desired mean velocity and to achieve a desired final velocity configuration. 
Existence of solutions of the optimal control problem is proven, and a Keilson-Storer optimality system characterizing the solution of the proposed optimal control problem is obtained. The optimality system is used to construct a gradient-based optimization strategy in the framework of Monte Carlo methods. This task requires accommodating the resulting adjoint Keilson-Storer model in a form that is consistent with the kinetic formulation. For this reason, we derive an adjoint Keilson-Storer collision kernel and an additional source term. A similar approach is presented in the case of a linear space-inhomogeneous kinetic model with external forces and with a Keilson-Storer collision term. In this framework, a control mechanism in the form of an external space-dependent force is investigated. The purpose of this control is to steer the multi-particle system to follow a desired mean velocity and position and to reach a desired final configuration in phase space. An optimal control problem using the formulation of ensemble controls is stated with an objective that is composed of expected values in phase space and \$H^1\$ costs of the control. For solving the optimal control problems, a gradient-based computational strategy in the framework of Monte Carlo methods is developed. Part of this is the denoising of the distribution functions calculated by Monte Carlo algorithms using methods from the realm of partial differential equations. A standalone C++ code is presented that implements the developed non-linear conjugate gradient strategy. Results of numerical experiments confirm the ability of the designed probabilistic control framework to operate as desired. An outlook section about optimal control problems governed by non-linear space-inhomogeneous kinetic models completes this thesis.}, subject = {Optimale Kontrolle}, language = {en} }
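As a rough illustration of the ensemble optimal control structure described in the Bartsch (2021) entry above, the following sketch writes a generic problem of this type in LaTeX. The notation is assumed for illustration only (state density \(f\), control \(u\) acting in the drift \(a\), desired trajectory \(x_d\), cost weights \(\nu,\gamma \ge 0\)); the precise functionals and constraints analysed in the thesis may differ.

\[
\min_{u}\; J(f,u) \;=\; \frac{1}{2}\int_0^T\!\!\int_{\mathbb{R}^d} |x - x_d(t)|^2\, f(x,t)\,dx\,dt \;+\; \frac{\nu}{2}\,\|u\|_{H^1}^2 \;+\; \gamma\,\|u\|_{L^1},
\]
\[
\text{subject to}\quad \partial_t f(x,t) + \nabla_x\cdot\big(a(x,t;u)\,f(x,t)\big) = 0,\qquad f(x,0)=f_0(x).
\]

The nondifferentiable \(L^1\) term is what makes such problems non-smooth and motivates the semi-smooth and proximal-type treatments mentioned in the entries above.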