@article{FreibergMatlachGrehnetal.2013, author = {Freiberg, Florentina Joyce and Matlach, Juliane and Grehn, Franz and Karl, Sabine and Klink, Thomas}, title = {Postoperative subconjunctival bevacizumab injection as an adjunct to 5-fluorouracil in the management of scarring after trabeculectomy}, series = {Clinical Ophthalmology}, journal = {Clinical Ophthalmology}, doi = {10.2147/OPTH.S41750}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96546}, year = {2013}, abstract = {Purpose: Scarring after glaucoma filtering surgery remains the most frequent cause for bleb failure. The aim of this study was to assess whether the postoperative injection of bevacizumab reduces the number of postoperative subconjunctival 5-fluorouracil (5-FU) injections. Further, the effect of bevacizumab as an adjunct to 5-FU on the intraocular pressure (IOP) outcome, bleb morphology, postoperative medications, and complications was evaluated. Methods: Glaucoma patients (N = 61) who underwent trabeculectomy with mitomycin C were analyzed retrospectively (follow-up period of 25 ± 19 months). Surgery was performed exclusively by one experienced glaucoma specialist using a standardized technique. Patients in group 1 received subconjunctival applications of 5-FU postoperatively. Patients in group 2 received 5-FU and subconjunctival injection of bevacizumab. Results: Group 1 had 6.4 ± 3.3 (0-15) (mean ± standard deviation and range, respectively) 5-FU injections. Group 2 had 4.0 ± 2.8 (0-12) (mean ± standard deviation and range, respectively) 5-FU injections. The added injection of bevacizumab significantly reduced the mean number of 5-FU injections by 2.4 ± 3.08 (P ≤ 0.005). IOP was not significantly lower in group 2 than in group 1. A significant reduction in vascularization and in corkscrew vessels could be found in both groups (P < 0.0001, 7 days to last 5-FU), yet there was no difference between the two groups at the last follow-up. Postoperative complications were significantly higher for both groups when more 5-FU injections were applied (P = 0.008). No significant difference in best corrected visual acuity (P = 0.852) and visual field testing (P = 0.610) from preoperative to last follow-up could be found between the two groups. Conclusion: The postoperative injection of bevacizumab reduced the number of subconjunctival 5-FU injections significantly by 2.4 injections. A significant difference in postoperative IOP reduction, bleb morphology, and postoperative medication was not detected.}, language = {en} } @misc{Forster2013, type = {Master Thesis}, author = {Forster, Johannes}, title = {Mathematical Modeling of Complex Fluids}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-83533}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {This thesis gives an overview of mathematical modeling of complex fluids with the discussion of underlying mechanical principles, the introduction of the energetic variational framework, and examples and applications. The purpose is to present a formal energetic variational treatment of energies corresponding to the models of physical phenomena and to derive PDEs for the complex fluid systems. The advantages of this approach over force-based modeling are, e.g., that for complex systems energy terms can be established in a relatively easy way, that force components within a system are not counted twice, and that this approach can naturally combine effects on different scales. We follow a lecture of Professor Dr.
Chun Liu from Penn State University, USA, on complex fluids which he gave at the University of Wuerzburg during his Giovanni Prodi professorship in summer 2012. We elaborate on this lecture, also consider parts of his work and publications, and substantially extend the lecture with our own calculations and arguments (for papers including an overview of the energetic variational treatment see [HKL10], [Liu11] and references therein).}, subject = {Variationsrechnung}, language = {en} } @misc{Proell2013, type = {Master Thesis}, author = {Pr{\"o}ll, Sebastian}, title = {Stability of Switched Epidemiological Models}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-108573}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {In this thesis it is shown how the spread of infectious diseases can be described via mathematical models that capture the dynamic behavior of epidemics. Ordinary differential equations are used for the modeling process. SIR and SIRS models are distinguished, depending on whether a disease confers immunity to individuals after recovery or not. There are characteristic parameters for each disease, such as the infection rate or the recovery rate. These parameters indicate how aggressively a disease acts and how long it takes for an individual to recover, respectively. In general the parameters are time-varying and depend on population groups. For this reason, models with multiple subgroups are introduced, and switched systems are used to incorporate time-varying parameters. When investigating such models, the so-called disease-free equilibrium is of interest, where no infectives appear within the population. The question is whether there are conditions under which this equilibrium is stable. Necessary mathematical tools for the stability analysis are presented. The theory of ordinary differential equations, including Lyapunov stability theory, is fundamental. Moreover, convex and nonsmooth analysis, positive systems and differential inclusions are introduced. With these tools, sufficient conditions are given for the disease-free equilibrium of SIS, SIR and SIRS systems to be asymptotically stable.}, subject = {Gew{\"o}hnliche Differentialgleichung}, language = {en} } @phdthesis{Christ2013, author = {Christ, Thomas}, title = {Value-distribution of the Riemann zeta-function and related functions near the critical line}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-97763}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {The Riemann zeta-function forms a central object in multiplicative number theory; its value-distribution encodes deep arithmetic properties of the prime numbers. Here, a crucial role is assigned to the analytic behavior of the zeta-function on the so-called critical line. In this thesis we study the value-distribution of the Riemann zeta-function near and on the critical line. Amongst others, we focus on the following. PART I: A modified concept of universality, a-points near the critical line and a denseness conjecture attributed to Ramachandra. The critical line is a natural boundary of the Voronin-type universality property of the Riemann zeta-function. We modify Voronin's concept by adding a scaling factor to the vertical shifts that appear in Voronin's universality theorem and investigate whether this modified concept is appropriate to keep up a certain universality property of the Riemann zeta-function near and on the critical line.
It turns out that it is mainly the functional equation of the Riemann zeta-function that restricts the set of functions which can be approximated by this modified concept around the critical line. Levinson showed that almost all a-points of the Riemann zeta-function lie in a certain funnel-shaped region around the critical line. We complement Levinson's result: Relying on arguments of the theory of normal families and the notion of filling discs, we detect a-points in this region which are very close to the critical line. According to a folklore conjecture (often attributed to Ramachandra) one expects that the values of the Riemann zeta-function on the critical line lie dense in the complex numbers. We show that there are certain curves which approach the critical line asymptotically and have the property that the values of the zeta-function on these curves are dense in the complex numbers. Many of our results in part I are independent of the Euler product representation of the Riemann zeta-function and apply to meromorphic functions that satisfy a Riemann-type functional equation in general. PART II: Discrete and continuous moments. The Lindel{\"o}f hypothesis deals with the growth behavior of the Riemann zeta-function on the critical line. Due to classical works by Hardy and Littlewood, the Lindel{\"o}f hypothesis can be reformulated in terms of power moments to the right of the critical line. Tanaka showed recently that the expected asymptotic formulas for these power moments are true in a certain measure-theoretical sense; roughly speaking, he omits a set of Banach density zero from the path of integration of these moments. We provide a discrete and integrated version of Tanaka's result and extend it to a large class of Dirichlet series connected to the Riemann zeta-function.}, subject = {Riemannsche Zetafunktion}, language = {en} } @unpublished{GeiselhartGielenLazaretal.2013, author = {Geiselhart, Roman and Gielen, Rob H. and Lazar, Mircea and Wirth, Fabian R.}, title = {An Alternative Converse Lyapunov Theorem for Discrete-Time Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78512}, year = {2013}, abstract = {This paper presents an alternative approach for obtaining a converse Lyapunov theorem for discrete-time systems. The proposed approach is constructive, as it provides an explicit Lyapunov function. The developed converse theorem establishes existence of global Lyapunov functions for globally exponentially stable (GES) systems and semi-global practical Lyapunov functions for globally asymptotically stable systems. Furthermore, for specific classes of systems, the developed converse theorem can be used to establish non-conservatism of a particular type of Lyapunov functions. Most notably, a proof that conewise linear Lyapunov functions are non-conservative for GES conewise linear systems is given and, as a by-product, tractable construction of polyhedral Lyapunov functions for linear systems is attained.}, subject = {Ljapunov-Funktion}, language = {en} } @phdthesis{Sen2013, author = {Sen, Surath}, title = {Character Analysis and Numerical Computations of Standard M.I.
Probability Distributions}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-78623}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {Development and character analysis of software programs that compute minimum information probability distributions.}, subject = {Newton-Verfahren}, language = {en} } @article{Harrach2013, author = {Harrach, Bastian}, title = {Recent Progress on the Factorization Method for Electrical Impedance Tomography}, series = {Computational and Mathematical Methods in Medicine}, journal = {Computational and Mathematical Methods in Medicine}, doi = {10.1155/2013/425184}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96229}, year = {2013}, abstract = {The Factorization Method is a noniterative method to detect the shape and position of conductivity anomalies inside an object. The method was introduced by Kirsch for inverse scattering problems and extended to electrical impedance tomography (EIT) by Br{\"u}hl and Hanke. Since these pioneering works, substantial progress has been made on the theoretical foundations of the method. The necessary assumptions have been weakened, and the proofs have been considerably simplified. In this work, we aim to summarize this progress and present a state-of-the-art formulation of the Factorization Method for EIT with continuous data. In particular, we formulate the method for general piecewise analytic conductivities and give short and self-contained proofs.}, language = {en} } @phdthesis{Hofmann2012, author = {Hofmann, Martin}, title = {Contributions to Extreme Value Theory in the Space C[0,1]}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-74405}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {We introduce a mathematical framework for extreme value theory in the space of continuous functions on compact intervals and provide basic definitions and tools. Continuous max-stable processes on [0,1] are characterized by their "distribution functions" G which can be represented via a norm on function space, called D-norm. The high conformity of this setup with the multivariate case leads to the introduction of a functional domain of attraction approach for stochastic processes, which is more general than the usual one based on weak convergence. We also introduce the concept of "sojourn time transformation" and compare several types of convergence on function space. Again in complete accordance with the uni- or multivariate case it is now possible to get functional generalized Pareto distributions (GPD) W via W = 1 + log(G) in the upper tail. In particular, this enables us to derive characterizations of the functional domain of attraction condition for copula processes. Moreover, we investigate the sojourn time above a high threshold of a continuous stochastic process Y. It turns out that the limit, as the threshold increases, of the expected sojourn time given that it is positive, exists if the copula process corresponding to Y is in the functional domain of attraction of a max-stable process.
If the process is in a certain neighborhood of a generalized Pareto process, then we can replace the constant threshold by a general threshold function and we can compute the asymptotic sojourn time distribution.}, subject = {Extremwertstatistik}, language = {en} } @techreport{Englert2012, author = {Englert, Stefan}, title = {Mathematica in 15 Minuten (Mathematica Version 8.0)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-70287}, year = {2012}, abstract = {Mathematica is an excellent program for carrying out mathematical computations - even very complex ones - in a relatively simple way. This script is meant to give a very short introduction to Mathematica and to serve as a reference for some common applications of Mathematica. The following rough structure is used: - Basics: graphical interface, simple calculations, entering formulas - Usage: presentation of some commands and insight into how they work - Practice: worked examples of some Abitur and exercise problems}, subject = {Anwendungssoftware}, language = {de} } @phdthesis{Mauder2012, author = {Mauder, Markus}, title = {Time-Optimal Control of the Bi-Steerable Robot: A Case Study in Optimal Control of Nonholonomic Systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-75036}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {In this thesis, time-optimal control of the bi-steerable robot is addressed. The bi-steerable robot, a vehicle with two independently steerable axles, is a complex nonholonomic system with applications in many areas of land-based robotics. Motion planning and optimal control are challenging tasks for this system, since standard control schemes do not apply. The model of the bi-steerable robot considered here is a reduced kinematic model with the driving velocity and the steering angles of the front and rear axle as inputs. The steering angles of the two axles can be set independently from each other. The reduced kinematic model is a control system with affine and non-affine inputs, as the driving velocity enters the system linearly, whereas the steering angles enter nonlinearly. In this work, a new approach to solve the time-optimal control problem for the bi-steerable robot is presented. In contrast to most standard methods for time-optimal control, our approach does not exclusively rely on discretization and purely numerical methods. Instead, the Pontryagin Maximum Principle is used to characterize candidates for time-optimal solutions. The resultant boundary value problem is solved by optimization to obtain solutions to the path planning problem over a given time horizon. The time horizon is decreased and the path planning is iterated to approximate a time-optimal solution. An optimality condition is introduced which depends on the number of cusps, i.e., reversals of the driving direction of the robot. This optimality condition makes it possible to single out non-optimal solutions with too many cusps. In general, our approach only gives approximations of time-optimal solutions, since only normal regular extremals are considered as solutions to the path planning problem, and the path planning is terminated when an extremal with minimal number of cusps is found. However, for most desired configurations, normal regular extremals with the minimal number of cusps provide time-optimal solutions for the bi-steerable robot. The convergence of the approach is analyzed and its probabilistic completeness is shown.
Moreover, simulation results on time-optimal solutions for the bi-steerable robot are presented.}, subject = {Mobiler Roboter}, language = {en} } @phdthesis{Akindeinde2012, author = {Akindeinde, Saheed Ojo}, title = {Numerical Verification of Optimality Conditions in Optimal Control Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-76065}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {This thesis is devoted to numerical verification of optimality conditions for non-convex optimal control problems. In the first part, we are concerned with a-posteriori verification of sufficient optimality conditions. It is common knowledge that verification of such conditions for general non-convex PDE-constrained optimization problems is very challenging. We propose a method to verify second-order sufficient conditions for a general class of optimal control problems. If the proposed verification method confirms the fulfillment of the sufficient condition, then a-posteriori error estimates can be computed. A special ingredient of our method is an error analysis for the Hessian of the underlying optimization problem. We derive conditions under which positive definiteness of the Hessian of the discrete problem implies positive definiteness of the Hessian of the continuous problem. The results are complemented with numerical experiments. In the second part, we investigate adaptive methods for optimal control problems with finitely many control parameters. We analyze a-posteriori error estimates based on verification of second-order sufficient optimality conditions using the method developed in the first part. Reliability and efficiency of the error estimator are shown. We illustrate, through numerical experiments, the use of the estimator in guiding adaptive mesh refinement.}, subject = {Optimale Kontrolle}, language = {en} } @book{FalkMarohnMicheletal.2012, author = {Falk, Michael and Marohn, Frank and Michel, Ren{\´e} and Hofmann, Daniel and Macke, Maria and Spachmann, Christoph and Englert, Stefan}, title = {A First Course on Time Series Analysis : Examples with SAS [Version 2012.August.01]}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72617}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {The analysis of real data by means of statistical methods with the aid of a software package common in industry and administration usually is not an integral part of mathematics studies, but it will certainly be part of future professional work. The present book links up elements from time series analysis with a selection of statistical procedures used in general practice, including the statistical software package SAS. Consequently, this book addresses students of statistics as well as students of other branches such as economics, demography and engineering, where lectures on statistics belong to their academic training. But it is also intended for the practitioner who, beyond the use of statistical tools, is interested in their mathematical background. Numerous problems illustrate the applicability of the presented statistical procedures, where SAS gives the solutions. The programs used are explicitly listed and explained. No previous experience is expected, either in SAS or in a particular computer system, so that a short training period is guaranteed. This book is meant for a two-semester course (lecture, seminar or practical training) where the first three chapters can be dealt with in the first semester.
They provide the principal components of the analysis of a time series in the time domain. Chapters 4, 5 and 6 deal with its analysis in the frequency domain and can be worked through in the second term. In order to understand the mathematical background, some concepts are helpful, such as convergence in distribution, stochastic convergence and the maximum likelihood estimator, as well as a basic knowledge of test theory, so that work on the book can start after an introductory lecture on stochastics. Each chapter includes exercises. An exhaustive treatment is recommended. Chapter 7 (case study) deals with a practical case and demonstrates the presented methods. It is possible to use this chapter independently in a seminar or practical training course, if the concepts of time series analysis are already well understood. This book is consecutively subdivided into a statistical part and an SAS-specific part. For better clarity, the SAS-specific parts are highlighted. This book is an open source project under the GNU Free Documentation License.}, subject = {Zeitreihenanalyse}, language = {en} } @phdthesis{Schoenlein2012, author = {Sch{\"o}nlein, Michael}, title = {Stability and Robustness of Fluid Networks: A Lyapunov Perspective}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72235}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {In the verification of positive Harris recurrence of multiclass queueing networks, the stability analysis for the class of fluid networks is of vital interest. This thesis addresses stability of fluid networks from a Lyapunov point of view. In particular, the focus is on converse Lyapunov theorems. To gain a unified approach, the considerations are based on generic properties that fluid networks under widely used disciplines have in common. It is shown that the class of closed generic fluid network models (closed GFNs) is too wide to provide a reasonable Lyapunov theory. To overcome this, the class of strict generic fluid network models (strict GFNs) is introduced. In this class it is required that closed GFNs additionally satisfy a concatenation and a lower semicontinuity condition. We show that for strict GFNs a converse Lyapunov theorem is true which provides a continuous Lyapunov function. Moreover, it is shown that for strict GFNs satisfying a trajectory estimate a smooth converse Lyapunov theorem holds. To see that widely used queueing disciplines fulfill the additional conditions, fluid networks are considered from a differential inclusions perspective. Within this approach it turns out that fluid networks under general work-conserving, priority and proportional processor-sharing disciplines define strict GFNs. Furthermore, we provide an alternative proof for the fact that the Markov process underlying a multiclass queueing network is positive Harris recurrent if the associated fluid network defining a strict GFN is stable. The proof explicitly uses the Lyapunov function admitted by the stable strict GFN.
Also, the differential inclusions approach shows that first-in-first-out disciplines play a special role.}, subject = {Warteschlangennetz}, language = {en} } @article{ChenchiahSchloemerkemper2012, author = {Chenchiah, Isaac and Schl{\"o}merkemper, Anja}, title = {Non-laminate microstructures in monoclinic-I martensite}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72134}, year = {2012}, abstract = {We study the symmetrised rank-one convex hull of monoclinic-I martensite (a twelve-variant material) in the context of geometrically-linear elasticity. We construct sets of T3s, which are (non-trivial) symmetrised rank-one convex hulls of 3-tuples of pairwise incompatible strains. Moreover, we construct a five-dimensional continuum of T3s and show that its intersection with the boundary of the symmetrised rank-one convex hull is four-dimensional. We also show that there is another kind of monoclinic-I martensite with qualitatively different semi-convex hulls which, so far as we know, has not been experimentally observed. Our strategy is to combine understanding of the algebraic structure of symmetrised rank-one convex cones with knowledge of the faceting structure of the convex polytope formed by the strains.}, subject = {Martensit}, language = {en} } @phdthesis{Schroeter2012, author = {Schr{\"o}ter, Martin}, title = {Newton Methods for Image Registration}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-71490}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Consider the situation where two or more images are taken of the same object. After taking the first image, the object is moved or rotated so that the second recording depicts it in a different manner. Additionally, the imaging technique may also have been changed. One of the main problems in image processing is to determine the spatial relation between such images. The corresponding process of finding the spatial alignment is called "registration". In this work, we study the optimization problem which corresponds to the registration task. In particular, we exploit the Lie group structure of the set of transformations to construct efficient, intrinsic algorithms. We also apply the algorithms to medical registration tasks. However, the methods developed are not restricted to the field of medical image processing. We also take a closer look at more general forms of optimization problems and show connections to related tasks.}, subject = {Newton-Verfahren}, language = {en} } @phdthesis{Nguyen2012, author = {Nguyen, Danh Nam}, title = {Understanding the development of the proving process within a dynamic geometry environment}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-71754}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Argumentation and proof have played a fundamental role in mathematics education in recent years. The author of this dissertation investigates the development of the proving process within a dynamic geometry system in order to support tertiary students in understanding the proving process. The strengths of this dynamic system stimulate students to formulate conjectures and produce arguments during the proving process. Through empirical research, we classified different levels of proving and proposed a methodological model for proving. This methodological model contributes to improving students' levels of proving and developing their dynamic visual thinking.
We used Toulmin's model of argumentation as a theoretical model to analyze the relationship between argumentation and proof. This research also offers some possible explanations as to why students have cognitive difficulties in constructing proofs and provides mathematics educators with a deeper understanding of the proving process within a dynamic geometry system.}, subject = {Argumentation}, language = {en} } @phdthesis{Curtef2012, author = {Curtef, Oana}, title = {Rayleigh-quotient optimization on tensor products of Grassmannians}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-83383}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Applications in various research areas such as signal processing, quantum computing, and computer vision can be described as constrained optimization tasks on certain subsets of tensor products of vector spaces. In this work, we make use of techniques from Riemannian geometry and analyze optimization tasks on subsets of so-called simple tensors which can be equipped with a differentiable structure. In particular, we introduce a generalized Rayleigh-quotient function on the tensor product of Grassmannians and on the tensor product of Lagrange-Grassmannians. Its optimization enables a unified approach to well-known tasks from different areas of numerical linear algebra, such as best low-rank approximations of tensors (data compression), computing geometric measures of entanglement (quantum computing) and subspace clustering (image processing). We perform a thorough analysis of the critical points of the generalized Rayleigh-quotient and develop intrinsic numerical methods for its optimization. Explicitly, using techniques from Riemannian optimization, we present two types of algorithms: a Newton-like and a conjugate gradient algorithm. Their performance is analysed and compared with established methods from the literature.}, subject = {Optimierung}, language = {en} } @phdthesis{Tichy2011, author = {Tichy, Diana}, title = {On the Fragility Index}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-73610}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The Fragility Index captures the amount of risk in a stochastic system of arbitrary dimension. Its main mathematical tool is the asymptotic distribution of exceedance counts within the system, which can be derived by use of multivariate extreme value theory. Thereby the basic assumption is that the data come from a distribution which lies in the domain of attraction of a multivariate extreme value distribution. The Fragility Index itself and its extension can serve as a quantitative measure for tail dependence in arbitrary dimensions. It is linked to the well-known extremal index for stochastic processes as well as to the extremal coefficient of an extreme value distribution.}, subject = {Extremwertstatistik}, language = {en} } @phdthesis{Dreves2011, author = {Dreves, Axel}, title = {Globally Convergent Algorithms for the Solution of Generalized Nash Equilibrium Problems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-69822}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {Several methods for the solution of generalized Nash equilibrium problems are developed, with the focus on their global convergence.
A globalized Newton method for computing normalized solutions, a nonsmooth optimization method based on an unconstrained reformulation of the game-theoretic problem, and a minimization approach as well as an interior-point method for solving the joint Karush-Kuhn-Tucker conditions of the players are investigated theoretically and tested numerically. In particular, the interior-point method proves to be what is currently probably the best method for the solution of generalized Nash equilibrium problems.}, subject = {Nash-Gleichgewicht}, language = {en} } @phdthesis{Tichy2011, author = {Tichy, Michael}, title = {On algebraic aggregation methods in additive preconditioning}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56541}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {In the following dissertation we consider three preconditioners of algebraic multigrid type; although they are defined for arbitrary prolongation and restriction operators, we consider them in more detail for the aggregation method. The strengthened Cauchy-Schwarz inequality and the resulting angle between the spaces will be our main interests. In this context we will introduce some modifications. For the problem of one-dimensional convection we obtain perfect theoretical results. Although this is not the case for more complex problems, the numerical results we present will show that the modifications are also useful in these situations. Additionally, we will consider a symmetric problem in the energy norm and present a simple rule for algebraic aggregation.}, subject = {Pr{\"a}konditionierung}, language = {en} }