@phdthesis{Abt2019, author = {Abt, Raimond}, title = {Implementing Aspects of Quantum Information into the AdS/CFT Correspondence}, doi = {10.25972/OPUS-18801}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188012}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {In recent years many discoveries have been made that reveal a close relation between quantum information and geometry in the context of the AdS/CFT correspondence. In this duality between a conformal quantum field theory (CFT) and a theory of gravity on Anti-de Sitter spaces (AdS), quantum information quantities in the CFT are associated with geometric objects in AdS. The subject of this thesis is the examination of this intriguing property of AdS/CFT. We study two central elements of quantum information: subregion complexity -- which is a measure for the effort required to construct a given reduced state -- and the modular Hamiltonian -- which is given by the negative logarithm of the reduced state under consideration. While a clear definition of subregion complexity in terms of unitary gates exists for discrete systems, a rigorous formulation for quantum field theories is not known. In AdS/CFT, subregion complexity is proposed to be related to certain codimension one regions on the AdS side. The main focus of this thesis lies on the examination of such candidates for gravitational duals of subregion complexity. We introduce the concept of \textit{topological complexity}, which considers subregion complexity to be given by the integral of the Ricci scalar over codimension one regions in AdS. The Gauss-Bonnet theorem provides very general expressions for the topological complexity of CFT\(_2\) states dual to global AdS\(_3\), BTZ black holes and conical defects. In particular, our calculations show that the topology of the considered codimension one bulk region plays an essential role in topological complexity. Moreover, we study holographic subregion complexity (HSRC), which associates the volume of a particular codimension one bulk region with subregion complexity. We derive an explicit field theory expression for the HSRC of vacuum states. The formulation of HSRC in terms of field theory quantities may make it possible to investigate whether this bulk object indeed provides a concept of subregion complexity on the CFT side. In particular, if this turns out to be the case, our expression for HSRC may be seen as a field theory definition of subregion complexity. We extend our expression to states dual to BTZ black holes and conical defects. A further focus of this thesis is the modular Hamiltonian of a family of states \(\rho_\lambda\) depending on a continuous parameter \(\lambda\). Here \(\lambda\) may be associated with the energy density or the temperature, for instance. The importance of the modular Hamiltonian for quantum information is due to its contribution to relative entropy -- one of the very few objects in quantum information with a rigorous definition for quantum field theories. The first-order contribution in \(\tilde{\lambda}=\lambda-\lambda_0\) of the modular Hamiltonian to the relative entropy between \(\rho_\lambda\) and a reference state \(\rho_{\lambda_0}\) is provided by the first law of entanglement. We study under which circumstances higher-order contributions in \(\tilde{\lambda}\) are to be expected. 
We show that for states reduced to two entangling regions \(A\), \(B\), the modular Hamiltonian of at least one of these regions is expected to provide higher-order contributions in \(\tilde{\lambda}\) to the relative entropy if \(A\) and \(B\) saturate the Araki-Lieb inequality. The Araki-Lieb inequality states that the difference between the entanglement entropies of \(A\) and \(B\) is always less than or equal to the entanglement entropy of the union of \(A\) and \(B\). Regions for which this inequality is saturated are referred to as entanglement plateaux. In AdS/CFT the relation between geometry and quantum information provides many examples of entanglement plateaux. We apply our result to several of them, including large intervals for states dual to BTZ black holes and annuli for states dual to black brane geometries.}, subject = {AdS-CFT-Korrespondenz}, language = {en} } @phdthesis{Adamek2011, author = {Adamek, Julian}, title = {Classical and Quantum Aspects of Anisotropic Cosmology}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-65908}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The idea that our observable Universe may have originated from a quantum tunneling event out of an eternally inflating false vacuum state is a cornerstone of the multiverse paradigm. Modern theories that are considered an approach towards the ultraviolet-complete fundamental theory of particles and gravity, such as the various types of string theory, even suggest that a vast landscape of different vacuum configurations exists, and that gravitational tunneling is an important mechanism by which the Universe can explore this landscape. The tunneling scenario also presents a unique framework to address the initial conditions of our observable Universe. In particular, it allows one to introduce deviations from the cosmological concordance model in a controlled and well-motivated way. These deviations are a central topic of this work. An important feature of most of the theories mentioned above is the presumed existence of additional space dimensions in excess of the three which we observe in our everyday experience. It was realized that these extra dimensions could escape detection if they are compactified to microscopic length scales far beyond the reach of current experiments. There also seem to be natural mechanisms available for dynamical compactification in those theories. These typically lead to a vast landscape of different vacuum configurations, which may also differ in the number of macroscopic dimensions, with only the total number of dimensions determined by the theory. Transitions between these vacuum configurations may hence open up new directions which were previously compact, spontaneously compactify some previously macroscopic directions, or otherwise re-arrange the configuration of compact and macroscopic dimensions in a more general way. From within the bubble Universe, such a process may be perceived as an anisotropic background spacetime - intuitively, the dimensions which open up may give rise to preferred directions. If our 3+1-dimensional observable Universe was born in a process as described above, one may expect to find traces of a preferred direction in cosmological observations. For instance, two directions could be curved like on a sphere, while the third space direction is flat. 
Using a scenario of gravitational tunneling to fix the initial conditions, I show how the primordial signatures in such an anisotropic Universe can be obtained in principle and work out a particular example in more detail. A small deviation from isotropy also has phenomenological consequences for the later evolution of the Universe. I discuss the most important effects and show that backreaction can be dynamically important. In particular, under certain conditions, a buildup of anisotropic stress in different components of the cosmic fluid can lead to a dynamical isotropization of the total stress-energy tensor. The mechanism is again demonstrated with the help of a physical example.}, subject = {Kosmologie}, language = {en} } @phdthesis{Alboteanu2007, author = {Alboteanu, Ana Maria}, title = {The Noncommutative Standard Model: Construction Beyond Leading Order in Theta and Collider Phenomenology}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-24334}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2007}, abstract = {Despite its precise agreement with experiment, the validity of the Standard Model (SM) of elementary particle physics has so far been established only up to energy scales of a few hundred GeV. Beyond that, even the incorporation of gravity into a unified description of all fundamental interactions proves to be a problem that ordinary quantum field theory cannot solve. Interest in quantum field theories on a noncommutative spacetime was sparked by their prediction as the low-energy limit of string theories. Independently of this, the nonlocality of such a theory may provide a framework for including gravity in a unifying theory. The hope is that the energy scale Lambda_NC above which such effects can become visible, and for which there are no theoretical predictions, will already be reached by the next generation of colliders. The present work rests on this assumption; within its scope, a possible realization of quantum field theories on noncommutative spacetime was examined for its phenomenological consequences. This work is motivated by the lack of LHC (Large Hadron Collider) studies of noncommutative quantum field theories. In the first part of the project, the hadronic process pp -> Z gamma -> l+l- gamma at the LHC as well as electron-positron pair annihilation into a Z boson and a photon at the ILC (International Linear Collider) were examined for noncommutative signals. Within this model, the phenomenological studies were carried out to first order in the noncommutative parameter Theta. A noncommutative spacetime leads to a breaking of rotational invariance with respect to the beam direction of the incoming particles. In the differential cross section of scattering processes this manifests itself as an azimuthal dependence which occurs neither in the SM nor in other models beyond the SM. This clear signature, typical of noncommutative theories, can be used to distinguish noncommutative models from other models of new physics. The azimuthal dependence of the cross section also turned out to be best suited for determining the sensitivity of the LHC and the ILC to the noncommutative scale Lambda_NC. 
Im ph{\"a}nomenologischen Teil der Arbeit wurde herausgefunden, dass Messungen am LHC f{\"u}r den Prozess pp-> Z gamma-> l+l- gamma nur in bestimmten F{\"a}llen auf nichtkommutative Effekte sensitiv sind. F{\"u}r diese F{\"a}lle wurde f{\"u}r die nichtkommutative Energieskala Lambda_NC eine Grenze von Lambda_NC > 1.2 TeV bestimmt. Diese ist um eine Gr{\"o}ßenordnung h{\"o}her als die Grenzen, die von bisherigen Beschleunigerexperimenten hergeleitet wurden. Bei einem zuk{\"u}nftigen Linearbeschleuniger, dem ILC, wird die Grenze auf Lambda_NC im Prozess e^+e^- -> Z gamma -> l^+ l^- gamma wesentlich erh{\"o}ht (bis zu 6 TeV). Abgesehen davon ist dem ILC gerade der f{\"u}r den LHC kaum zug{\"a}ngliche Parameterbereich der nichtkommutativen Theorie erschlossen, was die Komplementarit{\"a}t der beiden Beschleunigerexperimente hinsichtlich der nichtkommutativen Parameter zeigt. Der zweite Teil der Arbeit entwickelte sich aus der Notwendigkeit heraus, den G{\"u}ltigkeitsbereich der Theorie zu h{\"o}heren Energien hin zu erweitern. Daf{\"u}r haben wir den neutralen Sektor des nichtkommutativen SM um die n{\"a}chste Ordnung in Theta erg{\"a}nzt. Es stellte sich wider Erwarten heraus, dass die Theorie dabei um einige freie Parameter erweitert werden muss. Die zus{\"a}tzlichen Parameter sind durch die homogenen L{\"o}sungen der Eich{\"a}quivalenzbedingungen gegeben, welche Ambiguit\"aten der Seiberg-Witten Abbildungen darstellen. Die allgemeine Erwartung war, dass die Ambiguit{\"a}ten Feldredefinitionen entsprechen und daher in den Streumatrixelementen verschwinden m\"ussen. In dieser Arbeit wurde jedoch gezeigt, dass dies ab der zweiten Ordnung in Theta nicht der Fall ist und dass die Nichteindeutigkeit der Seiberg-Witten Abbildungen sich durchaus in Observablen niederschl{\"a}gt. Die Vermutung besteht, dass jede neue Ordnung in Theta neue Parameter in die Theorie einf{\"u}hrt. Wie weit und in welche Richtung die Theorie auf nichtkommutativer Raumzeit entwickelt werden muss, kann jedoch nur das Experiment entscheiden.}, subject = {Feldtheorie}, language = {en} } @phdthesis{Bach2013, author = {Bach, Fabian}, title = {Charged Current Top Quark Couplings at the LHC}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-82358}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2013}, abstract = {The top quark plays an important role in current particle physics, from a theoretical point of view because of its uniquely large mass, but also experimentally because of the large number of top events recorded by the LHC experiments ATLAS and CMS, which makes it possible to directly measure the properties of this particle, for example its couplings to the other particles of the standard model (SM), with previously unknown precision. In this thesis, an effective field theory approach is employed to introduce a minimal and consistent parametrization of all anomalous top couplings to the SM gauge bosons and fermions which are compatible with the SM symmetries. In addition, several aspects and consequences of the underlying effective operator relations for these couplings are discussed. The resulting set of couplings has been implemented in the parton level Monte Carlo event generator WHIZARD in order to provide a tool for the quantitative assessment of the phenomenological implications at present and future colliders such as the LHC or a planned international linear collider. 
The phenomenological part of this thesis focuses on the charged current couplings of the top quark, namely anomalous contributions to the trilinear tbW coupling as well as quartic four-fermion contact interactions of the form tbff, both of which affect single top production as well as top decays at the LHC. The study includes various aspects of inclusive cross section measurements as well as differential distributions of single tops produced in the t channel, bq → tq', and in the s channel, ud → tb. We discuss the parton level modelling of these processes as well as detector effects, and finally present the projected LHC reach for setting limits on these couplings with 10 and 100 fb\(^{-1}\) of data recorded at \(\sqrt{s} = 14\) TeV.}, subject = {LHC}, language = {en} } @phdthesis{Banik2023, author = {Banik, Amitayus}, title = {Two Approaches to the Baryon Asymmetry of the Universe}, doi = {10.25972/OPUS-32046}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-320468}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Explaining the baryon asymmetry of the Universe has been a long-standing problem of particle physics, with the consensus being that new physics is required, as the Standard Model (SM) cannot resolve this issue. Beyond the Standard Model (BSM) scenarios would need to incorporate new sources of \(CP\) violation and either introduce new departures from thermal equilibrium or modify the existing electroweak phase transition. In this thesis, we explore two approaches to baryogenesis, i.e. the generation of this asymmetry. In the first approach, we study the two-particle irreducible (2PI) formalism as a means to investigate non-equilibrium phenomena. After arriving at the renormalised equations of motion (EOMs) that describe the dynamics of a phase transition, we discuss the techniques required to obtain the various counterterms in an on-shell scheme. To this end, we consider three truncations up to two-loop order of the 2PI effective action: the Hartree approximation, the scalar sunset approximation and the fermionic sunset approximation. We then reconsider the renormalisation procedure in an \(\overline{\text{MS}}\) scheme to evaluate the 2PI effective potential for the aforementioned truncations. In the Hartree and the scalar sunset approximations, we obtain analytic expressions for the various counterterms and subsequently calculate the effective potential by piecing together the finite contributions. For the fermionic sunset approximation, we obtain similar equations for the counterterms in terms of divergent parts of loop integrals. However, these integrals cannot be expressed in analytic form, making it impossible to evaluate the 2PI effective potential with the fermionic contribution. Our main results are thus related to the renormalisation programme in the 2PI formalism: \((i)\) the procedure to obtain the renormalised EOMs, now including fermions, which serve as the starting point for the transport equations of electroweak baryogenesis, and \((ii)\) the method to obtain the 2PI effective potential in a transparent manner. In the second approach, we study baryogenesis via leptogenesis. Here, an asymmetry in the lepton sector is generated, which is then converted into the baryon asymmetry via the sphaleron process in the SM. We proceed to consider an extension of the SM along the lines of a scotogenic framework. The newly introduced particles are odd under a \(\mathbb{Z}_2\) symmetry, and masses for the SM neutrinos are generated radiatively. 
The \(\mathbb{Z}_2\) symmetry results in the lightest BSM particle being stable, allowing for a suitable dark matter (DM) candidate. Furthermore, the newly introduced heavy Majorana fermionic singlets provide the necessary sources of \(CP\) violation through their Yukawa interactions, and their out-of-equilibrium decays produce a lepton asymmetry. This model is constrained by a wide range of observables, such as consistency with neutrino oscillation data, limits on branching ratios of charged lepton flavour violating decays, electroweak observables and the observed DM relic density. We study leptogenesis in this model in light of the results of a Markov chain Monte Carlo scan, implemented in consideration of the aforementioned constraints. Requiring successful leptogenesis in this model, in order to account for the baryon asymmetry, then severely constrains the available parameter space.}, subject = {Baryonenasymmetrie}, language = {en} } @phdthesis{Bechmann2004, author = {Bechmann, Michael}, title = {Dynamics in quantum spin glass systems}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-12519}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2004}, abstract = {This thesis aims at a description of the equilibrium dynamics of quantum spin glass systems. To this end a generic fermionic SU(2), spin-1/2 spin glass model with infinite-range interactions is defined in the first part. The model is treated in the framework of imaginary-time Grassmann field theory along with the replica formalism. A dynamical two-step decoupling procedure, which retains the full time dependence of the (replica-symmetric) saddle point, is presented. As a main result, a set of highly coupled self-consistency equations for the spin-spin correlations can be formulated. Beyond the so-called spin-static approximation, two complementary systematic approximation schemes are developed in order to render the occurring integration problem tractable. One of these methods restricts the quantum-spin dynamics to a manageable number of bosonic Matsubara frequencies. A sequence of improved approximants to some quantity can be obtained by gradually extending the set of employed discrete frequencies. Extrapolation of such a sequence yields an estimate of the full dynamical solution. The other method is based on a perturbative expansion of the self-consistency equations in terms of the dynamical correlations. In the second part these techniques are applied to the isotropic Heisenberg spin glass both on the Fock space (HSGF) and, exploiting the Popov-Fedotov trick, on the spin space (HSGS). The critical temperatures of the paramagnet-to-spin-glass phase transitions are determined accurately. Compared to the spin-static results, the dynamics causes slight increases of T_c by about 3\% and 2\%, respectively. For the HSGS the specific heat C(T) is investigated in the paramagnetic phase and, by way of a perturbative method, below but close to T_c. The exact C(T)-curve is shown to exhibit a pronounced non-analyticity at T_c and, contrary to recent reports by other authors, there is no indication of a maximum above T_c. In the last part of this thesis the spin glass model is augmented with a nearest-neighbor hopping term on an infinite-dimensional cubic lattice. An extended self-consistency structure can be derived by combining the decoupling procedure with the dynamical CPA method. For the itinerant Ising spin glass numerous solutions within the spin-static approximation are presented, both at finite and zero temperature. 
Systematic dynamical corrections to the spin-static phase diagram in the plane of temperature and hopping strength are calculated, and the location of the quantum critical point is determined.}, subject = {Spinglas}, language = {en} } @phdthesis{Bercx2014, author = {Bercx, Martin Helmut}, title = {Numerical studies of heavy-fermion systems and correlated topological insulators}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-116138}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {In this thesis, we investigate aspects of the physics of heavy-fermion systems and correlated topological insulators. We numerically solve the interacting Hamiltonians that model the physical systems using quantum Monte Carlo algorithms to access both ground-state and finite-temperature observables. Initially, we focus on the metamagnetic transition in the Kondo lattice model for heavy fermions. On the basis of the dynamical mean-field theory and the dynamical cluster approximation, our calculations point towards a continuous transition, where the signatures of metamagnetism are linked to a Lifshitz transition of heavy-fermion bands. In the second part of the thesis, we study various aspects of magnetic pi fluxes in the Kane-Mele-Hubbard model of a correlated topological insulator. We describe a numerical measurement of the topological index, based on the localized mid-gap states that are provided by pi flux insertions. Furthermore, we take advantage of the intrinsic spin degree of freedom of a pi flux to devise instances of interacting quantum spin systems. In the third part of the thesis, we introduce and characterize the Kane-Mele-Hubbard model on the pi flux honeycomb lattice. We place particular emphasis on the correlation effects along the one-dimensional boundary of the lattice and compare results from a bosonization study with finite-size quantum Monte Carlo simulations.}, subject = {Schwere-Fermionen-System}, language = {en} } @phdthesis{Berger2009, author = {Berger, Karsten}, title = {Discovery and Characterization of the first Low-Peaked and Intermediate-Peaked BL Lacertae Objects in the Very High Energy Gamma-Ray Regime}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-37431}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2009}, abstract = {Twenty years after the discovery of the Crab Nebula as a source of very high energy gamma-rays, the number of sources newly discovered above 100 GeV using ground-based Cherenkov telescopes has grown considerably, at the time of writing of this thesis to a total of 81. The sources are of different types, including galactic sources such as supernova remnants, pulsars, binary systems, or so-far unidentified accelerators, and extragalactic sources such as blazars and radio galaxies. The goal of this thesis work was to use the MAGIC telescope to search for gamma-ray emission from a particular type of blazar previously undetected at very high gamma-ray energies. Those blazars previously detected were all of the same type, the so-called high-peaked BL Lacertae objects. These sources emit purely non-thermal radiation and exhibit a peak in their radio-to-X-ray spectral energy distribution at X-ray energies. The entire blazar population extends from these rare, low-luminosity BL Lacertae objects with peaks at X-ray energies to the much more numerous, high-luminosity infrared-peaked radio quasars. Indeed, the low-peaked sources dominate the source counts obtained from space-borne observations at gamma-ray energies up to 10 GeV. 
Their spectra observed at lower gamma-ray energies show power-law extensions to higher energies, although theoretical models suggest that they turn over at energies below 100 GeV. This opened up the field for MAGIC, currently the Cherenkov telescope with the lowest energy threshold. In the framework of this thesis, the search was focused on the prominent sources BL Lac, W Comae and S5 0716+714. Two of these sources were unambiguously discovered at very high energy gamma-rays with the MAGIC telescope, based on the analysis of a total of about 150 hours' worth of data collected between 2005 and 2008. The analysis of this very large data set required novel techniques for treating the effects of twilight conditions on the data quality. This was successfully achieved and resulted in a vastly improved performance of the MAGIC telescope in monitoring campaigns. The detections of low-peaked and intermediate-peaked BL Lac objects are in line with theoretical expectations, but push the models based on electron shock acceleration and inverse-Compton cooling to their limits. The short variability time scales of the order of one day observed at very high energies show that the gamma-rays originate rather close to the putative supermassive black holes in the centers of blazars, corresponding to less than 1000 Schwarzschild radii when taking into account relativistic bulk motion.}, subject = {Aktiver galaktischer Kern}, language = {en} } @phdthesis{Beyl2020, author = {Beyl, Stefan}, title = {Hybrid Quantum Monte Carlo for Condensed Matter Models}, doi = {10.25972/OPUS-19122}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-191225}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {In this thesis we consider the hybrid quantum Monte Carlo method for simulations of the Hubbard and Su-Schrieffer-Heeger models. In the first instance, we discuss the hybrid quantum Monte Carlo method for the Hubbard model on a square lattice. We point out potential ergodicity issues and provide a way to circumvent them by a complexification of the method. Furthermore, we compare the efficiency of the hybrid quantum Monte Carlo method with a well-established determinantal quantum Monte Carlo method for simulations of the half-filled Hubbard model on square lattices. One reason why the hybrid quantum Monte Carlo method loses the comparison is that we do not observe the desired sub-quadratic scaling of the numerical effort. Afterwards we present a formulation of the hybrid quantum Monte Carlo method for the Su-Schrieffer-Heeger model in two dimensions. Electron-phonon models like this are in general very hard to simulate using other Monte Carlo methods in more than one dimension. It turns out that the hybrid quantum Monte Carlo method is much better suited for this model. We achieve favorable scaling properties and provide a proof of concept. Subsequently, we use the hybrid quantum Monte Carlo method to investigate the Su-Schrieffer-Heeger model in detail at half-filling in two dimensions. We present numerical data for staggered valence bond order at small phonon frequencies and an antiferromagnetic order at high frequencies. Due to an O(4) symmetry, the antiferromagnetic order is connected to a superconducting charge density wave. Considering the Su-Schrieffer-Heeger model without tight-binding hopping reveals an additional unconstrained Z_2 gauge theory. 
In this case, we find indications for π-fluxes and a possible Z_2 Dirac deconfined phase, as well as for a columnar valence bond ordered state at low phonon energies. In our investigation of the various phase transitions, we discuss possible underlying mechanisms and provide first insights into a rich phase diagram.}, subject = {Monte-Carlo-Simulation}, language = {en} } @phdthesis{BolanosRosales2016, author = {Bola{\~n}os-Rosales, Alejandro}, title = {Low Mach Number Simulations of Convective Boundary Mixing in Classical Novae}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-132863}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2016}, abstract = {Classical novae are thermonuclear explosions occurring on the surface of white dwarfs. When the white dwarf co-exists in a binary system with a main sequence or more evolved star, mass accretion from the companion star onto the white dwarf can take place if the companion overflows its Roche lobe. The envelope of hydrogen-rich matter which builds up on top of the white dwarf eventually ignites under degenerate conditions, leading to a thermonuclear runaway and an explosion of the order of \(10^{46}\) erg, while leaving the white dwarf intact. Spectral analyses of the debris indicate an abundance of isotopes that are tracers of nuclear burning via the hot CNO cycle, which in turn reveals some sort of mixing between the envelope and the white dwarf underneath. The exact mechanism is still a matter of debate. The convection and deflagration in novae develop in the low Mach number regime. We used the Seven League Hydro code (SLH), which employs numerical schemes designed to correctly simulate low Mach number flows, to perform two- and three-dimensional simulations of classical novae. Based on a spherically symmetric model created with the aid of a stellar evolution code, we developed our own nova model and tested it on a variety of numerical grids and boundary conditions for validation. We focused on the evolution of temperature, density and nuclear energy generation rate at the layers between white dwarf and envelope, where most of the energy is generated, in order to understand the structure of the transition region and its effect on the nuclear burning. We analyzed the dredge-up efficiency stemming from the convective motions in the envelope. Our models yield results similar to those in the literature, but seem to depend very strongly on the numerical resolution. We followed the evolution of the nuclear species involved in the CNO cycle and concluded that the thermonuclear reactions primarily taking place are those of the cold and not the hot CNO cycle. The reason behind this could be that, under the conditions generally assumed for multi-dimensional simulations, the envelope is in fact not degenerate. We performed initial tests for 3D simulations and realized that alternative boundary conditions are needed.}, subject = {Nova}, language = {en} }