@phdthesis{Simon2021, author = {Simon, Camilla}, title = {Financial Market Effects of Macroeconomic Policies}, doi = {10.25972/OPUS-21765}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-217654}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Within three self-contained studies, this dissertation studies the impact and interactions between different macroeconomic policy measures in the context of financial markets empirically and quantitatively. The first study of this dissertation sheds light on the financial market effects of unconventional central bank asset purchase programs in the Eurozone, in particular sovereign bond asset purchase programs. The second study quantifies the direct implications of unconventional monetary policy on decisions by German public debt management regarding the maturity structure of gross issuance. The third study provides novel evidence on the role of private credit markets in the propagation of public spending toward private consumption in the U.S. economy. Across these three studies a set of different time-series econometric methods is applied including error correction models and event study frameworks to analyze contemporaneous interactions in financial and macroeconomic data in the context of unconventional monetary policy, as well as vector auto regressions (VARs) and local projections to trace the dynamic consequences of macroeconomic policies over time.}, subject = {Geldpolitik}, language = {en} } @phdthesis{Bauer2021, author = {Bauer, Andr{\'e}}, title = {Automated Hybrid Time Series Forecasting: Design, Benchmarking, and Use Cases}, doi = {10.25972/OPUS-22025}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-220255}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {These days, we are living in a digitalized world. 
Both our professional and private lives are pervaded by various IT services, which are typically operated using distributed computing systems (e.g., cloud environments). Due to the high level of digitalization, the operators of such systems are confronted with fast-paced and changing requirements. In particular, cloud environments have to cope with load fluctuations and respective rapid and unexpected changes in the computing resource demands. To face this challenge, so-called auto-scalers, such as the threshold-based mechanism in Amazon Web Services EC2, can be employed to enable elastic scaling of the computing resources. However, despite this opportunity, business-critical applications are still run with highly overprovisioned resources to guarantee a stable and reliable service operation. This strategy is pursued due to the lack of trust in auto-scalers and the concern that inaccurate or delayed adaptations may result in financial losses. To adapt the resource capacity in time, the future resource demands must be "foreseen", as reacting to changes once they are observed introduces an inherent delay. In other words, accurate forecasting methods are required to adapt systems proactively. A powerful approach in this context is time series forecasting, which is also applied in many other domains. The core idea is to examine past values and predict how these values will evolve as time progresses. According to the "No-Free-Lunch Theorem", there is no algorithm that performs best for all scenarios. Therefore, selecting a suitable forecasting method for a given use case is a crucial task. Simply put, each method has its benefits and drawbacks, depending on the specific use case. The choice of the forecasting method is usually based on expert knowledge, which cannot be fully automated, or on trial-and-error. In both cases, this is expensive and prone to error. 
Although auto-scaling and time series forecasting are established research fields, existing approaches cannot fully address the mentioned challenges: (i) In our survey on time series forecasting, we found that publications on time series forecasting typically consider only a small set of (mostly related) methods and evaluate their performance on a small number of time series with only a few error measures while providing no information on the execution time of the studied methods. Therefore, such articles cannot be used to guide the choice of an appropriate method for a particular use case; (ii) Existing open-source hybrid forecasting methods that take advantage of at least two methods to tackle the "No-Free-Lunch Theorem" are computationally intensive, poorly automated, designed for a particular data set, or they lack a predictable time-to-result. Methods exhibiting a high variance in the time-to-result cannot be applied for time-critical scenarios (e.g., auto-scaling), while methods tailored to a specific data set introduce restrictions on the possible use cases (e.g., forecasting only annual time series); (iii) Auto-scalers typically scale an application either proactively or reactively. Even though some hybrid auto-scalers exist, they lack sophisticated solutions to combine reactive and proactive scaling. For instance, resources are only released proactively while resource allocation is entirely done in a reactive manner (inherently delayed); (iv) The majority of existing mechanisms do not take the provider's pricing scheme into account while scaling an application in a public cloud environment, which often results in excessive charged costs. Even though some cost-aware auto-scalers have been proposed, they only consider the current resource demands, neglecting their development over time. For example, resources are often shut down prematurely, even though they might be required again soon. 
To address the mentioned challenges and the shortcomings of existing work, this thesis presents three contributions: (i) The first contribution-a forecasting benchmark-addresses the problem of limited comparability between existing forecasting methods; (ii) The second contribution-Telescope-provides an automated hybrid time series forecasting method addressing the challenge posed by the "No-Free-Lunch Theorem"; (iii) The third contribution-Chamulteon-provides a novel hybrid auto-scaler for coordinated scaling of applications comprising multiple services, leveraging Telescope to forecast the workload intensity as a basis for proactive resource provisioning. In the following, the three contributions of the thesis are summarized: Contribution I - Forecasting Benchmark To establish a level playing field for evaluating the performance of forecasting methods in a broad setting, we propose a novel benchmark that automatically evaluates and ranks forecasting methods based on their performance in a diverse set of evaluation scenarios. The benchmark comprises four different use cases, each covering 100 heterogeneous time series taken from different domains. The data set was assembled from publicly available time series and was designed to exhibit much higher diversity than existing forecasting competitions. Besides proposing a new data set, we introduce two new measures that describe different aspects of a forecast. We applied the developed benchmark to evaluate Telescope. Contribution II - Telescope To provide a generic forecasting method, we introduce a novel machine learning-based forecasting approach that automatically retrieves relevant information from a given time series. More precisely, Telescope automatically extracts intrinsic time series features and then decomposes the time series into components, building a forecasting model for each of them. 
Each component is forecast by applying a different method and then the final forecast is assembled from the forecast components by employing a regression-based machine learning algorithm. In more than 1300 hours of experiments benchmarking 15 competing methods (including approaches from Uber and Facebook) on 400 time series, Telescope outperformed all methods, exhibiting the best forecast accuracy coupled with a low and reliable time-to-result. Compared to the competing methods that exhibited, on average, a forecast error (more precisely, the symmetric mean absolute forecast error) of 29\%, Telescope exhibited an error of 20\% while being 2556 times faster. In particular, the methods from Uber and Facebook exhibited an error of 48\% and 36\%, and were 7334 and 19 times slower than Telescope, respectively. Contribution III - Chamulteon To enable reliable auto-scaling, we present a hybrid auto-scaler that combines proactive and reactive techniques to scale distributed cloud applications comprising multiple services in a coordinated and cost-effective manner. More precisely, proactive adaptations are planned based on forecasts of Telescope, while reactive adaptations are triggered based on actual observations of the monitored load intensity. To solve occurring conflicts between reactive and proactive adaptations, a complex conflict resolution algorithm is implemented. Moreover, when deployed in public cloud environments, Chamulteon reviews adaptations with respect to the cloud provider's pricing scheme in order to minimize the charged costs. In more than 400 hours of experiments evaluating five competing auto-scaling mechanisms in scenarios covering five different workloads, four different applications, and three different cloud environments, Chamulteon exhibited the best auto-scaling performance and reliability while at the same time reducing the charged costs. 
The competing methods provided insufficient resources for (on average) 31\% of the experimental time; in contrast, Chamulteon cut this time to 8\% and the SLO (service level objective) violations from 18\% to 6\% while using up to 15\% less resources and reducing the charged costs by up to 45\%. The contributions of this thesis can be seen as major milestones in the domain of time series forecasting and cloud resource management. (i) This thesis is the first to present a forecasting benchmark that covers a variety of different domains with a high diversity between the analyzed time series. Based on the provided data set and the automatic evaluation procedure, the proposed benchmark contributes to enhance the comparability of forecasting methods. The benchmarking results for different forecasting methods enable the selection of the most appropriate forecasting method for a given use case. (ii) Telescope provides the first generic and fully automated time series forecasting approach that delivers both accurate and reliable forecasts while making no assumptions about the analyzed time series. Hence, it eliminates the need for expensive, time-consuming, and error-prone procedures, such as trial-and-error searches or consulting an expert. This opens up new possibilities especially in time-critical scenarios, where Telescope can provide accurate forecasts with a short and reliable time-to-result. Although Telescope was applied for this thesis in the field of cloud computing, there is absolutely no limitation regarding the applicability of Telescope in other domains, as demonstrated in the evaluation. Moreover, Telescope, which was made available on GitHub, is already used in a number of interdisciplinary data science projects, for instance, predictive maintenance in an Industry 4.0 context, heart failure prediction in medicine, or as a component of predictive models of beehive development. 
(iii) In the context of cloud resource management, Chamulteon is a major milestone for increasing the trust in cloud auto-scalers. The complex resolution algorithm enables reliable and accurate scaling behavior that reduces losses caused by excessive resource allocation or SLO violations. In other words, Chamulteon provides reliable online adaptations minimizing charged costs while at the same time maximizing user experience.}, subject = {Zeitreihenanalyse}, language = {en} } @phdthesis{Dechert2014, author = {Dechert, Andreas}, title = {Fraktionale Integration und Kointegration in Theorie und Praxis}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-110028}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {Das Ziel der Arbeit ist eine Zusammenfassung {\"u}ber den Stand der Forschung {\"u}ber das Thema der fraktionalen Integration und Kointegration sowie Weiterentwicklungen der aktuellen Methoden im Hinblick darauf, dass sie robuster auf eine Reihe von empirischen Gegebenheiten anwendbar sind. Hierzu wurden insbesondere die M{\"o}glichkeiten von Strukturbr{\"u}chen in deterministischen Prozessanteilen vorgeschlagen sowie deren Auswirkungen auf Sch{\"a}tzeigenschaften analysiert. Mit diesem Wissen k{\"o}nnen Sch{\"a}tzstrategien entwickelt werden, die auch im empirischen Teil der Arbeit angewandt wurden. Der Aufbau der Arbeit gestaltet sich so, dass nach der Einleitung und Problemstellung im zweiten Kapitel der Arbeit zun{\"a}chst in die Zeitreihenanalyse eingef{\"u}hrt wird. Hierbei wird auch eine intuitive Motivation f{\"u}r die Betrachtung von Long-Memory-Prozessen gegeben. Diese gestaltet sich so, dass der klassischerweise als ganzzahlig angenommene Integrationsgrad eines Prozesses nun jede beliebige Zahl, also auch Br{\"u}che, annehmen kann. 
Diese Annahme f{\"u}hrt wiederum dazu, dass hiermit sehr langfristige Abh{\"a}ngigkeiten von Zeitreihen effizient beschrieben werden k{\"o}nnen, da diese lediglich von einem einzigen Parameter abh{\"a}ngen. Die Sch{\"a}tzung dieses nunmehr fraktionalen Integrationsgrads wird im dritten Kapitel ausf{\"u}hrlich beschrieben und in mehreren Simulationsstudien ausgiebig analysiert. Hierzu werden neben parametrischen Sch{\"a}tzmethoden, die einer genauen Spezifizierung der Korrelationsstruktur von Zeitreihen bed{\"u}rfen, auch semiparametrische Methoden angef{\"u}hrt, die in der Praxis robuster einsetzbar sind, da ihre Sch{\"a}tzgenauigkeit und Effizienz nicht von einer korrekten Klassifizierung von sog. Short-Memory-Komponenten beeinflusst werden. Die Analyse dieser Methode erfolgt in erster Linie im Hinblick auf eine empirische Anwendbarkeit und bietet auch als Ergebnis Empfehlungen f{\"u}r eine optimale Sch{\"a}tzstrategie. Das vierte Kapitel besch{\"a}ftigt sich in erster Linie mit Integrationstests wie z.B. Einheitswurzeltests und deren Anwendbarkeit bei Existenz von Long-Memory-Prozessbestandteilen. Dar{\"u}ber hinaus werden auch Sch{\"a}tz- und Testmethoden f{\"u}r das Vorliegen von deterministischen Trends thematisiert, die wiederum auch die M{\"o}glichkeit von Strukturbr{\"u}chen zulassen. Eine multivariate Betrachtungsweise erm{\"o}glicht das f{\"u}nfte Kapitel mit der Einf{\"u}hrung der fraktionalen Kointegration. Auch liegt der Fokus der Arbeit darin, die empirische Anwendbarkeit zu verbessern, indem in Simulationsstudien Effekte von empirischen Gegebenheiten - wie Strukturbr{\"u}che - analysiert und optimale Sch{\"a}tzstrategien entwickelt werden. Im sechsten Kapitel der Arbeit wird im Rahmen der {\"o}konomischen Theorie der Markterwartungshypothese die Verzinsung deutscher im Zeitraum Oktober 1998 bis November 2011 untersucht. 
Diese Hypothese impliziert, dass zwischen den einzelnen Zinss{\"a}tzen eine multivariate Beziehung in Form von Kointegrationsbeziehungen bestehen sollte, da die Zinssatzdifferenzen einer Liquidit{\"a}tspr{\"a}mie entsprechen. Von dieser wurde in bisherigen Studien angenommen, dass sie station{\"a}r ist, d.h. dass sie allenfalls eine Short-Memory-Eigenschaft aufweist, welche nur relativ kurzfristige Abh{\"a}ngigkeit impliziert. Von dieser Sichtweise l{\"o}st sich die Arbeit, indem sie die M{\"o}glichkeit von fraktionalen Kointegrationsbeziehungen erm{\"o}glicht, die eine Aussage {\"u}ber die Persistenz der Liquidit{\"a}tspr{\"a}mie erm{\"o}glicht. Im Rahmen dieser Analyse konnten eine Reihe interessanter Erkenntnisse gewonnen werden, wie z.B. dass das Ausmaß der Persistenz (d.h. die Tr{\"a}gheit der Anpassung auf {\"o}konomische Schocks) mit ansteigender Laufzeitdifferenz sukzessive gr{\"o}ßer wird und auch nicht mehr durch klassisch angenommene Prozessstrukturen erkl{\"a}rt werden kann. 
Nichtsdestotrotz k{\"o}nnen die Ergebnisse der empirischen Analyse die Annahmen der Markterwartungshypothese nicht best{\"a}tigen, da insbesondere der Integrationsgrad f{\"u}r sehr lange Laufzeitdifferenzen so groß ausf{\"a}llt, dass selbst eine relativ schwache fraktionale Kointegrationsbeziehung abgelehnt werden muss.}, subject = {Zeitreihenanalyse}, language = {de} } @book{FalkMarohnMicheletal.2012, author = {Falk, Michael and Marohn, Frank and Michel, Ren{\'e} and Hofmann, Daniel and Macke, Maria and Spachmann, Christoph and Englert, Stefan}, title = {A First Course on Time Series Analysis : Examples with SAS [Version 2012.August.01]}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-72617}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {The analysis of real data by means of statistical methods with the aid of a software package common in industry and administration usually is not an integral part of mathematics studies, but it will certainly be part of a future professional work. The present book links up elements from time series analysis with a selection of statistical procedures used in general practice including the statistical software package SAS. Consequently this book addresses students of statistics as well as students of other branches such as economics, demography and engineering, where lectures on statistics belong to their academic training. But it is also intended for the practician who, beyond the use of statistical tools, is interested in their mathematical background. Numerous problems illustrate the applicability of the presented statistical procedures, where SAS gives the solutions. The programs used are explicitly listed and explained. No previous experience is expected neither in SAS nor in a special computer system so that a short training period is guaranteed. 
This book is meant for a two semester course (lecture, seminar or practical training) where the first three chapters can be dealt with in the first semester. They provide the principal components of the analysis of a time series in the time domain. Chapters 4, 5 and 6 deal with its analysis in the frequency domain and can be worked through in the second term. In order to understand the mathematical background some terms are useful such as convergence in distribution, stochastic convergence, maximum likelihood estimator as well as a basic knowledge of the test theory, so that work on the book can start after an introductory lecture on stochastics. Each chapter includes exercises. An exhaustive treatment is recommended. Chapter 7 (case study) deals with a practical case and demonstrates the presented methods. It is possible to use this chapter independently in a seminar or practical training course, if the concepts of time series analysis are already well understood. This book is consecutively subdivided in a statistical part and an SAS-specific part. For better clearness the SAS-specific parts are highlighted. This book is an open source project under the GNU Free Documentation License.}, subject = {Zeitreihenanalyse}, language = {en} } @phdthesis{Cord2012, author = {Cord, Anna}, title = {Potential of multi-temporal remote sensing data for modeling tree species distributions and species richness in Mexico}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-71021}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2012}, abstract = {Current changes of biodiversity result almost exclusively from human activities. This anthropogenic conversion of natural ecosystems during the last decades has led to the so-called 'biodiversity crisis', which comprises the loss of species as well as changes in the global distribution patterns of organisms. Species richness is unevenly distributed worldwide. 
Altogether, 17 so-called 'megadiverse' nations cover less than 10\% of the earth's land surface but support nearly 70\% of global species richness. Mexico, the study area of this thesis, is one of those countries. However, due to Mexico's large extent and geographical complexity, it is impossible to conduct reliable and spatially explicit assessments of species distribution ranges based on these collection data and field work alone. In the last two decades, Species distribution models (SDMs) have been established as important tools for extrapolating such in situ observations. SDMs analyze empirical correlations between geo-referenced species occurrence data and environmental variables to obtain spatially explicit surfaces indicating the probability of species occurrence. Remote sensing can provide such variables which describe biophysical land surface characteristics with high effective spatial resolutions. Especially during the last three to five years, the number of studies making use of remote sensing data for modeling species distributions has therefore multiplied. Due to the novelty of this field of research, the published literature consists mostly of selective case studies. A systematic framework for modeling species distributions by means of remote sensing is still missing. This research gap was taken up by this thesis and specific studies were designed which addressed the combination of climate and remote sensing data in SDMs, the suitability of continuous remote sensing variables in comparison with categorical land cover classification data, the criteria for selecting appropriate remote sensing data depending on species characteristics, and the effects of inter-annual variability in remotely sensed time series on the performance of species distribution models. The corresponding novel analyses were conducted with the Maximum Entropy algorithm developed by Phillips et al. (2004). 
In this thesis, a more comprehensive set of remote sensing predictors than in the existing literature was utilized for species distribution modeling. The products were selected based on their ecological relevance for characterizing species distributions. Two 1 km Terra-MODIS Land 16-day composite standard products including the Enhanced Vegetation Index (EVI), Reflectance Data, and Land Surface Temperature (LST) were assembled into enhanced time series for the time period of 2001 to 2009. These high-dimensional time series data were then transformed into 18 phenological and 35 statistical metrics that were selected based on an extensive literature review. Spatial distributions of twelve tree species were modeled in a hierarchical framework which integrated climate (WorldClim) and MODIS remote sensing data. The species are representative of the major Mexican forest types and cover a variety of ecological traits, such as range size and biotope specificity. Trees were selected because they have a high probability of detection in the field and since mapping vegetation has a long tradition in remote sensing. 
The result of this thesis showed that the integration of remote sensing data into species distribution models has a significant potential for improving both spatial detail and accuracy of the model predictions.}, subject = {Fernerkundung}, language = {en} } @book{FalkMarohnMicheletal.2011, author = {Falk, Michael and Marohn, Frank and Michel, Ren{\'e} and Hofmann, Daniel and Macke, Maria and Tewes, Bernward and Dinges, Peter and Spachmann, Christoph and Englert, Stefan}, title = {A First Course on Time Series Analysis : Examples with SAS}, organization = {Universit{\"a}t W{\"u}rzburg / Lehrstuhl f{\"u}r Statistik}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-56489}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {2011}, abstract = {The analysis of real data by means of statistical methods with the aid of a software package common in industry and administration usually is not an integral part of mathematics studies, but it will certainly be part of a future professional work. The present book links up elements from time series analysis with a selection of statistical procedures used in general practice including the statistical software package SAS. Consequently this book addresses students of statistics as well as students of other branches such as economics, demography and engineering, where lectures on statistics belong to their academic training. But it is also intended for the practician who, beyond the use of statistical tools, is interested in their mathematical background. Numerous problems illustrate the applicability of the presented statistical procedures, where SAS gives the solutions. The programs used are explicitly listed and explained. No previous experience is expected neither in SAS nor in a special computer system so that a short training period is guaranteed. This book is meant for a two semester course (lecture, seminar or practical training) where the first three chapters can be dealt with in the first semester. 
They provide the principal components of the analysis of a time series in the time domain. Chapters 4, 5 and 6 deal with its analysis in the frequency domain and can be worked through in the second term. In order to understand the mathematical background some terms are useful such as convergence in distribution, stochastic convergence, maximum likelihood estimator as well as a basic knowledge of the test theory, so that work on the book can start after an introductory lecture on stochastics. Each chapter includes exercises. An exhaustive treatment is recommended. Chapter 7 (case study) deals with a practical case and demonstrates the presented methods. It is possible to use this chapter independently in a seminar or practical training course, if the concepts of time series analysis are already well understood. This book is consecutively subdivided in a statistical part and an SAS-specific part. For better clearness the SAS-specific parts are highlighted. This book is an open source project under the GNU Free Documentation License.}, subject = {Zeitreihenanalyse}, language = {en} } @book{FalkMarohnMicheletal.2006, author = {Falk, Michael and Marohn, Frank and Michel, Ren{\'e} and Hofmann, Daniel and Macke, Maria and Tewes, Bernward and Dinges, Peter}, title = {A First Course on Time Series Analysis : Examples with SAS}, organization = {Universit{\"a}t W{\"u}rzburg / Lehrstuhl f{\"u}r Statistik}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-16919}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {The analysis of real data by means of statistical methods with the aid of a software package common in industry and administration usually is not an integral part of mathematics studies, but it will certainly be part of a future professional work. 
The present book links up elements from time series analysis with a selection of statistical procedures used in general practice including the statistical software package SAS (Statistical Analysis System). Consequently this book addresses students of statistics as well as students of other branches such as economics, demography and engineering, where lectures on statistics belong to their academic training. But it is also intended for the practician who, beyond the use of statistical tools, is interested in their mathematical background. Numerous problems illustrate the applicability of the presented statistical procedures, where SAS gives the solutions. The programs used are explicitly listed and explained. No previous experience is expected neither in SAS nor in a special computer system so that a short training period is guaranteed. This book is meant for a two semester course (lecture, seminar or practical training) where the first two chapters can be dealt with in the first semester. They provide the principal components of the analysis of a time series in the time domain. Chapters 3, 4 and 5 deal with its analysis in the frequency domain and can be worked through in the second term. In order to understand the mathematical background some terms are useful such as convergence in distribution, stochastic convergence, maximum likelihood estimator as well as a basic knowledge of the test theory, so that work on the book can start after an introductory lecture on stochastics. Each chapter includes exercises. An exhaustive treatment is recommended. This book is consecutively subdivided in a statistical part and an SAS-specific part. For better clearness the SAS-specific part, including the diagrams generated with SAS, always starts with a computer symbol, representing the beginning of a session at the computer, and ends with a printer symbol for the end of this session. 
This book is an open source project under the GNU Free Documentation License.}, subject = {Zeitreihenanalyse}, language = {en} } @book{FalkMarohnMicheletal.2005, author = {Falk, Michael and Marohn, Frank and Michel, Ren{\'e} and Hofmann, Daniel and Macke, Maria and Tewes, Bernward and Dinges, Peter}, title = {A First Course on Time Series Analysis : Examples with SAS}, organization = {Universit{\"a}t W{\"u}rzburg / Lehrstuhl f{\"u}r Statistik, Universit{\"a}t Eichst{\"a}tt/Rechenzentrum}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-12593}, publisher = {Universit{\"a}t W{\"u}rzburg}, year = {2005}, abstract = {The analysis of real data by means of statistical methods with the aid of a software package common in industry and administration usually is not an integral part of mathematics studies, but it will certainly be part of a future professional work. The present book links up elements from time series analysis with a selection of statistical procedures used in general practice including the statistical software package SAS (Statistical Analysis System). Consequently this book addresses students of statistics as well as students of other branches such as economics, demography and engineering, where lectures on statistics belong to their academic training. But it is also intended for the practician who, beyond the use of statistical tools, is interested in their mathematical background. Numerous problems illustrate the applicability of the presented statistical procedures, where SAS gives the solutions. The programs used are explicitly listed and explained. No previous experience is expected neither in SAS nor in a special computer system so that a short training period is guaranteed. This book is meant for a two semester course (lecture, seminar or practical training) where the first two chapters can be dealt with in the first semester. They provide the principal components of the analysis of a time series in the time domain. 
Chapters 3, 4 and 5 deal with its analysis in the frequency domain and can be worked through in the second term. In order to understand the mathematical background some terms are useful such as convergence in distribution, stochastic convergence, maximum likelihood estimator as well as a basic knowledge of the test theory, so that work on the book can start after an introductory lecture on stochastics. Each chapter includes exercises. An exhaustive treatment is recommended. This book is consecutively subdivided in a statistical part and an SAS-specific part. For better clearness the SAS-specific part, including the diagrams generated with SAS, always starts with a computer symbol, representing the beginning of a session at the computer, and ends with a printer symbol for the end of this session. This book is an open source project under the GNU Free Documentation License.}, subject = {Zeitreihenanalyse}, language = {en} }