@phdthesis{Dechert2014, author = {Dechert, Andreas}, title = {Fraktionale Integration und Kointegration in Theorie und Praxis}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-110028}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {The aim of this thesis is to summarize the state of research on fractional integration and cointegration and to develop the current methods further so that they can be applied more robustly to a range of empirical settings. To this end, the possibility of structural breaks in the deterministic components of processes is considered in particular, and their effects on estimation properties are analyzed. With this knowledge, estimation strategies can be developed, which are also applied in the empirical part of the thesis. The thesis is structured as follows: after the introduction and statement of the problem, the second chapter introduces time series analysis. It also gives an intuitive motivation for considering long-memory processes, namely that the order of integration of a process, classically assumed to be an integer, may now take any value, including fractions. This assumption in turn allows very long-run dependencies in time series to be described efficiently, since they depend on a single parameter only. The estimation of this now fractional order of integration is described in detail in the third chapter and analyzed extensively in several simulation studies. In addition to parametric estimation methods, which require an exact specification of the correlation structure of the time series, semiparametric methods are presented, which are more robust in practical use because their accuracy and efficiency are not affected by the classification of so-called short-memory components. These methods are analyzed primarily with a view to their empirical applicability, and the results include recommendations for an optimal estimation strategy. The fourth chapter deals primarily with integration tests, such as unit root tests, and their applicability in the presence of long-memory components. In addition, estimation and testing methods for the presence of deterministic trends are discussed, which in turn allow for the possibility of structural breaks. The fifth chapter opens up a multivariate perspective by introducing fractional cointegration. Here, too, the focus is on improving empirical applicability by analyzing the effects of empirical features, such as structural breaks, in simulation studies and by developing optimal estimation strategies. The sixth chapter examines German interest rates over the period from October 1998 to November 2011 within the framework of the market expectations hypothesis. This hypothesis implies that there should be a multivariate relationship between the individual interest rates in the form of cointegration relations, since the interest rate spreads correspond to a liquidity premium. Previous studies assumed this premium to be stationary, i.e., to exhibit at most a short-memory property, which implies only relatively short-run dependence. The thesis departs from this view by allowing for fractional cointegration relations, which permit statements about the persistence of the liquidity premium. This analysis yields a number of interesting findings, for example that the degree of persistence (i.e., the sluggishness of the adjustment to economic shocks) increases successively with the maturity spread and can no longer be explained by classically assumed process structures. Nevertheless, the results of the empirical analysis cannot confirm the assumptions of the market expectations hypothesis, since the order of integration for very long maturity spreads in particular turns out to be so large that even a relatively weak fractional cointegration relation has to be rejected.}, subject = {Zeitreihenanalyse}, language = {de} }
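As a concrete illustration of the semiparametric estimation analyzed in the third chapter of Dechert (2014), the following sketch simulates a long-memory series and recovers its fractional order of integration d with the log-periodogram (GPH) estimator. It is not code from the thesis; it assumes the CRAN package "fracdiff".

# Illustrative sketch, not taken from the thesis: simulate an ARFIMA(0, d, 0)
# process and estimate d semiparametrically with the GPH estimator.
library(fracdiff)

set.seed(42)
d_true <- 0.3                                   # true fractional order of integration
x <- fracdiff.sim(n = 2000, d = d_true)$series  # simulated long-memory series

# GPH regresses the log-periodogram on log frequency near the origin; the
# bandwidth exponent controls how many low frequencies enter the regression.
fit <- fdGPH(x, bandw.exp = 0.5)
c(d_hat = fit$d, se = fit$sd.reg)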
@phdthesis{Schamberger2022, author = {Schamberger, Tamara Svenja}, title = {Methodological Advances in Composite-based Structural Equation Modeling}, isbn = {978-90-365-5375-9}, doi = {10.3990/1.9789036553759}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-276794}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {This thesis is about composite-based structural equation modeling. Structural equation modeling in general can be used to model both theoretical concepts and their relations to one another. In traditional factor-based structural equation modeling, these theoretical concepts are modeled as common factors, i.e., as latent variables which explain the covariance structure of their observed variables. In contrast, in composite-based structural equation modeling, the theoretical concepts can be modeled both as common factors and as composites, i.e., as linear combinations of observed variables that convey all the information between their observed variables and all other variables in the model. This thesis presents some methodological advancements in the field of composite-based structural equation modeling. In all, this thesis is made up of seven chapters. Chapter 1 provides an overview of the underlying model and explicates the meaning of the term composite-based structural equation modeling. Chapter 2 gives guidelines on how to perform Monte Carlo simulations in the statistical software R using the package "cSEM" with various estimators in the context of composite-based structural equation modeling. These guidelines are illustrated by an example simulation study that investigates the finite sample behavior of partial least squares path modeling (PLS-PM) and consistent partial least squares (PLSc) estimates, particularly regarding the consequences of sample correlations between measurement errors for statistical inference. Chapter 3 presents estimators of composite-based structural equation modeling that are robust to distortion by outliers. For this purpose, the composite-based estimators PLS-PM and PLSc are adapted. Unlike the original estimators, these adaptations can avoid distortion that could arise from random outliers in samples, as is demonstrated in a simulation study. Chapter 4 presents an approach to performing predictions based on models estimated with ordinal partial least squares and ordinal consistent partial least squares. Here, the observed variables lie on an ordinal categorical scale, which is explicitly taken into account in both estimation and prediction. The prediction performance is evaluated by means of a simulation study. In addition, the chapter gives guidelines on how to perform such predictions using the R package "cSEM". This is demonstrated by means of an empirical example. Chapter 5 introduces confirmatory composite analysis (CCA) for research in "Human Development". Using CCA, composite models can be estimated and assessed. This chapter uses the Henseler-Ogasawara specification for composite models, allowing, for example, the maximum likelihood method to be used for parameter estimation. Since the maximum likelihood estimator based on the Henseler-Ogasawara specification has limitations, Chapter 6 presents another specification of the composite model by means of which composite models can be estimated with the maximum likelihood method. The results of this maximum likelihood estimator are compared with those of PLS-PM, showing that this maximum likelihood estimator gives valid results even in finite samples. The last chapter, Chapter 7, gives an overview of the development and different strands of composite-based structural equation modeling. Additionally, I examine the contribution the previous chapters make to the wider dissemination of composite-based structural equation modeling.}, subject = {Structural Equation Modeling}, language = {en} }
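The guidelines of Chapter 2 on Monte Carlo simulations with "cSEM" can be sketched as follows. This is a minimal, illustrative skeleton of mine, not the thesis's study: the model, the data-generating process, and the way the path estimate is extracted from the result object are assumptions, and it requires the CRAN packages "cSEM" and "MASS".

# Minimal Monte Carlo skeleton (illustrative only): repeatedly draw samples,
# estimate a simple model with cSEM, and inspect the finite-sample behavior
# of one path coefficient. .disattenuate = TRUE gives PLSc, FALSE gives PLS-PM.
library(cSEM)
library(MASS)

model <- "
  eta2 ~ eta1            # structural model
  eta1 =~ y1 + y2 + y3   # measurement model (common factor)
  eta2 =~ y4 + y5 + y6
"

Sigma <- diag(6); Sigma[Sigma == 0] <- 0.3  # assumed population correlations

set.seed(123)
estimates <- replicate(200, {
  dat <- as.data.frame(mvrnorm(n = 300, mu = rep(0, 6), Sigma = Sigma))
  names(dat) <- paste0("y", 1:6)
  res <- csem(.data = dat, .model = model, .disattenuate = TRUE)
  res$Estimates$Path_estimates["eta2", "eta1"]  # field access as in cSEM's result object
})
c(mean = mean(estimates), sd = sd(estimates))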
@phdthesis{Schuberth2019, author = {Schuberth, Florian}, title = {Composite-based Methods in Structural Equation Modeling}, doi = {10.25972/OPUS-15465}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-154653}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {This dissertation deals with composite-based methods for structural equation models with latent variables and their enhancement. It comprises five chapters. Besides a brief introduction in the first chapter, the remaining chapters, consisting of four essays, cover the results of my PhD studies. Two of the essays have already been published in an international journal. The first essay considers an alternative way of construct modeling in structural equation modeling. While in the social and behavioral sciences theoretical constructs are typically modeled as common factors, in other sciences the common factor model is an inadequate way of modeling constructs due to its assumptions. This essay introduces confirmatory composite analysis (CCA), analogous to confirmatory factor analysis (CFA). In contrast to CFA, CCA models theoretical constructs as composites instead of common factors. Besides the theoretical presentation of CCA and its assumptions, a Monte Carlo simulation is conducted which demonstrates that misspecifications of the composite model can be detected by the introduced test for overall model fit. The second essay raises the question of how parameter differences can be assessed in the framework of partial least squares path modeling. Since the standard errors of the estimated parameters have no closed analytical form, the t- and F-tests known from regression analysis cannot be used directly to test for parameter differences. However, bootstrapping provides a solution to this problem. It can be employed to construct confidence intervals for the estimated parameter differences, which can be used for making inferences about the parameter difference in the population. To guide practitioners, guidelines are developed and demonstrated by means of empirical examples. The third essay answers the question of how ordinal categorical indicators can be dealt with in partial least squares path modeling. A new consistent estimator is developed which combines the polychoric correlation and partial least squares path modeling to appropriately deal with the qualitative character of ordinal categorical indicators. The new estimator, named ordinal consistent partial least squares, combines consistent partial least squares with ordinal partial least squares. Besides its derivation, a Monte Carlo simulation is conducted which shows that the new estimator performs well in finite samples. Moreover, for illustration, an empirical example is estimated by ordinal consistent partial least squares. The last essay introduces a new consistent estimator for polynomial factor models. As in consistent partial least squares, weights are determined to build stand-ins for the latent variables; however, a non-iterative approach is used. A Monte Carlo simulation shows that the new estimator behaves well in finite samples.}, subject = {Strukturgleichungsmodell}, language = {en} }
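The bootstrap procedure for parameter differences described in the second essay can be sketched in a few lines. For self-containment, the example applies the idea to a plain linear regression rather than to PLS path modeling, with simulated data; the percentile-interval logic is the same.

# Percentile bootstrap interval for the difference of two coefficients
# (simulated data; a stand-in for the PLS setting discussed above).
set.seed(1)
n  <- 200
x1 <- rnorm(n); x2 <- rnorm(n)
y  <- 0.5 * x1 + 0.3 * x2 + rnorm(n)
dat <- data.frame(y, x1, x2)

boot_diff <- replicate(2000, {
  idx <- sample(n, replace = TRUE)               # resample cases
  b   <- coef(lm(y ~ x1 + x2, data = dat[idx, ]))
  b["x1"] - b["x2"]                              # parameter difference of interest
})
quantile(boot_diff, c(0.025, 0.975))  # if 0 lies outside, infer a nonzero difference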
@phdthesis{Bauer2023, author = {Bauer, Carsten}, title = {Learning Curve Effects in Hospitals as Highly Specialized Expert Organizations}, doi = {10.25972/OPUS-32871}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-328717}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The collection at hand is concerned with learning curve effects in hospitals as highly specialized expert organizations and comprises four papers, each focusing on a different aspect of the topic. Three papers are concerned with surgeons, and one is concerned with the staff of the emergency room in a conservative treatment. The preface compactly addresses the steadily increasing health care costs and economic pressure, the hospital landscape in Germany as well as its development. Furthermore, the DRG lump-sum compensation and the characteristics of the health sector, which is strongly regulated by the state and in which ethical aspects must be omnipresent, are outlined. In addition, the benefit of knowing about learning curve effects in order to cut costs and to keep quality stable or even improve it is addressed. The first paper of the collection investigates the learning effects in a hospital which has specialized in endoprosthetics (total hip and knee replacement). Both the specialized and the non-specialized interventions are studied. Costs are not investigated directly, but via cost indicators. The indicator of short-term costs is operating room time; the indicator of medium- to long-term costs is quality, operationalized by complications in the post-anesthesia care unit. The study estimates regression models (OLS and logit). The results indicate that the specialization comes with advantages due to learning effects, in terms of shorter operating room times and lower complication rates in endoprosthetic interventions. For the non-specialized interventions, the results are the same. There are no potentially negative effects of specialization on non-specialized surgeries, but advantageous spillover effects. Altogether, the specialization can be regarded as reasonable, as it cuts the costs of all surgeries in the short, medium, and long term. The authors are Carsten Bauer, Nele M{\"o}bs, Oliver Unger, Andrea Szczesny, and Christian Ernst. The second paper focuses on surgeons' learning curve effects in a teamwork versus an individual work setting. The study thus combines learning curve effects with teamwork in health care, an issue increasingly discussed in recent literature. The investigated interventions are tonsillectomies (surgical excision of the palatine tonsils), a standard intervention. The indicators of short-term and of medium- to long-term costs are again operating room times and complications as a proxy for quality, respectively. Complications are secondary bleedings, which usually occur a few days after surgery. The study estimates regression models (OLS and logit). The results show that operating room times decrease with increasing surgeon's experience. Surgeons who also operate in teams learn faster than those who always operate on their own. Thus, operating room times are shorter for surgeons who also take part in team interventions. As a special feature, the data set contains the costs per case, which makes it possible to verify that the assumed cost indicators are valid. The findings recommend team surgeries especially for resident physicians. The authors are Carsten Bauer, Oliver Unger, and Martin Holderried. The third paper is dedicated to stapes surgery, a therapy for conductive hearing loss caused by otosclerosis (abnormal bone growth). The intervention is conceptually simple but technically difficult and is therefore regarded as ideal for studying learning curve effects in surgery. The paper seeks a comprehensive investigation: operating room times are employed as the short-term cost indicator and quality as the medium- to long-term one. To measure quality, the postoperative difference between the air and bone conduction thresholds is used, as well as a combination of this difference and the absence of complications. This paper also estimates different regression models (OLS and logit). Besides investigating the effects at the department level, the study also considers the individual level; that is, operating room times and quality are investigated for individual surgeons. This improves the comparison of learning curves, as the surgeons worked under largely identical conditions. It becomes apparent that operating room times initially decrease with increasing experience. The marginal effect of additional experience gets smaller until the direction of the effect changes and operating room times increase with increasing experience, probably because difficult cases are allocated to the most experienced surgeons. Regarding quality, no learning curve effects are observed. The authors are Carsten Bauer, Johannes Taeger, and Kristen Rak. The fourth paper is a systematic literature review on learning effects in the treatment of ischemic strokes. In case of stroke, every minute counts. Therefore, there is an inherent need to reduce the time from symptom onset to treatment. The article is concerned with the reduction of the time from arrival at the hospital to thrombolysis treatment, the so-called "door-to-needle time". The literature contains studies on learning in a broader sense, induced by quality improvement programs, as well as on learning in a narrower sense, in which learning curve effects are evaluated. In addition, studies on the time differences between low-volume and high-volume hospitals are considered, as these differences are probably the result of learning and economies of scale. Virtually all of the 165 evaluated articles report improvements regarding the time to treatment. Furthermore, the clinical results substantiate the common association of shorter times from arrival to treatment with improved clinical outcomes. The review additionally discusses the economic implications of the results. The author is Carsten Bauer. The preface points out that, after the measurement of learning curve effects, further efforts are necessary to use them to increase efficiency, as the issue does not admit of easy, standardized solutions. Furthermore, the postface emphasizes the importance of multiperspectivity in research for the patient outcome, the health care system, and society.}, subject = {Lernkurve}, language = {en} }
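The learning-curve regressions described in these papers can be sketched as follows; the data and variable names are simulated stand-ins, not the papers' hospital data. A power-law learning curve is linear in logs, so OLS on logged variables estimates the learning elasticity directly.

# Illustrative learning-curve regression (simulated data): log operating room
# time on log cumulative experience, as implied by a power-law learning curve.
set.seed(7)
experience <- sample(1:300, 500, replace = TRUE)  # cumulative case count per surgeon
or_time    <- 90 * experience^(-0.1) * exp(rnorm(500, sd = 0.2))  # minutes

fit <- lm(log(or_time) ~ log(experience))
summary(fit)$coefficients
# The slope estimates the learning elasticity: about -0.1 here, i.e., each
# doubling of experience shortens operating room time by roughly 7 percent.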
@phdthesis{Teichert2018, author = {Teichert, Max}, title = {The interest rate risk of banks: current topics}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-070-2}, doi = {10.25972/WUP-978-3-95826-071-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-153669}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {XIX, 252}, year = {2018}, abstract = {This dissertation deals with the interest rate risk of banks. It treats topics of high current relevance in view of recent developments in monetary policy, economics, and banking regulation. The first part lays four foundations. First, the modern view of banking is presented, according to which banks create money in the form of savings when they grant loans. Under this view, taking on interest rate risk is part of normal banking business. Second, an overview of the microeconomics of banking is given, presenting the recently completed shift to the risk paradigm. Under this paradigm, banks are essentially takers of risk, including interest rate risk. Third, the monetary theory of transmission channels is summarized, with a focus on the risk-taking channel, which has recently attracted considerable attention. This transmission channel also establishes a link between monetary policy and banks' taking on of interest rate risk. Fourth, approaches to and specifics of the treatment of banks' interest rate risk in economic research are compiled. These are the tools for developing new research contributions. The second part develops three extensions. The first extension counters the almost complete lack of specific data on the interest rate risk of banks in Germany with a comprehensive evaluation of general, publicly available statistics. It turns out that the interest rate risk of banks in Germany lies above the euro area average and follows a rising trend, driven in particular by a shift toward shorter-term refinancing. Among the different types of banks in Germany, savings banks and cooperative banks prove particularly exposed. The second extension examines changes in the term structure of interest rates in Germany and thus considers the second component of interest rate risk besides the banks' positions. Analyses of historical as well as forecast changes point to declining interest rate risk. A complementary scenario analysis likewise yields concrete points of criticism of regulatory standards recently adopted at the international level, together with specific proposals for supplementing them in the course of their implementation. The third extension addresses a possible search for yield by banks in taking on interest rate risk, whereby lower profitability leads to higher risk taking. A theoretical model traces this behavior back to a plausible utility function of bank managers. An empirical investigation documents its statistical significance and economic relevance with data from Germany.}, subject = {Zins{\"a}nderungsrisiko}, language = {en} }
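The logic of the scenario analysis in the second extension can be illustrated with a toy calculation (all figures invented, not drawn from the thesis): a stylized balance sheet with long-dated assets and short-dated funding loses economic value when the yield curve shifts upward in parallel.

# Toy scenario analysis (invented figures): revalue a maturity-mismatched
# balance sheet under a +200 basis point parallel shift of the yield curve.
pv <- function(cf, t, r) sum(cf / (1 + r)^t)

assets_cf      <- rep(5, 10); assets_t      <- 1:10  # ten years of asset cash flows
liabilities_cf <- 40;         liabilities_t <- 1     # one-year funding

eve <- function(r) pv(assets_cf, assets_t, r) - pv(liabilities_cf, liabilities_t, r)

base  <- eve(0.02)
shock <- eve(0.04)  # +200 bp
c(EVE_base = base, EVE_shock = shock, Delta = shock - base)
# The maturity mismatch makes the economic value of equity fall as rates rise.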
@phdthesis{Rademaker2020, author = {Rademaker, Manuel Elias}, title = {Composite-based Structural Equation Modeling}, doi = {10.25972/OPUS-21593}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-215935}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Structural equation modeling (SEM) has been used and developed for decades across various domains and research fields such as, among others, psychology, sociology, and business research. Although no unique definition exists, SEM is best understood as the entirety of a set of related theories, mathematical models, methods, algorithms, and terminologies related to analyzing the relationships between theoretical entities -- so-called concepts --, their statistical representations -- referred to as constructs --, and observables -- usually called indicators, items, or manifest variables. This thesis is concerned with aspects of a particular strand of research within SEM -- namely, composite-based SEM. Composite-based SEM is defined as SEM involving linear compounds, i.e., linear combinations of observables, when estimating parameters of interest. The content of the thesis is based on a working paper (Chapter 2), a published refereed journal article (Chapter 3), a working paper that is, at the time of submission of this thesis, under review for publication (Chapter 4), and a steadily growing documentation that I am writing for the R package cSEM (Chapter 5). The cSEM package -- written by myself and my former colleague at the University of Wuerzburg, Florian Schuberth -- provides functions to estimate, analyze, assess, and test nonlinear, hierarchical, and multigroup structural equation models using composite-based approaches and procedures. In Chapter 1, I briefly discuss some of the key SEM terminology. Chapter 2 is based on a working paper to be submitted to the Journal of Business Research titled "Assessing overall model fit of composite models in structural equation modeling". The article is concerned with the topic of overall model fit assessment of the composite model. Three main contributions to the literature are made. First, we discuss the concept of model fit in SEM in general and composite-based SEM in particular. Second, we review common fit indices and explain whether and how they can be applied to assess composite models. Third, we show that, if used for overall model fit assessment, the root mean square outer residual covariance (RMS_theta) is identical to another well-known index called the standardized root mean square residual (SRMR). Chapter 3 is based on a journal article published in Internet Research called "Measurement error correlation within blocks of indicators in consistent partial least squares: Issues and remedies". The article enhances consistent partial least squares (PLSc) to yield consistent parameter estimates for population models whose indicator blocks contain a subset of correlated measurement errors. This is achieved by modifying the correction for attenuation as originally applied by PLSc to include a priori assumptions on the structure of the measurement error correlations within blocks of indicators. To assess the efficacy of the modification, a Monte Carlo simulation is conducted. The paper is joint work with Florian Schuberth and Theo Dijkstra. Chapter 4 is based on a journal article under review for publication in Industrial Management \& Data Systems called "Estimating and testing second-order constructs using PLS-PM: the case of composites of composites". The purpose of this article is threefold: (i) to evaluate and compare common approaches to estimating models containing second-order constructs modeled as composites of composites, (ii) to provide and statistically assess a two-step testing procedure to test the overall model fit of such models, and (iii) to formulate recommendations for practitioners based on our findings. Moreover, a Monte Carlo simulation is conducted to compare the approaches in terms of Fisher consistency, estimated bias, and RMSE. The paper is joint work with Florian Schuberth and J{\"o}rg Henseler.}, subject = {Strukturgleichungsmodell}, language = {en} }
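The SRMR that Chapter 2 relates to RMS_theta can be computed generically as follows; this is a sketch of the standard formula applied to correlation matrices, not cSEM's internal code.

# SRMR: root mean square of the unique elements of the residual matrix between
# the sample and the model-implied correlation matrices (already standardized).
srmr <- function(S, Sigma_hat) {
  resid <- S - Sigma_hat
  sqrt(mean(resid[lower.tri(resid, diag = TRUE)]^2))
}

S         <- matrix(c(1, .50, .40,  .50, 1, .30,  .40, .30, 1), 3, 3)
Sigma_hat <- matrix(c(1, .45, .45,  .45, 1, .45,  .45, .45, 1), 3, 3)
srmr(S, Sigma_hat)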