@phdthesis{Hegmann2024, author = {Hegmann, Reinhold}, title = {Pr{\"u}ferqualifikation und Pr{\"u}fungsqualit{\"a}t - Eine empirische Untersuchung privater pr{\"u}fungspflichtiger Unternehmen in Deutschland}, doi = {10.25972/OPUS-32254}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322546}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Die Jahresabschlusspr{\"u}fung verfolgt das Ziel, die Verl{\"a}sslichkeit der Rechnungslegung zu best{\"a}tigen. Folglich kann sie einen wesentlichen Beitrag zu einem hohen Informationsniveau an den M{\"a}rkten leisten. Angesichts dieser großen {\"o}konomischen Bedeutung unternimmt der deutsche Gesetzgeber zahlreiche Anstrengungen, um eine hohe Pr{\"u}fungsqualit{\"a}t sicherzustellen. Die Sichtung der Wirtschaftspr{\"u}ferordnung zeigt hierbei, dass regulatorische Maßnahmen ergriffen werden, die am Kern der Jahresabschlusspr{\"u}fung ansetzen, n{\"a}mlich an den Berufsangeh{\"o}rigen selbst. So wurde der Zugang zum Berufsstand der vereidigten Buchpr{\"u}fer mehrmals geschlossen und wiederer{\"o}ffnet. Des Weiteren sind markante Anpassungen des Niveaus des Wirtschaftspr{\"u}fungsexamens im Zeitablauf zu erkennen. Bei der Jahresabschlusspr{\"u}fung der Unternehmen von {\"o}ffentlichem Interesse sind außerdem besondere Berufspflichten zu erf{\"u}llen. Zum einen ist diesen schweren Eingriffen in die Freiheit der Berufswahl und der Berufsaus{\"u}bung gemein, dass sie allesamt die Qualifikation des Abschlusspr{\"u}fers adressieren. Zum anderen werden die entsprechenden Gesetzes{\"a}nderungen mehrheitlich mit einer St{\"a}rkung der Pr{\"u}fungsqualit{\"a}t begr{\"u}ndet. Fraglich ist, inwiefern jene Facetten der Pr{\"u}ferqualifikation tats{\"a}chlich einen Einfluss auf die Pr{\"u}fungsqualit{\"a}t aus{\"u}ben. Aufgrund mangelnder Evidenz ergibt sich die Notwendigkeit, eine empirische Studie am deutschen Pr{\"u}fermarkt durchzuf{\"u}hren und somit den Beginn zur Schließung der identifizierten Forschungsl{\"u}cke zu setzen. Das Ziel der vorliegenden Dissertation besteht folglich darin, den Zusammenhang zwischen der Pr{\"u}ferqualifikation und der Pr{\"u}fungsqualit{\"a}t mittels Regressionsanalysen zu untersuchen. Dazu wurde ein einzigartiger Datensatz zu deutschen privaten pr{\"u}fungspflichtigen Kapitalgesellschaften mit unkonsolidierten Finanz- und Pr{\"u}ferinformationen im Zeitraum 2006-2018 mit insgesamt 217.585 grundlegenden Beobachtungen erhoben, bereinigt und aufbereitet. Da die Pr{\"u}fungsqualit{\"a}t nicht direkt beobachtbar ist, wird zwischen wahrgenommener Pr{\"u}fungsqualit{\"a}t und tats{\"a}chlicher Pr{\"u}fungsqualit{\"a}t unterschieden. Im Rahmen dieser Dissertation wird die wahrgenommene Pr{\"u}fungsqualit{\"a}t {\"u}ber Fremdkapitalkosten und die tats{\"a}chliche Pr{\"u}fungsqualit{\"a}t {\"u}ber absolute diskretion{\"a}re Periodenabgrenzungen approximiert. Die Ergebnisse der Hauptregressionen zeigen {\"u}berwiegend, dass kein Zusammenhang zwischen den Maßgr{\"o}ßen der Pr{\"u}ferqualifikation und der wahrgenommenen und tats{\"a}chlichen Pr{\"u}fungsqualit{\"a}t besteht. Die Zusatz- und Sensitivit{\"a}tsanalysen unterst{\"u}tzen diesen Befund. So k{\"o}nnen mit Blick auf die Berufszugangsregelungen keine Qualit{\"a}tsunterschiede zwischen den Berufsst{\"a}nden der Wirtschaftspr{\"u}fer und der vereidigten Buchpr{\"u}fer nachgewiesen werden. 
Auch innerhalb des Berufsstandes der Wirtschaftspr{\"u}fer ergeben sich keine Hinweise auf ein Qualit{\"a}tsgef{\"a}lle zwischen den Pr{\"u}fergruppen, die unterschiedliche Examensanforderungen durchlebt haben. Hinsichtlich der Berufsaus{\"u}bungsregelungen ist zu beobachten, dass die zus{\"a}tzlichen Anforderungen an die Jahresabschlusspr{\"u}fung der Unternehmen von {\"o}ffentlichem Interesse nicht mit einer anderen Pr{\"u}fungsqualit{\"a}t bei privaten Unternehmen verbunden sind. Die beschriebenen regulatorischen Schritte des Gesetzgebers im Bereich der Pr{\"u}ferqualifikation erscheinen somit im Lichte einer verbesserten Pr{\"u}fungsqualit{\"a}t nicht zwingend gerechtfertigt.}, subject = {Pr{\"u}fungsqualit{\"a}t}, language = {de} } @article{LeimeisterStieglitzMatzneretal.2021, author = {Leimeister, Jan Marco and Stieglitz, Stefan and Matzner, Martin and Kundisch, Dennis and Flath, Christoph and R{\"o}glinger, Maximilian}, title = {Quo Vadis Conferences in the Business and Information Systems Engineering (BISE) Community After Covid}, series = {Business \& Information Systems Engineering}, volume = {63}, journal = {Business \& Information Systems Engineering}, number = {6}, issn = {2363-7005}, doi = {10.1007/s12599-021-00707-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-308902}, pages = {741-749}, year = {2021}, language = {en} } @article{GriebelSegebarthSteinetal.2023, author = {Griebel, Matthias and Segebarth, Dennis and Stein, Nikolai and Schukraft, Nina and Tovote, Philip and Blum, Robert and Flath, Christoph M.}, title = {Deep learning-enabled segmentation of ambiguous bioimages with deepflash2}, series = {Nature Communications}, volume = {14}, journal = {Nature Communications}, doi = {10.1038/s41467-023-36960-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357286}, year = {2023}, abstract = {Bioimages frequently exhibit low signal-to-noise ratios due to experimental conditions, specimen characteristics, and imaging trade-offs. Reliable segmentation of such ambiguous images is difficult and laborious. Here we introduce deepflash2, a deep learning-enabled segmentation tool for bioimage analysis. The tool addresses typical challenges that may arise during the training, evaluation, and application of deep learning models on ambiguous data. The tool's training and evaluation pipeline uses multiple expert annotations and deep model ensembles to achieve accurate results. The application pipeline supports various use-cases for expert annotations and includes a quality assurance mechanism in the form of uncertainty measures. Benchmarked against other tools, deepflash2 offers both high predictive accuracy and efficient computational resource usage. The tool is built upon established deep learning libraries and enables sharing of trained model ensembles with the research community. deepflash2 aims to simplify the integration of deep learning into bioimage analysis projects while improving accuracy and reliability.}, language = {en} } @book{Knoll2024, author = {Knoll, Leonhard}, title = {De exemplis deterrentibus: Bemerkenswerte Befunde aus der Praxis der rechtsgepr{\"a}gten Unternehmensbewertung in Aufgabenform}, edition = {4.
Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-243-0}, doi = {10.25972/WUP-978-3-95826-243-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-348840}, pages = {XII, 232}, year = {2024}, abstract = {Das vorliegende Buch besch{\"a}ftigt sich anhand einer Sammlung von realen F{\"a}llen, die in Aufgabenform formuliert sind, mit dem leider oft gest{\"o}rten Verh{\"a}ltnis von Theorie und Praxis in der rechtsgepr{\"a}gten Unternehmensbewertung. Es weist {\"a}hnlich wie „normale" Fallsammlungen die jeweiligen Aufgabenstellungen und die zugeh{\"o}rigen L{\"o}sungen aus. Die eigentlichen Fragestellungen in den Aufgabentexten sind durch kurze Erl{\"a}uterungen eingerahmt, damit jeder Fall als solcher von einem mit Bewertungsfragen halbwegs Vertrauten relativ leicht verstanden und in seiner Bedeutung eingeordnet werden kann. Dieses Vorgehen {\"a}hnelt wiederum Lehrb{\"u}chern, die Inhalte {\"u}ber F{\"a}lle vermitteln, nur dass hier nicht hypothetische F{\"a}lle das jeweils idealtypisch richtige Vorgehen zeigen, sondern Praxisf{\"a}lle plakative Verst{\"o}ße contra legem artis.}, subject = {Unternehmensbewertung}, language = {de} } @article{HermSteinbachWanneretal.2022, author = {Herm, Lukas-Valentin and Steinbach, Theresa and Wanner, Jonas and Janiesch, Christian}, title = {A nascent design theory for explainable intelligent systems}, series = {Electronic Markets}, volume = {32}, journal = {Electronic Markets}, number = {4}, issn = {1019-6781}, doi = {10.1007/s12525-022-00606-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323809}, pages = {2185-2205}, year = {2022}, abstract = {Due to computational advances in the past decades, so-called intelligent systems can learn from increasingly complex data, analyze situations, and support users in their decision-making to address them. However, in practice, the complexity of these intelligent systems renders the user hardly able to comprehend the inherent decision logic of the underlying machine learning model. As a result, the adoption of this technology, especially for high-stake scenarios, is hampered. In this context, explainable artificial intelligence offers numerous starting points for making the inherent logic explainable to people. While research manifests the necessity for incorporating explainable artificial intelligence into intelligent systems, there is still a lack of knowledge about how to socio-technically design these systems to address acceptance barriers among different user groups. In response, we have derived and evaluated a nascent design theory for explainable intelligent systems based on a structured literature review, two qualitative expert studies, a real-world use case application, and quantitative research.
Our design theory includes design requirements, design principles, and design features covering the topics of global explainability, local explainability, personalized interface design, as well as psychological/emotional factors.}, language = {en} } @article{WannerHermHeinrichetal.2022, author = {Wanner, Jonas and Herm, Lukas-Valentin and Heinrich, Kai and Janiesch, Christian}, title = {The effect of transparency and trust on intelligent system acceptance: evidence from a user-based study}, series = {Electronic Markets}, volume = {32}, journal = {Electronic Markets}, number = {4}, issn = {1019-6781}, doi = {10.1007/s12525-022-00593-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323829}, pages = {2079-2102}, year = {2022}, abstract = {Contemporary decision support systems are increasingly relying on artificial intelligence technology such as machine learning algorithms to form intelligent systems. These systems have human-like decision capacity for selected applications based on a decision rationale which cannot be looked-up conveniently and constitutes a black box. As a consequence, acceptance by end-users remains somewhat hesitant. While lacking transparency has been said to hinder trust and enforce aversion towards these systems, studies that connect user trust to transparency and subsequently acceptance are scarce. In response, our research is concerned with the development of a theoretical model that explains end-user acceptance of intelligent systems. We utilize the unified theory of acceptance and use in information technology as well as explanation theory and related theories on initial trust and user trust in information systems. The proposed model is tested in an industrial maintenance workplace scenario using maintenance experts as participants to represent the user group. Results show that acceptance is performance-driven at first sight. However, transparency plays an important indirect role in regulating trust and the perception of performance.}, language = {en} } @article{OberdorfSchaschekWeinzierletal.2023, author = {Oberdorf, Felix and Schaschek, Myriam and Weinzierl, Sven and Stein, Nikolai and Matzner, Martin and Flath, Christoph M.}, title = {Predictive end-to-end enterprise process network monitoring}, series = {Business \& Information Systems Engineering}, volume = {65}, journal = {Business \& Information Systems Engineering}, number = {1}, issn = {2363-7005}, doi = {10.1007/s12599-022-00778-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323814}, pages = {49-64}, year = {2023}, abstract = {Ever-growing data availability combined with rapid progress in analytics has laid the foundation for the emergence of business process analytics. Organizations strive to leverage predictive process analytics to obtain insights. However, current implementations are designed to deal with homogeneous data. Consequently, there is limited practical use in an organization with heterogeneous data sources. The paper proposes a method for predictive end-to-end enterprise process network monitoring leveraging multi-headed deep neural networks to overcome this limitation. 
A case study performed with a medium-sized German manufacturing company highlights the method's utility for organizations.}, language = {en} } @article{HermJanieschHelmetal.2023, author = {Herm, Lukas-Valentin and Janiesch, Christian and Helm, Alexander and Imgrund, Florian and Hofmann, Adrian and Winkelmann, Axel}, title = {A framework for implementing robotic process automation projects}, series = {Information Systems and e-Business Management}, volume = {21}, journal = {Information Systems and e-Business Management}, number = {1}, issn = {1617-9846}, doi = {10.1007/s10257-022-00553-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323798}, pages = {1-35}, year = {2023}, abstract = {Robotic process automation is a disruptive technology to automate already digital yet manual tasks and subprocesses as well as whole business processes rapidly. In contrast to other process automation technologies, robotic process automation is lightweight and only accesses the presentation layer of IT systems to mimic human behavior. Due to the novelty of robotic process automation and the varying approaches when implementing the technology, there are reports that up to 50\% of robotic process automation projects fail. To tackle this issue, we use a design science research approach to develop a framework for the implementation of robotic process automation projects. We analyzed 35 reports on real-life projects to derive a preliminary sequential model. Then, we performed multiple expert interviews and workshops to validate and refine our model. The result is a framework with variable stages that offers guidelines with enough flexibility to be applicable in complex and heterogeneous corporate environments as well as for small and medium-sized companies. It is structured by the three phases of initialization, implementation, and scaling. They comprise eleven stages relevant during a project and as a continuous cycle spanning individual projects. Together they structure how to manage knowledge and support processes for the execution of robotic process automation implementation projects.}, language = {en} } @article{HermJanieschFuchs2022, author = {Herm, Lukas-Valentin and Janiesch, Christian and Fuchs, Patrick}, title = {Der Einfluss von menschlichen Denkmustern auf k{\"u}nstliche Intelligenz - eine strukturierte Untersuchung von kognitiven Verzerrungen}, series = {HMD Praxis der Wirtschaftsinformatik}, volume = {59}, journal = {HMD Praxis der Wirtschaftsinformatik}, number = {2}, issn = {1436-3011}, doi = {10.1365/s40702-022-00844-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323787}, pages = {556-571}, year = {2022}, abstract = {K{\"u}nstliche Intelligenz (KI) dringt vermehrt in sensible Bereiche des allt{\"a}glichen menschlichen Lebens ein. Es werden nicht mehr nur noch einfache Entscheidungen durch intelligente Systeme getroffen, sondern zunehmend auch komplexe Entscheidungen. So entscheiden z. B. intelligente Systeme, ob Bewerber in ein Unternehmen eingestellt werden sollen oder nicht. Oftmals kann die zugrundeliegende Entscheidungsfindung nur schwer nachvollzogen werden und ungerechtfertigte Entscheidungen k{\"o}nnen dadurch unerkannt bleiben, weshalb die Implementierung einer solchen KI auch h{\"a}ufig als sogenannte Blackbox bezeichnet wird. Folglich steigt die Bedrohung, durch unfaire und diskriminierende Entscheidungen einer KI benachteiligt behandelt zu werden. 
Resultieren diese Verzerrungen aus menschlichen Handlungen und Denkmustern, spricht man von einer kognitiven Verzerrung oder einem kognitiven Bias. Aufgrund der Neuigkeit dieser Thematik ist jedoch bisher nicht ersichtlich, welche verschiedenen kognitiven Bias innerhalb eines KI-Projektes auftreten k{\"o}nnen. Ziel dieses Beitrages ist es, anhand einer strukturierten Literaturanalyse, eine gesamtheitliche Darstellung zu erm{\"o}glichen. Die gewonnenen Erkenntnisse werden anhand des in der Praxis weit verbreiteten Cross-Industry Standard Process for Data Mining (CRISP-DM) Modells aufgearbeitet und klassifiziert. Diese Betrachtung zeigt, dass der menschliche Einfluss auf eine KI in jeder Entwicklungsphase des Modells gegeben ist und es daher wichtig ist, „mensch-{\"a}hnlichen" Bias in einer KI explizit zu untersuchen.}, language = {de} } @article{FreichelSteegmansWinkelmann2022, author = {Freichel, Chiara and Steegmans, Timo-Christian and Winkelmann, Axel}, title = {Ziele und Gestaltung digitaler Plattformen f{\"u}r Produktionsnetzwerke}, series = {HMD Praxis der Wirtschaftsinformatik}, volume = {59}, journal = {HMD Praxis der Wirtschaftsinformatik}, number = {5}, issn = {1436-3011}, doi = {10.1365/s40702-022-00908-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323779}, pages = {1281-1311}, year = {2022}, abstract = {Die interorganisatorische Zusammenarbeit in Produktionsnetzwerken kann Herausforderungen durch eine hohe Marktdynamik, immer anspruchsvollere Kundenbed{\"u}rfnisse und steigenden Kostendruck entgegenwirken. Neben der klassischen vertikalen Verschiebung von Kapazit{\"a}ten in Richtung geeigneter Zulieferer, lassen sich Fertigungskapazit{\"a}ten auch durch eine horizontale Zusammenarbeit zwischen produzierenden Unternehmen handeln. Im Sinne der Sharing Economy bieten digitale Plattformen eine geeignete Infrastruktur zur Verkn{\"u}pfung und Koordination der Marktakteure eines Produktionsnetzwerks. So k{\"o}nnen Fertigungsunternehmen flexibel Produktionsausf{\"a}llen entgegenwirken und freie Maschinenkapazit{\"a}ten auslasten. Eine wesentliche Voraussetzung f{\"u}r den Erfolg solcher digitalen Plattformen f{\"u}r Produktionsnetzwerke ist die Definition von Zielen, welche bisher in der Literatur nur unzureichend und nicht bezogen auf diese spezifische Plattformart untersucht wurden. In dieser Arbeit wird ein umf{\"a}ngliches konzeptionelles Zielmodell f{\"u}r diese spezifische Plattformart erstellt. Zu spezifischen Zielen digitaler Plattformen f{\"u}r Produktionsnetzwerke z{\"a}hlen neben wirtschaftlichen oder technischen Zielen beispielsweise auch produktionsbezogene Marktleistungsziele wie die Gew{\"a}hrleistung von Produktionsflexibilit{\"a}t. Aufbauend darauf wird gezeigt, wie das Design der beschriebenen Plattformen einen Einfluss auf die Erreichung bestimmter Ziele hat und wie spezielle Mechanismen zur Zielerreichung beitragen.}, language = {de} } @phdthesis{Bauer2023, author = {Bauer, Carsten}, title = {Learning Curve Effects in Hospitals as Highly Specialized Expert Organizations}, doi = {10.25972/OPUS-32871}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-328717}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The collection at hand is concerned with learning curve effects in hospitals as highly specialized expert organizations and comprises four papers, each focusing on a different aspect of the topic. Three papers are concerned with surgeons, and one is concerned with the staff of the emergency room in a conservative treatment.
The preface compactly addresses the steadily increasing health care costs and economic pressure, the hospital landscape in Germany as well as its development. Furthermore, the DRG lump-sum compensation and the characteristics of the health sector, which is strongly regulated by the state and in which ethical aspects must be omnipresent, are outlined. Besides, the benefit of knowing about learning curve effects in order to cut costs and to keep quality stable or even improve it is addressed. The first paper of the collection investigates the learning effects in a hospital which has specialized in endoprosthetics (total hip and knee replacement). Doing so, the specialized as well as the non-specialized interventions are studied. Costs are not investigated directly, but cost indicators. The indicator of costs in the short term is operating room times. The one of medium- to long-term costs is quality. It is operationalized by complications in the post-anesthesia care unit. The study estimates regression models (OLS and logit). The results indicate that the specialization comes along with advantages due to learning effects in terms of shorter operating room times and lower complication rates in endoprosthetic interventions. For the non-specialized interventions, the results are the same. There are no possibly negative effects of specialization on non-specialized surgeries, but advantageous spillover effects. Altogether, the specialization can be regarded as reasonable, as it cuts costs of all surgeries in the short, medium, and long term. The authors are Carsten Bauer, Nele M{\"o}bs, Oliver Unger, Andrea Szczesny, and Christian Ernst. In the second paper, surgeons' learning curve effects in a teamwork vs. an individual work setting are the focus of interest. Thus, the study combines learning curve effects with teamwork in health care, an issue increasingly discussed in recent literature. The investigated interventions are tonsillectomies (surgical excision of the palatine tonsils), a standard intervention. The indicators of costs in the short and medium to long term are again operating room times and complications as a proxy for quality, respectively. Complications are secondary bleedings, which usually occur a few days after surgery. The study estimates regression models (OLS and logit). The results show that operating room times decrease with increasing surgeon's experience. Surgeons who also operate in teams learn faster than the ones always operating on their own. Thus, operating room times are shorter for surgeons who also take part in team interventions. As a special feature, the data set contains the costs per case. This enables assuring that the assumed cost indicators are valid. The findings recommend team surgeries especially for resident physicians. The authors are Carsten Bauer, Oliver Unger, and Martin Holderried. The third paper is dedicated to stapes surgery, a therapy for conductive hearing loss caused by otosclerosis (overflow bone growth). It is conceptually simple, but technically difficult. Therefore, it is regarded as the optimum to study learning curve effects in surgery. The paper seeks a comprehensive investigation. Thus, operating room times are employed as short-term cost indicator and quality as the medium to long term one. To measure quality, the postoperative difference between air and bone conduction threshold as well as a combination of this difference and the absence of complications are used. This paper also estimates different regression models (OLS and logit).
Besides investigating the effects on department level, the study also considers the individual level; this means operating room times and quality are investigated for individual surgeons. This improves the comparison of learning curves, as the surgeons worked under widely identical conditions. It becomes apparent that the operating room times initially decrease with increasing experience. The marginal effect of additional experience gets smaller until the direction of the effect changes and the operating room times increase with increasing experience, probably caused by the allocation of difficult cases to the most experienced surgeons. Regarding quality, no learning curve effects are observed. The authors are Carsten Bauer, Johannes Taeger, and Kristen Rak. The fourth paper is a systematic literature review on learning effects in the treatment of ischemic strokes. In case of stroke, every minute counts. Therefore, there is the inherent need to reduce the time from symptom onset to treatment. The article is concerned with the reduction of the time from arrival at the hospital to thrombolysis treatment, the so-called "door-to-needle time". In the literature, there are studies on learning in a broader sense caused by a quality improvement program as well as learning in a narrower sense, in which learning curve effects are evaluated. Besides, studies on the time differences between low-volume and high-volume hospitals are considered, as the differences are probably the result of learning and economies of scale. Virtually all the 165 evaluated articles report improvements regarding the time to treatment. Furthermore, the clinical results substantiate the common association of shorter times from arrival to treatment with improved clinical outcomes. The review additionally discusses the economic implications of the results. The author is Carsten Bauer. The preface brings forward that after the measurement of learning curve effects, further efforts are necessary for using them in order to increase efficiency, as the issue does not admit of easy, standardized solutions. Furthermore, the postface emphasizes the importance of multiperspectivity in research for the patient outcome, the health care system, and society.}, subject = {Lernkurve}, language = {en} } @phdthesis{Herm2023, author = {Herm, Lukas-Valentin}, title = {Algorithmic Decision-Making Facilities: Perception and Design of Explainable AI-based Decision Support Systems}, doi = {10.25972/OPUS-32294}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322948}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Recent computing advances are driving the integration of artificial intelligence (AI)-based systems into nearly every facet of our daily lives. To this end, AI is becoming a frontier for enabling algorithmic decision-making by mimicking or even surpassing human intelligence. Thereupon, these AI-based systems can function as decision support systems (DSSs) that assist experts in high-stakes use cases where human lives are at risk. All that glitters is not gold, due to the accompanying complexity of the underlying machine learning (ML) models, which apply mathematical and statistical algorithms to autonomously derive nonlinear decision knowledge. One particular subclass of ML models, called deep learning models, accomplishes unsurpassed performance, with the drawback that these models are no longer explainable to humans.
This divergence may result in an end-user's unwillingness to utilize this type of AI-based DSS, thus diminishing the end-user's system acceptance. Hence, the explainable AI (XAI) research stream has gained momentum, as it develops techniques to unravel this black-box while maintaining system performance. Non-surprisingly, these XAI techniques become necessary for justifying, evaluating, improving, or managing the utilization of AI-based DSSs. This yields a plethora of explanation techniques, creating an XAI jungle from which end-users must choose. In turn, these techniques are preliminarily engineered by developers for developers without ensuring an actual end-user fit. Thus, it renders unknown how an end-user's mental model behaves when encountering such explanation techniques. For this purpose, this cumulative thesis seeks to address this research deficiency by investigating end-user perceptions when encountering intrinsic ML and post-hoc XAI explanations. Drawing on this, the findings are synthesized into design knowledge to enable the deployment of XAI-based DSSs in practice. To this end, this thesis comprises six research contributions that follow the iterative and alternating interplay between behavioral science and design science research employed in information systems (IS) research and thus contribute to the overall research objectives as follows: First, an in-depth study of the impact of transparency and (initial) trust on end-user acceptance is conducted by extending and validating the unified theory of acceptance and use of technology model. This study indicates both factors' strong but indirect effects on system acceptance, validating further research incentives. In particular, this thesis focuses on the overarching concept of transparency. Herein, a systematization in the form of a taxonomy and pattern analysis of existing user-centered XAI studies is derived to structure and guide future research endeavors, which enables the empirical investigation of the theoretical trade-off between performance and explainability in intrinsic ML algorithms, yielding a less gradual trade-off, fragmented into three explainability groups. This includes an empirical investigation on end-users' perceived explainability of post-hoc explanation types, with local explanation types performing best. Furthermore, an empirical investigation emphasizes the correlation between comprehensibility and explainability, indicating almost significant (with outliers) results for the assumed correlation. The final empirical investigation aims at researching XAI explanation types on end-user cognitive load and the effect of cognitive load on end-user task performance and task time, which also positions local explanation types as best and demonstrates the correlations between cognitive load and task performance and, moreover, between cognitive load and task time. Finally, the last research paper utilizes i.a. the obtained knowledge and derives a nascent design theory for XAI-based DSSs. This design theory encompasses (meta-) design requirements, design principles, and design features in a domain-independent and interdisciplinary fashion, including end-users and developers as potential user groups. This design theory is ultimately tested through a real-world instantiation in a high-stakes maintenance scenario. From an IS research perspective, this cumulative thesis addresses the lack of research on perception and design knowledge for an ensured utilization of XAI-based DSS. 
This lays the foundation for future research to obtain a holistic understanding of end-users' heuristic behaviors during decision-making to facilitate the acceptance of XAI-based DSSs in operational practice.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} } @phdthesis{Hubmann2023, author = {Hubmann, Maximilian}, title = {Steuervermeidung und grenz{\"u}berschreitende Besteuerung - eine betriebswirtschaftliche, dogmatische und wissenschaftstheoretische Analyse}, doi = {10.25972/OPUS-30369}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-303698}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {In dieser Dissertation werden ausgew{\"a}hlte Aspekte der Steuervermeidung und grenz{\"u}berschreitenden Besteuerung betrachtet. Im Teil B liegt der Fokus auf der Empirie zu Steuervermeidung und Gewinnverlagerung multinationaler Unternehmen mit drei einzelnen Aufs{\"a}tzen. Der Teil C untersucht die unterschiedliche Besteuerung von Human- und Sachverm{\"o}gen anhand der beiden fundamentalen Besteuerungsprinzipien des {\"A}quivalenz- und des Leistungsf{\"a}higkeitsprinzips. Der letzte Aufsatz (Teil D) analysiert das Werturteilsfreiheitspostulat im Stakeholder-Ansatz und zeigt mithilfe eines Fallbeispiels, wie die Unternehmensbesteuerung in unterschiedliche Stakeholder-Ans{\"a}tze integriert werden kann. Eine abschließende Gesamtw{\"u}rdigung geht auf verbleibende Forschungsfragen ein (Teil E). Somit wird in der vorliegenden Dissertation grenz{\"u}berschreitende Besteuerung anhand betriebswirtschaftlicher, besteuerungsprinzipiengest{\"u}tzter bzw. dogmatischer und wissenschaftstheoretischer Gesichtspunkte untersucht.}, subject = {Steuervermeidung}, language = {de} } @phdthesis{Siller2023, author = {Siller, Benjamin}, title = {Influence of Lead Time and Emission Policies on the Design of Supply Chains - Insights from Supply Chain Design Models}, doi = {10.25972/OPUS-29671}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-296713}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Companies are expected to act as international players and to use their capabilities to provide customized products and services quickly and efficiently. Today, consumers expect their requirements to be met within a short time and at a favorable price. Order-to-delivery lead time has steadily gained in importance for consumers. Furthermore, governments can use various emissions policies to force companies and customers to reduce their greenhouse gas emissions. This thesis investigates the influence of order-to-delivery lead time and different emission policies on the design of a supply chain. Within this work different supply chain design models are developed to examine these different influences. The first model incorporates lead times and total costs, and various emission policies are implemented to illustrate the trade-off between the different measures. The second model reflects the influence of order-to-delivery lead time sensitive consumers, and different emission policies are implemented to study their impacts. The analysis shows that the share of order-to-delivery lead time sensitive consumers has a significant impact on the design of a supply chain. Demand uncertainty and uncertainty in the design of different emission policies are investigated by developing an appropriate robust mathematical optimization model. Results show that especially uncertainties on the design of an emission policy can significantly impact the total cost of a supply chain. 
The effects of differently designed emission policies in various countries are investigated in the fourth model. The analyses highlight that both lead times and emission policies can strongly influence companies' offshoring and nearshoring strategies.}, subject = {Supply Chain Management}, language = {en} } @article{RodriguezEntrenaSchuberthGelhard2018, author = {Rodr{\´i}guez-Entrena, Macario and Schuberth, Florian and Gelhard, Carsten}, title = {Assessing statistical differences between parameters estimates in Partial Least Squares path modeling}, series = {Quality \& Quantity}, volume = {52}, journal = {Quality \& Quantity}, number = {1}, doi = {10.1007/s11135-016-0400-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-226403}, pages = {57-69}, year = {2018}, abstract = {Structural equation modeling using partial least squares (PLS-SEM) has become a main-stream modeling approach in various disciplines. Nevertheless, prior literature still lacks a practical guidance on how to properly test for differences between parameter estimates. Whereas existing techniques such as parametric and non-parametric approaches in PLS multi-group analysis solely allow to assess differences between parameters that are estimated for different subpopulations, the study at hand introduces a technique that allows to also assess whether two parameter estimates that are derived from the same sample are statistically different. To illustrate this advancement to PLS-SEM, we particularly refer to a reduced version of the well-established technology acceptance model.}, language = {en} } @phdthesis{Oberdorf2022, author = {Oberdorf, Felix}, title = {Design and Evaluation of Data-Driven Enterprise Process Monitoring Systems}, doi = {10.25972/OPUS-29853}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-298531}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Increasing global competition forces organizations to improve their processes to gain a competitive advantage. In the manufacturing sector, this is facilitated through tremendous digital transformation. Fundamental components in such digitalized environments are process-aware information systems that record the execution of business processes, assist in process automation, and unlock the potential to analyze processes. However, most enterprise information systems focus on informational aspects, process automation, or data collection but do not tap into predictive or prescriptive analytics to foster data-driven decision-making. Therefore, this dissertation is set out to investigate the design of analytics-enabled information systems in five independent parts, which step-wise introduce analytics capabilities and assess potential opportunities for process improvement in real-world scenarios. To set up and extend analytics-enabled information systems, an essential prerequisite is identifying success factors, which we identify in the context of process mining as a descriptive analytics technique. We combine an established process mining framework and a success model to provide a structured approach for assessing success factors and identifying challenges, motivations, and perceived business value of process mining from employees across organizations as well as process mining experts and consultants. We extend the existing success model and provide lessons for business value generation through process mining based on the derived findings. 
To assist the realization of process mining enabled business value, we design an artifact for context-aware process mining. The artifact combines standard process logs with additional context information to assist the automated identification of process realization paths associated with specific context events. Yet, realizing business value is a challenging task, as transforming processes based on informational insights is time-consuming. To overcome this, we showcase the development of a predictive process monitoring system for disruption handling in a production environment. The system leverages state-of-the-art machine learning algorithms for disruption type classification and duration prediction. It combines the algorithms with additional organizational data sources and a simple assignment procedure to assist the disruption handling process. The design of such a system and analytics models is a challenging task, which we address by engineering a five-phase method for predictive end-to-end enterprise process network monitoring leveraging multi-headed deep neural networks. The method facilitates the integration of heterogeneous data sources through dedicated neural network input heads, which are concatenated for a prediction. An evaluation based on a real-world use-case highlights the superior performance of the resulting multi-headed network. Even the improved model performance provides no perfect results, and thus decisions about assigning agents to solve disruptions have to be made under uncertainty. Mathematical models can assist here, but due to complex real-world conditions, the number of potential scenarios massively increases and limits the solution of assignment models. To overcome this and tap into the potential of prescriptive process monitoring systems, we set out a data-driven approximate dynamic stochastic programming approach, which incorporates multiple uncertainties for an assignment decision. The resulting model has significant performance improvement and ultimately highlights the particular importance of analytics-enabled information systems for organizational process improvement.}, subject = {Operations Management}, language = {en} } @article{RademakerSchuberthDijkstra2019, author = {Rademaker, Manuel E. and Schuberth, Florian and Dijkstra, Theo K.}, title = {Measurement error correlation within blocks of indicators in consistent partial least squares : Issues and remedies}, series = {Internet Research}, volume = {29}, journal = {Internet Research}, number = {3}, doi = {10.1108/IntR-12-2017-0525}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-224901}, pages = {448-463}, year = {2019}, abstract = {Purpose The purpose of this paper is to enhance consistent partial least squares (PLSc) to yield consistent parameter estimates for population models whose indicator blocks contain a subset of correlated measurement errors. Design/methodology/approach Correction for attenuation as originally applied by PLSc is modified to include a priori assumptions on the structure of the measurement error correlations within blocks of indicators. To assess the efficacy of the modification, a Monte Carlo simulation is conducted. Findings In the presence of population measurement error correlation, estimated parameter bias is generally small for original and modified PLSc, with the latter outperforming the former for large sample sizes. In terms of the root mean squared error, the results are virtually identical for both original and modified PLSc. 
Only for relatively large sample sizes, high population measurement error correlation, and low population composite reliability are the increased standard errors associated with the modification outweighed by a smaller bias. These findings are regarded as initial evidence that original PLSc is comparatively robust with respect to misspecification of the structure of measurement error correlations within blocks of indicators. Originality/value Introducing and investigating a new approach to address measurement error correlation within blocks of indicators in PLSc, this paper contributes to the ongoing development and assessment of recent advancements in partial least squares path modeling.}, language = {en} } @techreport{BaumgartBredebachHermetal.2022, author = {Baumgart, Michael and Bredebach, Patrick and Herm, Lukas-Valentin and Hock, David and Hofmann, Adrian and Janiesch, Christian and Jankowski, Leif Ole and Kampik, Timotheus and Keil, Matthias and Kolb, Julian and Kr{\"o}hn, Michael and Pytel, Norman and Schaschek, Myriam and St{\"u}bs, Oliver and Winkelmann, Axel and Zeiß, Christian}, title = {Plattform f{\"u}r das integrierte Management von Kollaborationen in Wertsch{\"o}pfungsnetzwerken (PIMKoWe)}, editor = {Winkelmann, Axel and Janiesch, Christian}, issn = {2199-0328}, doi = {10.25972/OPUS-29335}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-293354}, pages = {248}, year = {2022}, abstract = {Das Verbundprojekt „Plattform f{\"u}r das integrierte Management von Kollaborationen in Wertsch{\"o}pfungsnetzwerken" (PIMKoWe - F{\"o}rderkennzeichen „02P17D160") ist ein Forschungsvorhaben im Rahmen des Forschungsprogramms „Innovationen f{\"u}r die Produktion, Dienstleistung und Arbeit von morgen" der Bekanntmachung „Industrie 4.0 - Intelligente Kollaborationen in dynamischen Wertsch{\"o}pfungsnetzwerken" (InKoWe). Das Forschungsvorhaben wurde mit Mitteln des Bundesministeriums f{\"u}r Bildung und Forschung (BMBF) gef{\"o}rdert und durch den Projekttr{\"a}ger des Karlsruher Instituts f{\"u}r Technologie (PTKA) betreut. Ziel des Forschungsprojekts PIMKoWe ist die Entwicklung und Bereitstellung einer Plattforml{\"o}sung zur Flexibilisierung, Automatisierung und Absicherung von Kooperationen in Wertsch{\"o}pfungsnetzwerken des industriellen Sektors.}, subject = {Blockchain}, language = {de} } @phdthesis{Hornung2022, author = {Hornung, Vanessa}, title = {Leading by Purpose and Employer Attractiveness - Eine konzeptionelle und empirische Analyse am Beispiel von Unilever}, doi = {10.25972/OPUS-28894}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-288941}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Die Welt befindet sich in einem tiefgreifenden Wandlungsprozess von einer Industrie- zu einer Wissensgesellschaft. Die Automatisierung sowohl physischer als auch kognitiver Arbeit verlagert die Nachfrage des Arbeitsmarktes zunehmend zu hoch qualifizierten Mitarbeitern, die als High Potentials bezeichnet werden. Diese zeichnen sich neben ihrer Intelligenz durch vielf{\"a}ltige F{\"a}higkeiten wie Empathieverm{\"o}gen, Kreativit{\"a}t und Probleml{\"o}sungskompetenzen aus. Humankapital gilt als Wettbewerbsfaktor der Zukunft, jedoch beklagten Unternehmen bereits Ende des 20. Jahrhunderts einen Mangel an Fach- und F{\"u}hrungspersonal, der durch die Pandemie weiter verst{\"a}rkt wird. Aus diesem Grund r{\"u}cken Konzepte zur Rekrutierung und Mitarbeiterbindung in den Fokus der Unternehmen.
Da ethisches und {\"o}kologisches Bewusstsein in der Bev{\"o}lkerung an Bedeutung gewinnen, l{\"a}sst sich annehmen, dass Bewerber zuk{\"u}nftig verantwortungsbewusste Arbeitgeber bevorzugen. Nachhaltigkeit bzw. Corporate Responsibility wird damit zum Wettbewerbsfaktor zur Gewinnung und Bindung von Talenten. Mit Hilfe des Ansatzes der identit{\"a}tsorientierten Markenf{\"u}hrung wird ein Verst{\"a}ndnis davon hergestellt, wie es Unternehmen gelingt, eine starke Arbeitgebermarke aufzubauen. Anhand einer konzeptionellen, praktischen und empirischen Untersuchung am Unternehmensbeispiel Unilever werden die Auswirkungen von umfassendem {\"o}kologischem und gesellschaftlichem Engagement auf die Arbeitgeberattraktivit{\"a}t analysiert. Es zeigt sich, dass Nachhaltigkeit - konkretisiert {\"u}ber die 17 Sustainable Development Goals (SDGs) und verankert im Kern der Marke - die erfolgreiche F{\"u}hrung einer Employer Brand erm{\"o}glicht. Dieses Ergebnis resultiert sowohl aus dem theoretischen als auch aus dem empirischen Teil dieser Arbeit. Im letzteren konnten unter Einsatz eines Strukturgleichungsmodells drei generelle positive Wirkzusammenh{\"a}nge best{\"a}tigt werden: Bewerber f{\"u}hlen sich zu verantwortungsbewussten Unternehmen hingezogen, weshalb sie einen P-O-F empfinden. Diese wahrgenommene Passung mit dem Unternehmen steigert die Arbeitgeberattraktivit{\"a}t aus Sicht der potenziellen Bewerber, wodurch sich wiederum die Wahrscheinlichkeit f{\"u}r eine Bewerbungsabsicht und die Akzeptanz eines Arbeitsplatzangebotes erh{\"o}ht. Es wird damit die Annahme best{\"a}tigt, dass den Herausforderungen der Personalbeschaffung {\"u}ber eine konsequente nachhaltige Ausrichtung der Gesch{\"a}ftst{\"a}tigkeit und deren glaubhafte Kommunikation {\"u}ber die Arbeitgebermarke begegnet werden kann.}, subject = {Personalmarketing}, language = {de} } @misc{Hochmuth2022, type = {Master Thesis}, author = {Hochmuth, Christian Andreas}, title = {Innovative Software in Unternehmen: Strategie und Erfolgsfaktoren f{\"u}r Einf{\"u}hrungsprojekte}, doi = {10.25972/OPUS-28841}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-288411}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Innovative Software kann die Position eines Unternehmens im Wettbewerb sichern. Die Einf{\"u}hrung innovativer Software ist aber alles andere als einfach. Denn obgleich die technischen Aspekte offensichtlicher sind, dominieren organisationale Aspekte. Zu viele Softwareprojekte schlagen fehl, da die Einf{\"u}hrung nicht gelingt, trotz Erf{\"u}llung technischer Anforderungen. Vor diesem Hintergrund ist das Forschungsziel der Masterarbeit, Risiken und Erfolgsfaktoren f{\"u}r die Einf{\"u}hrung innovativer Software in Unternehmen zu finden, eine Strategie zu formulieren und dabei die Bedeutung von Schl{\"u}sselpersonen zu bestimmen.}, subject = {Innovationsmanagement}, language = {de} } @phdthesis{Hofmann2022, author = {Hofmann, Adrian}, title = {Challenges and Solution Approaches for Blockchain Technology}, doi = {10.25972/OPUS-28261}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-282618}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {The digital transformation facilitates new forms of collaboration between companies along the supply chain and between companies and consumers. Besides sharing information on centralized platforms, blockchain technology is often regarded as a potential basis for this kind of collaboration.
However, there is much hype surrounding the technology due to the rising popularity of cryptocurrencies, decentralized finance (DeFi), and non-fungible tokens (NFTs). This leads to potential issues being overlooked. Therefore, this thesis aims to investigate, highlight, and address the current weaknesses of blockchain technology: Inefficient consensus, privacy, smart contract security, and scalability. First, to provide a foundation, the four key challenges are introduced, and the research objectives are defined, followed by a brief presentation of the preliminary work for this thesis. The following four parts highlight the four main problem areas of blockchain. Using big data analytics, we extracted and analyzed the blockchain data of six major blockchains to identify potential weaknesses in their consensus algorithm. To improve smart contract security, we classified smart contract functionalities to identify similarities in structure and design. The resulting taxonomy serves as a basis for future standardization efforts for security-relevant features, such as safe math functions and oracle services. To challenge privacy assumptions, we researched consortium blockchains from an adversary role. We chose four blockchains with misconfigured nodes and extracted as much information from those nodes as possible. Finally, we compared scalability solutions for blockchain applications and developed a decision process that serves as a guideline to improve the scalability of their applications. Building on the scalability framework, we showcase three potential applications for blockchain technology. First, we develop a token-based approach for inter-company value stream mapping. By only relying on simple tokens instead of complex smart-contracts, the computational load on the network is expected to be much lower compared to other solutions. The following two solutions use offloading transactions and computations from the main blockchain. The first approach uses secure multiparty computation to offload the matching of supply and demand for manufacturing capacities to a trustless network. The transaction is written to the main blockchain only after the match is made. The second approach uses the concept of payment channel networks to enable high-frequency bidirectional micropayments for WiFi sharing. The host gets paid for every second of data usage through an off-chain channel. The full payment is only written to the blockchain after the connection to the client gets terminated. Finally, the thesis concludes by briefly summarizing and discussing the results and providing avenues for further research.}, subject = {Blockchain}, language = {en} } @phdthesis{Schaetzlein2022, author = {Sch{\"a}tzlein, Uwe}, title = {Untersuchungen ausgew{\"a}hlter Reformen und Reformbedarfe in der deutschen Alterssicherung}, doi = {10.25972/OPUS-27804}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-278042}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {In der Dissertation werden drei ausgew{\"a}hlte Reformen oder Reformbedarfe im deutschen Drei-S{\"a}ulen-System der Alterssicherung untersucht: In der S{\"a}ule der gesetzlichen Altersversorgung werden M{\"o}glichkeiten zur Wiedereinsetzung des 2018 ausgesetzten Nachholfaktors in der gesetzlichen Rentenversicherung erarbeitet. 
Je nachdem, ob Erh{\"o}hungen des aktuellen Rentenwertes verursacht durch die Niveauschutzklausel in k{\"u}nftigen Jahren aufgerechnet werden sollen oder nicht, werden zwei unterschiedliche Verfahren - das Getrennte Verfahren und das Integrierte Verfahren - pr{\"a}sentiert, in welche sich der Nachholfaktor bei aktiver Schutzklausel und Niveauschutzklausel konsistent einf{\"u}gt. In der S{\"a}ule der betrieblichen Altersversorgung werden M{\"o}glichkeiten zur Reform des steuerrechtlichen Rechnungszinsfußes von 6 \% f{\"u}r Pensionsr{\"u}ckstellungen analysiert. Dabei wird betrachtet, welche Auswirkungen es f{\"u}r Arbeitgeber hat, wenn der Rechnungszinsfuß diskretion{\"a}r einen neuen Wert erhielte, wenn er regelgebunden einem Referenzzins folgte, wenn steuerrechtlich der handelsrechtlichen Bewertung gefolgt w{\"u}rde, und wenn ein innovatives Tranchierungsverfahren eingef{\"u}hrt w{\"u}rde. Anschließend wird er{\"o}rtert, inwieweit {\"u}berhaupt gesetzgeberischer Anpassungsbedarf besteht. Es kristallisiert sich der Eindruck heraus, dass mit dem steuerrechtlichen Rechnungszinsfuß eine Gesamtkapitalrendite typisiert wird. Die Hypothese kann nicht verworfen werden, dass 6 \% durchaus realistisch f{\"u}r deutsche Unternehmen sind. In der S{\"a}ule der privaten Altersvorsorge wird erschlossen, wann im Falle eines Riester-gef{\"o}rderten Erwerbs einer Immobilie in der Rentenphase des Eigenheimrentners der optimale Zeitpunkt zur Aus{\"u}bung seines Wahlrechts, seine nachgelagerte Besteuerung vorzeitig zu beenden, kommt. Bei vorzeitiger Beendigung sind alle ausstehenden Betr{\"a}ge auf einmal, jedoch nur zu 70 \% zu versteuern. Wann dieser 30\%ige Nachlass vorteilhaft wird, wird demonstriert unter Variation des Wohnf{\"o}rderkontostands, der Renteneink{\"u}nfte, des Marktzinssatzes, des Rentenbeginns, der {\"U}berlebenswahrscheinlichkeiten sowie des Besteuerungsanteils.}, subject = {Rentenversicherung}, language = {de} } @phdthesis{Griebel2022, author = {Griebel, Matthias}, title = {Applied Deep Learning: from Data to Deployment}, doi = {10.25972/OPUS-27765}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-277650}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Novel deep learning (DL) architectures, better data availability, and a significant increase in computing power have enabled scientists to solve problems that were considered unassailable for many years. A case in point is the "protein folding problem", a 50-year-old grand challenge in biology that was recently solved by the DL-system AlphaFold. Other examples comprise the development of large DL-based language models that, for instance, generate newspaper articles that hardly differ from those written by humans. However, developing unbiased, reliable, and accurate DL models for various practical applications remains a major challenge - and many promising DL projects get stuck in the piloting stage, never to be completed. In light of these observations, this thesis investigates the practical challenges encountered throughout the life cycle of DL projects and proposes solutions to develop and deploy rigorous DL models. The first part of the thesis is concerned with prototyping DL solutions in different domains. First, we conceptualize guidelines for applied image recognition and showcase their application in a biomedical research project. Next, we illustrate the bottom-up development of a DL backend for an augmented intelligence system in the manufacturing sector. 
We then turn to the fashion domain and present an artificial curation system for individual fashion outfit recommendations that leverages DL techniques and unstructured data from social media and fashion blogs. After that, we showcase how DL solutions can assist fashion designers in the creative process. Finally, we present our award-winning DL solution for the segmentation of glomeruli in human kidney tissue images that was developed for the Kaggle data science competition HuBMAP - Hacking the Kidney. The second part continues the development path of the biomedical research project beyond the prototyping stage. Using data from five laboratories, we show that ground truth estimation from multiple human annotators and training of DL model ensembles help to establish objectivity, reliability, and validity in DL-based bioimage analyses. In the third part, we present deepflash2, a DL solution that addresses the typical challenges encountered during training, evaluation, and application of DL models in bioimaging. The tool facilitates the objective and reliable segmentation of ambiguous bioimages through multi-expert annotations and integrated quality assurance. It is embedded in an easy-to-use graphical user interface and offers best-in-class predictive performance for semantic and instance segmentation under economical usage of computational resources.}, language = {en} } @phdthesis{Schickhardt2022, author = {Schickhardt, Irene}, title = {A Process Model for Selecting the Most Appropriate Production Site : an Application-oriented Approach for OEMs based on a Literature Analysis}, doi = {10.25972/OPUS-27665}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-276652}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {The global selection of production sites is a very complex task of great strategic importance for Original Equipment Manufacturers (OEMs), not only to ensure their sustained competitiveness, but also due to the sizeable long-term investment associated with a production site. With this in mind, this work develops a process model with which OEMs can select the most appropriate production site for their specific production activity in practice. Based on a literature analysis, the process model is developed by determining all necessary preparation, by defining the properties of the selection process model, providing all necessary instructions for choosing and evaluating location factors, and by laying out the procedure of the selection process model. Moreover, the selection process model includes a discussion of location factors which are possibly relevant for OEMs when selecting a production site. This discussion contains a description and, if relevant, a macroeconomic analysis of each location factor, an explanation of their relevance for constructing and operating a production site, additional information for choosing relevant location factors, and information and instructions on evaluating them in the selection process model. To be successfully applicable, the selection process model is developed based on the assumption that the production site must not be selected in isolation, but as part of the global production network and supply chain of the OEM and, additionally, to advance the OEM's related strategic goals. 
Furthermore, the selection process model is developed on the premise that a purely quantitative model cannot realistically solve an OEM's complex selection of a production site, that the realistic analysis of the conditions at potential production sites requires evaluating the changes of these conditions over the planning horizon of the production site and that the future development of many of these conditions can only be assessed with uncertainty.}, subject = {Produktionsstandort}, language = {en} } @article{FoellThiesse2021, author = {F{\"o}ll, Patrick and Thiesse, Fr{\´e}d{\´e}ric}, title = {Exploring Information Systems Curricula}, series = {Business \& Information Systems Engineering}, volume = {63}, journal = {Business \& Information Systems Engineering}, number = {6}, issn = {1867-0202}, doi = {10.1007/s12599-021-00702-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-270178}, pages = {711-732}, year = {2021}, abstract = {The study considers the application of text mining techniques to the analysis of curricula for study programs offered by institutions of higher education. It presents a novel procedure for efficient and scalable quantitative content analysis of module handbooks using topic modeling. The proposed approach allows for collecting, analyzing, evaluating, and comparing curricula from arbitrary academic disciplines as a partially automated, scalable alternative to qualitative content analysis, which is traditionally conducted manually. The procedure is illustrated by the example of IS study programs in Germany, based on a data set of more than 90 programs and 3700 distinct modules. The contributions made by the study address the needs of several different stakeholders and provide insights into the differences and similarities among the study programs examined. For example, the results may aid academic management in updating the IS curricula and can be incorporated into the curricular design process. With regard to employers, the results provide insights into the fulfillment of their employee skill expectations by various universities and degrees. Prospective students can incorporate the results into their decision concerning where and what to study, while university sponsors can utilize the results in their grant processes.}, language = {en} } @article{JanieschZschechHeinrich2021, author = {Janiesch, Christian and Zschech, Patrick and Heinrich, Kai}, title = {Machine learning and deep learning}, series = {Electronic Markets}, volume = {31}, journal = {Electronic Markets}, number = {3}, issn = {1422-8890}, doi = {10.1007/s12525-021-00475-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-270155}, pages = {685-695}, year = {2021}, abstract = {Today, intelligent systems that offer artificial intelligence capabilities often rely on machine learning. Machine learning describes the capacity of systems to learn from problem-specific training data to automate the process of analytical model building and solve associated tasks. Deep learning is a machine learning concept based on artificial neural networks. For many applications, deep learning models outperform shallow machine learning models and traditional data analysis approaches. In this article, we summarize the fundamentals of machine learning and deep learning to generate a broader understanding of the methodical underpinning of current intelligent systems. 
In particular, we provide a conceptual distinction between relevant terms and concepts, explain the process of automated analytical model building through machine learning and deep learning, and discuss the challenges that arise when implementing such intelligent systems in the field of electronic markets and networked business. These naturally go beyond technological aspects and highlight issues in human-machine interaction and artificial intelligence servitization.}, language = {en} } @article{GeyerHaanLorenzetal.2022, author = {Geyer, Johannes and Haan, Peter and Lorenz, Svenja and Zwick, Thomas and Bruns, Mona}, title = {Role of labor demand in the labor market effects of a pension reform}, series = {Industrial Relations}, volume = {61}, journal = {Industrial Relations}, number = {2}, doi = {10.1111/irel.12293}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259446}, pages = {152-192}, year = {2022}, abstract = {This paper shows that labor demand plays an important role in the labor market reactions to a pension reform in Germany. Employers with a high share of older worker inflow compared with their younger worker inflow, employers in sectors with few investments in research and development, and employers in sectors with a high share of collective bargaining agreements allow their employees to stay employed longer after the reform. These employers offer their older employees partial retirement instead of forcing them into unemployment before early retirement because the older employees incur low substitution costs and high dismissal costs.}, language = {en} } @book{Knoll2022, author = {Knoll, Leonhard}, title = {De exemplis deterrentibus}, edition = {3. Auflage}, isbn = {978-3-95826-180-8}, doi = {10.25972/WUP-978-3-95826-181-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-257178}, publisher = {W{\"u}rzburg University Press}, pages = {xii, 185}, year = {2022}, abstract = {Das vorliegende Buch besch{\"a}ftigt sich anhand einer Sammlung von realen F{\"a}llen, die in Aufgabenform formuliert sind, mit dem leider oft gest{\"o}rten Verh{\"a}ltnis von Theorie und Praxis in der rechtsgepr{\"a}gten Unternehmensbewertung. Es weist {\"a}hnlich wie „normale" Fallsammlungen die jeweiligen Aufgabenstellungen und die zugeh{\"o}rigen L{\"o}sungen aus. Die eigentlichen Fragestellungen in den Aufgabentexten sind durch kurze Erl{\"a}uterungen eingerahmt, damit jeder Fall als solcher von einem mit Bewertungsfragen halbwegs Vertrauten relativ leicht verstanden und in seiner Bedeutung eingeordnet werden kann. Dieses Vorgehen {\"a}hnelt wiederum Lehrb{\"u}chern, die Inhalte {\"u}ber F{\"a}lle vermitteln, nur dass hier nicht hypothetische F{\"a}lle das jeweils idealtypisch richtige Vorgehen zeigen, sondern Praxisf{\"a}lle plakative Verst{\"o}ße contra legem artis.}, subject = {Unternehmensbewertung}, language = {de} } @phdthesis{Wanner2022, author = {Wanner, Jonas Paul}, title = {Artificial Intelligence for Human Decision-Makers: Systematization, Perception, and Adoption of Intelligent Decision Support Systems in Industry 4.0}, doi = {10.25972/OPUS-25901}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-259014}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Innovative possibilities for data collection, networking, and evaluation are unleashing previously untapped potential for industrial production. However, harnessing this potential also requires a change in the way we work. 
In addition to expanded automation, human-machine cooperation is becoming more important: The machine achieves a reduction in complexity for humans through artificial intelligence. In fractions of a second, large amounts of data are analyzed and suggestions of high decision quality are offered. The human being, for his part, usually makes the ultimate decision. He validates the machine's suggestions and, if necessary, (physically) executes them. Both entities are highly dependent on each other to accomplish the task in the best possible way. Therefore, it seems particularly important to understand to what extent such cooperation can be effective. Current developments in the field of artificial intelligence show that research in this area is particularly focused on neural network approaches. These are considered to be highly powerful but have the disadvantage of lacking transparency. Their inherent computational processes and the respective result reasoning remain opaque to humans. Some researchers assume that human users might therefore reject the system's suggestions. The research domain of explainable artificial intelligence (XAI) addresses this problem and tries to develop methods to realize systems that are highly efficient and explainable. This work is intended to provide further insights relevant to the defined goal of XAI. For this purpose, artifacts are developed that represent research achievements regarding the systematization, perception, and adoption of artificially intelligent decision support systems from a user perspective. The focus is on socio-technical insights with the aim to better understand which factors are important for effective human-machine cooperation. The elaborations predominantly represent extended grounded research. Thus, the artifacts imply an extension of knowledge in order to develop and/or test effective XAI methods and techniques based on this knowledge. Industry 4.0, with a focus on maintenance, is used as the context for this development.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} } @phdthesis{Blank2021, author = {Blank, Felix}, title = {The use of the Hypercube Queueing Model for the location optimization decision of Emergency Medical Service systems}, doi = {10.25972/OPUS-24909}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249093}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Die strategische Planung von medizinischen Notfallsystemen steht in einem unmittelbaren Zusammenhang mit der {\"U}berlebenswahrscheinlichkeit von betroffenen Patienten. Die Forschung hat zahlreiche Kenngr{\"o}ßen und Evaluationsparameter entwickelt, die zur Bewertung verwendet werden k{\"o}nnen. Darunter fallen beispielsweise die Reaktionszeit, die Systemauslastung, diverse Wartezeitenparameter sowie der Anteil der Nachfrage, der nicht unmittelbar bedient werden kann. Dabei ist das Hypercube Queueing Modell eines der am h{\"a}ufigsten verwendeten Modelle. Aufgrund seines theoretischen Hintergrundes und der damit verbundenen hohen notwendigen Rechenzeiten wurde das Hypercube Queueing Modell erst in der j{\"u}ngeren Vergangenheit h{\"a}ufiger zur Optimierung von medizinischen Notfallsystemen verwendet. Gleichermaßen wurden nur wenige Systemparameter mit Hilfe des Modelles berechnet und das volle Potenzial demnach noch nicht ausgesch{\"o}pft. Die meisten der bereits vorhandenen Studien im Bereich der Optimierung unter Zuhilfenahme eines Hypercube Queueing Modells nutzen die zu erwartende Reaktionszeit des Systems als Zielparameter.
Obwohl die Verwendung von diesem eine zumeist ausgeglichene Systemkonfiguration zur Folge hat, wurden andere Zielparameter identifiziert. Die Verwendung des Hypercube Queueing Modells in den Modellen der robusten Optimierung sowie des robusten Goal Programmings hat versucht, einen ganzheitlicheren Blick durch die Verwendung von unterschiedlichen Tageszeiten zu offerieren. Dabei hat sich gezeigt, dass das Verhalten von medizinischen Notfallsystemen sowie die Parameter stark von diesen abh{\"a}ngen. Daher sollte die Analyse und gegebenenfalls Optimierung dieser Systeme unterschiedliche Verteilungen der Nachfrage, in Abh{\"a}ngigkeit ihrer Menge und r{\"a}umlichen Verteilung, unbedingt ber{\"u}cksichtigen, um eine m{\"o}glichst ganzheitliche Entscheidungsgrundlage zu garantieren.}, subject = {Warteschlangentheorie}, language = {en} } @phdthesis{Notz2021, author = {Notz, Pascal Markus}, title = {Prescriptive Analytics for Data-driven Capacity Management}, doi = {10.25972/OPUS-24042}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240423}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Digitization and artificial intelligence are radically changing virtually all areas across business and society. These developments are mainly driven by the technology of machine learning (ML), which is enabled by the coming together of large amounts of training data, statistical learning theory, and sufficient computational power. This technology forms the basis for the development of new approaches to solve classical planning problems of Operations Research (OR): prescriptive analytics approaches integrate ML prediction and OR optimization into a single prescription step, so they learn from historical observations of demand and a set of features (co-variates) and provide a model that directly prescribes future decisions. These novel approaches provide enormous potential to improve planning decisions, as first case reports showed, and, consequently, constitute a new field of research in Operations Management (OM). First works in this new field of research have studied approaches to solving comparatively simple planning problems in the area of inventory management. However, common OM planning problems often have a more complex structure, and many of these complex planning problems are within the domain of capacity planning. Therefore, this dissertation focuses on developing new prescriptive analytics approaches for complex capacity management problems. This dissertation consists of three independent articles that develop new prescriptive approaches and use these to solve realistic capacity planning problems. The first article, "Prescriptive Analytics for Flexible Capacity Management", develops two prescriptive analytics approaches, weighted sample average approximation (wSAA) and kernelized empirical risk minimization (kERM), to solve a complex two-stage capacity planning problem that has been studied extensively in the literature: a logistics service provider sorts daily incoming mail items on three service lines that must be staffed on a weekly basis. This article is the first to develop a kERM approach to solve a complex two-stage stochastic capacity planning problem with matrix-valued observations of demand and vector-valued decisions. The article develops out-of-sample performance guarantees for kERM and various kernels, and shows the universal approximation property when using a universal kernel.
The results of the numerical study suggest that prescriptive analytics approaches may lead to significant improvements in performance compared to traditional two-step approaches or SAA and that their performance is more robust to variations in the exogenous cost parameters. The second article, "Prescriptive Analytics for a Multi-Shift Staffing Problem", uses prescriptive analytics approaches to solve the (queuing-type) multi-shift staffing problem (MSSP) of an aviation maintenance provider that receives customer requests of uncertain number and at uncertain arrival times throughout each day and plans staff capacity for two shifts. This planning problem is particularly complex because the order inflow and processing are modelled as a queuing system, and the demand in each day is non-stationary. The article addresses this complexity by deriving an approximation of the MSSP that enables the planning problem to be solved using wSAA, kERM, and a novel Optimization Prediction approach. A numerical evaluation shows that wSAA leads to the best performance in this particular case. The solution method developed in this article builds a foundation for solving queuing-type planning problems using prescriptive analytics approaches, so it bridges the "worlds" of queuing theory and prescriptive analytics. The third article, "Explainable Subgradient Tree Boosting for Prescriptive Analytics in Operations Management" proposes a novel prescriptive analytics approach to solve the two capacity planning problems studied in the first and second articles that allows decision-makers to derive explanations for prescribed decisions: Subgradient Tree Boosting (STB). STB combines the machine learning method Gradient Boosting with SAA and relies on subgradients because the cost function of OR planning problems often cannot be differentiated. A comprehensive numerical analysis suggests that STB can lead to a prescription performance that is comparable to that of wSAA and kERM. The explainability of STB prescriptions is demonstrated by breaking exemplary decisions down into the impacts of individual features. The novel STB approach is an attractive choice not only because of its prescription performance, but also because of the explainability that helps decision-makers understand the causality behind the prescriptions. The results presented in these three articles demonstrate that using prescriptive analytics approaches, such as wSAA, kERM, and STB, to solve complex planning problems can lead to significantly better decisions compared to traditional approaches that neglect feature data or rely on a parametric distribution estimation.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Tschinkl2021, author = {Tschinkl, Dominik}, title = {Der Einfluss von Steuern auf Ersparnisbildung und Altersvorsorge - Experimentelle und qualitative Untersuchungen}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-150-1}, doi = {10.25972/WUP-978-3-95826-151-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-216798}, school = {W{\"u}rzburg University Press}, pages = {XXII, 227}, year = {2021}, abstract = {Aufgrund der bekannten Probleme der umlagefinanzierten gesetzlichen Rentenversicherung versucht der deutsche Gesetzgeber seit einiger Zeit, die eigenverantwortliche Altersvorsorge zu f{\"o}rdern. H{\"a}ufig steht dabei die betriebliche Altersversorgung (bAV) im Fokus. 
In dieser Arbeit wird mittels Experten- und Arbeitnehmerinterviews ausf{\"u}hrlich herausgearbeitet, wo zentrale Verbreitungshemmnisse der bAV liegen und wie diese durch Anpassung der steuer- und sozialversicherungsrechtlichen Rahmenbedingungen adressiert werden k{\"o}nnen. Wesentliche Elemente dieser Reform{\"u}berlegungen sind in das zum 01.01.2018 in Kraft getretene Betriebsrentenst{\"a}rkungsgesetz eingeflossen. Daneben wird in dieser Arbeit mithilfe einer experimental{\"o}konomischen Analyse gezeigt, wie verschiedene Arten der Besteuerung individuelle Sparentscheidungen beeinflussen k{\"o}nnen. Dabei wird deutlich, dass Individuen die Wirkung einer nachgelagerten Besteuerung h{\"a}ufig nicht korrekt wahrnehmen.}, subject = {Deutschland}, language = {de} } @article{EhnertParsaRoperetal.2016, author = {Ehnert, Ina and Parsa, Sepideh and Roper, Ian and Wagner, Marcus and Muller-Camen, Michael}, title = {Reporting on sustainability and HRM: a comparative study of sustainability reporting practices by the world's largest companies}, series = {International Journal of Human Resource Management}, volume = {27}, journal = {International Journal of Human Resource Management}, number = {1}, doi = {10.1080/09585192.2015.1024157}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-191141}, pages = {88-108}, year = {2016}, abstract = {As a response to the growing public awareness on the importance of organisational contributions to sustainable development, there is an increased incentive for corporations to report on their sustainability activities. In parallel with this has been the development of Sustainable HRM' which embraces a growing body of practitioner and academic literature connecting the notions of corporate sustainability to HRM. The aim of this article is to analyse corporate sustainability reporting amongst the world's largest companies and to assess the HRM aspects of sustainability within these reports in comparison to environmental aspects of sustainable management and whether organisational attributes - principally country-of-origin - influences the reporting of such practices. A focus in this article is the extent to which the reporting of various aspects of sustainability may reflect dominant models of corporate governance in the country in which a company is headquartered. The findings suggest, first and against expectations, that the overall disclosure on HRM-related performance is not lower than that on environmental performance. Second, companies report more on their internal workforce compared to their external workforce. Finally, international differences, in particular those between companies headquartered in liberal market economies and coordinated market economies, are not as apparent as expected.}, language = {en} } @phdthesis{Lauton2021, author = {Lauton, Felix}, title = {Three Essays on the Procurement of Essential Medicines in Developing Countries}, doi = {10.25972/OPUS-22063}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-220631}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {The first problem is that of the optimal volume allocation in procurement. The choice of this problem was motivated by a study whose objective was to support decision-making at two procurement organizations for the procurement of Depot Medroxyprogesterone Acetate (DMPA), an injectable contraceptive. At the time of this study, only one supplier that had undergone the costly and lengthy process of WHO pre-qualification was available to these organizations. 
However, a new entrant supplier was expected to receive WHO qualification within the next year, thus becoming a viable second source for DMPA procurement. When deciding how to allocate the procurement volume between the two suppliers, the buyers had to consider the impact on price as well as risk. Higher allocations to one supplier yield lower prices but expose a buyer to higher supply risks, while an even allocation will result in lower supply risk but also reduce competitive pressure, resulting in higher prices. Our research investigates this single- versus dual-sourcing problem and quantifies in one model the impact of the procurement volume on competition and risk. To support decision-makers, we develop a mathematical framework that accounts for the characteristics of donor-funded global health markets and models the effects of an entrant on purchasing costs and supply risks. Our in-depth analysis provides insights into how the optimal allocation decision is affected by various parameters and explores the trade-off between competition and supply risk. For example, we find that, even if the entrant supplier introduces longer lead times and a higher default risk, the buyer still benefits from dual sourcing. However, these risk-diversification benefits depend heavily on the entrant's in-country registration: If the buyer can ship the entrant's product to only a selected number of countries, the buyer does not benefit from dual sourcing as much as it would if the entrant's product could be shipped to all supplied countries. We show that the buyer should be interested in qualifying the entrant's product in countries with high demand first. In the second problem, we explore a new tendering mechanism called the postponement tender, which can be useful when buyers in the global health industry want to contract new generics suppliers with uncertain product quality. The mechanism allows a buyer to postpone part of the procurement volume's allocation so the buyer can learn about the unknown quality before allocating the remaining volume to the best supplier in terms of both price and quality. We develop a mathematical model to capture the decision-maker's trade-offs in setting the right split between the initial volume and the postponed volume. Our analysis shows that a buyer can benefit from this mechanism more than it can from a single-sourcing format, as it can decrease the risk of receiving poor quality (in terms of product quality and logistics performance) and even increase competitive pressure between the suppliers, thereby lowering the purchasing costs. By considering market parameters like the buyer's size, the suppliers' value (difference between quality and cost), quality uncertainty, and minimum order volumes, we derive optimal sourcing strategies for various market structures and explore how competition is affected by the buyer's learning about the suppliers' quality through the initial volume. The third problem considers the repeated procurement problem of pharmacies in Kenya that have multi-product inventories. Coordinating orders allows pharmacies to achieve lower procurement prices by using the quantity discounts manufacturers offer and sharing fixed ordering costs, such as logistics costs. However, coordinating and optimizing orders for multiple products is complex and costly. To solve the coordinated procurement problem, also known as the Joint Replenishment Problem (JRP) with quantity discounts, a novel, data-driven inventory policy using sample-average approximation is proposed.
The inventory policy is developed based on renewal theory and is evaluated using real-world sales data from Kenyan pharmacies. Multiple benchmarks are used to evaluate the performance of the approach. First, it is compared to the theoretically optimal policy --- that is, a dynamic-programming policy --- in the single-product setting without quantity discounts to show that the proposed policy results in comparable inventory costs. Second, the policy is evaluated for the original multi-product setting with quantity discounts and compared to ex-post optimal costs. The evaluation shows that the policy's performance in the multi-product setting is similar to its performance in the single-product setting (with respect to ex-post optimal costs), suggesting that the proposed policy offers a promising, data-driven solution to these types of multi-product inventory problems.}, subject = {Entwicklungsl{\"a}nder}, language = {en} } @phdthesis{Kloos2020, author = {Kloos, Konstantin}, title = {Allocation Planning in Sales Hierarchies}, doi = {10.25972/OPUS-19373}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193734}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Allocation planning describes the process of allocating scarce supply to individual customers in order to prioritize demands from more important customers, i.e. because they request a higher service-level target. A common assumption across publications is that allocation planning is performed by a single planner with the ability to decide on the allocations to all customers simultaneously. In many companies, however, there does not exist such a central planner and, instead, allocation planning is a decentral and iterative process aligned with the company's multi-level hierarchical sales organization. This thesis provides a rigorous analytical and numerical analysis of allocation planning in such hierarchical settings. It studies allocation methods currently used in practice and shows that these approaches typically lead to suboptimal allocations associated with significant performance losses. Therefore, this thesis provides multiple new allocation approaches which show a much higher performance, but still are simple enough to lend themselves to practical application. The findings in this thesis can guide decision makers when to choose which allocation approach and what factors are decisive for their performance. In general, our research suggests that with a suitable hierarchical allocation approach, decision makers can expect a similar performance as under centralized planning.}, subject = {Supply Chain Management}, language = {en} } @phdthesis{Hauser2020, author = {Hauser, Matthias}, title = {Smart Store Applications in Fashion Retail}, doi = {10.25972/OPUS-19301}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193017}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Traditional fashion retailers are increasingly hard-pressed to keep up with their digital competitors. In this context, the re-invention of brick-and-mortar stores as smart retail environments is being touted as a crucial step towards regaining a competitive edge. 
This thesis describes a design-oriented research project that deals with automated product tracking on the sales floor and presents three smart fashion store applications that are tied to such localization information: (i) an electronic article surveillance (EAS) system that distinguishes between theft and non-theft events, (ii) an automated checkout system that detects customers' purchases when they are leaving the store and associates them with individual shopping baskets to automatically initiate payment processes, and (iii) a smart fitting room that detects the items customers bring into individual cabins and identifies the items they are currently most interested in to offer additional customer services (e.g., product recommendations or omnichannel services). The implementation of such cyberphysical systems in established retail environments is challenging, as architectural constraints, well-established customer processes, and customer expectations regarding privacy and convenience pose challenges to system design. To overcome these challenges, this thesis leverages Radio Frequency Identification (RFID) technology and machine learning techniques to address the different detection tasks. To optimally configure the systems and draw robust conclusions regarding their economic value contribution, beyond technological performance criteria, this thesis furthermore introduces a service operations model that allows mapping the systems' technical detection characteristics to business relevant metrics such as service quality and profitability. This analytical model reveals that the same system component for the detection of object transitions is well suited for the EAS application but does not have the necessary high detection accuracy to be used as a component of an automated checkout system.}, subject = {Laden}, language = {en} } @phdthesis{Menzel2020, author = {Menzel, Moritz}, title = {Das Betriebsrentenst{\"a}rkungsgesetz und seine Auswirkungen auf Geringverdiener. Eine modelltheoretische Analyse}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-126-6}, doi = {10.25972/WUP-978-3-95826-127-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-191753}, school = {W{\"u}rzburg University Press}, pages = {XXII, 201}, year = {2020}, abstract = {Vor allem unter Geringverdienern ist die betriebliche Altersversorgung nur unterdurchschnittlich verbreitet. Mit dem zum 01.01.2018 in Kraft getretenen Betriebsrentenst{\"a}rkungsgesetz und insbesondere dem sogenannten BAV-F{\"o}rderbetrag (\S 100 EStG) versucht der Gesetzgeber daher, diese Altersvorsorgeform attraktiver zu gestalten und so deren Verbreitung unter Geringverdienern auszuweiten. Dass dieses Ziel zumindest aus modelltheoretischer Sicht erreicht werden kann, zeigen die Ergebnisse dieser Studie auf. Anhand eines deterministischen Rechenmodells werden die finanziellen Vor- und Nachteile verschiedener Vorsorgealternativen aufgedeckt und pr{\"a}zise beziffert. Daneben widmet sich die Arbeit auch den steuer-, sozialversicherungs- und arbeitsrechtlichen Regelungen der betrieblichen Altersversorgung vor und nach Inkrafttreten des Betriebsrentenst{\"a}rkungsgesetzes und diskutiert dar{\"u}ber hinaus alternative Reformmaßnahmen.}, subject = {Betriebsrentenst{\"a}rkungsgesetz}, language = {de} } @article{HirschJahnZwick2020, author = {Hirsch, Boris and Jahn, Elke J. 
and Zwick, Thomas}, title = {Birds, Birds, Birds: Co-Worker Similarity, Workplace Diversity and Job Switches}, series = {British Journal of Industrial Relations}, volume = {58}, journal = {British Journal of Industrial Relations}, number = {3}, doi = {10.1111/bjir.12509}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-208666}, pages = {690-718}, year = {2020}, abstract = {We investigate how the demographic composition of the workforce along the sex, nationality, education, age and tenure dimensions affects job switches. Fitting duration models for workers' job-to-job turnover rate that control for workplace fixed effects in a representative sample of large manufacturing plants in Germany during 1975-2016, we find that larger co-worker similarity in all five dimensions substantially depresses job-to-job moves, whereas workplace diversity is of limited importance. In line with conventional wisdom, which has that birds of a feather flock together, our interpretation of the results is that workers prefer having co-workers of their kind and place less value on diverse workplaces.}, language = {en} } @phdthesis{Hoerner2020, author = {H{\"o}rner, Sven}, title = {Empirical Studies on Accounting - Shareholders' Perceptions of Earnings Quality}, doi = {10.25972/OPUS-18847}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188473}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Accounting plays an essential role in solving the principal-agent problem between managers and shareholders of capital market-oriented companies through the provision of information by the manager. However, this can succeed only if the accounting information is of high quality. In this context, the perceptions of shareholders regarding earnings quality are of particular importance. The present dissertation intends to contribute to a deeper understanding regarding earnings quality from the perspective of shareholders of capital market-oriented companies. In particular, the thesis deals with indicators of shareholders' perceptions of earnings quality, the influence of the auditor's independence on these perceptions, and the shareholders' assessment of the importance of earnings quality in general. Therefore, this dissertation examines market reactions to earnings announcements, measures of earnings quality and the auditor's independence, as well as shareholders' voting behavior at annual general meetings. Following the introduction and a theoretical part consisting of two chapters, which deal with the purposes of accounting and auditing as well as the relevance of shareholder voting at the annual general meeting in the context of the principal-agent theory, the dissertation presents three empirical studies. The empirical study presented in chapter 4 investigates auditor ratification votes in a U.S. setting. The study addresses the question of whether the results of auditor ratification votes are informative regarding shareholders' perceptions of earnings quality. Using a returns-earnings design, the study demonstrates that the results of auditor ratification votes are associated with market reactions to unexpected earnings at the earnings announcement date. Furthermore, there are indications that this association seems to be positively related to higher levels of information asymmetry between managers and shareholders. 
Thus, there is empirical support for the notion that the results of auditor ratification votes are earnings-related information that might help shareholders to make informed investment decisions. Chapter 5 investigates the relation between the economic importance of the client and perceived earnings quality. In particular, it is examined whether and when shareholders have a negative perception of an auditor's economic dependence on the client. The results from a Big 4 client sample in the U.S. (fiscal years 2010 through 2014) indicate a negative association between the economic importance of the client and shareholders' perceptions of earnings quality. The results are interpreted to mean that shareholders are still concerned about auditor independence even ten years after the implementation of the Sarbanes-Oxley Act. Furthermore, the association between the economic importance of the client and shareholders' perceptions of earnings quality applies predominantly to the subsample of clients that are more likely to be financially distressed. Therefore, the empirical results reveal that shareholders' perceptions of auditor independence are conditional on the client's circumstances. The study presented in chapter 6 sheds light on the question of whether earnings quality influences shareholders' satisfaction with the members of the company's board. Using data from 1,237 annual general meetings of German listed companies from 2010 through 2015, the study provides evidence that earnings quality - measured by the absolute value of discretionary accruals - is related to shareholders' satisfaction with the company's board. Moreover, the findings imply that shareholders predominantly blame the management board for inferior earnings quality. Overall, the evidence that earnings quality positively influences shareholders' satisfaction emphasizes the relevance of earnings quality.}, subject = {Qualit{\"a}t}, language = {en} } @phdthesis{Meller2020, author = {Meller, Jan Maximilian}, title = {Data-driven Operations Management: Combining Machine Learning and Optimization for Improved Decision-making}, doi = {10.25972/OPUS-20604}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-206049}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {This dissertation consists of three independent, self-contained research papers that investigate how state-of-the-art machine learning algorithms can be used in combination with operations management models to consider high dimensional data for improved planning decisions. More specifically, the thesis focuses on the question concerning how the underlying decision support models change structurally and how those changes affect the resulting decision quality. Over the past years, the volume of globally stored data has experienced tremendous growth. Rising market penetration of sensor-equipped production machinery, advanced ways to track user behavior, and the ongoing use of social media lead to large amounts of data on production processes, user behavior, and interactions, as well as condition information about technical gear, all of which can provide valuable information to companies in planning their operations. In the past, two generic concepts have emerged to accomplish this. The first concept, separated estimation and optimization (SEO), uses data to forecast the central inputs (i.e., the demand) of a decision support model. 
The forecast and a distribution of forecast errors are then used in a subsequent stochastic optimization model to determine optimal decisions. In contrast to this sequential approach, the second generic concept, joint estimation-optimization (JEO), combines the forecasting and optimization step into a single optimization problem. Following this approach, powerful machine learning techniques are employed to approximate highly complex functional relationships and hence relate feature data directly to optimal decisions. The first article, "Machine learning for inventory management: Analyzing two concepts to get from data to decisions", chapter 2, examines performance differences between implementations of these concepts in a single-period Newsvendor setting. The paper first proposes a novel JEO implementation based on the random forest algorithm to learn optimal decision rules directly from a data set that contains historical sales and auxiliary data. Going forward, we analyze structural properties that lead to these performance differences. Our results show that the JEO implementation achieves significant cost improvements over the SEO approach. These differences are strongly driven by the decision problem's cost structure and the amount and structure of the remaining forecast uncertainty. The second article, "Prescriptive call center staffing", chapter 3, applies the logic of integrating data analysis and optimization to a more complex problem class, an employee staffing problem in a call center. We introduce a novel approach to applying the JEO concept that augments historical call volume data with features like the day of the week, the beginning of the month, and national holiday periods. We employ a regression tree to learn the ex-post optimal staffing levels based on similarity structures in the data and then generalize these insights to determine future staffing levels. This approach, relying on only few modeling assumptions, significantly outperforms a state-of-the-art benchmark that uses considerably more model structure and assumptions. The third article, "Data-driven sales force scheduling", chapter 4, is motivated by the problem of how a company should allocate limited sales resources. We propose a novel approach based on the SEO concept that involves a machine learning model to predict the probability of winning a specific project. We develop a methodology that uses this prediction model to estimate the "uplift", that is, the incremental value of an additional visit to a particular customer location. To account for the remaining uncertainty at the subsequent optimization stage, we adapt the decision support model in such a way that it can control for the level of trust in the predicted uplifts. This novel policy dominates both a benchmark that relies completely on the uplift information and a robust benchmark that optimizes the sum of potential profits while neglecting any uplift information. The results of this thesis show that decision support models in operations management can be transformed fundamentally by considering additional data and benefit through better decision quality respectively lower mismatch costs. The way how machine learning algorithms can be integrated into these decision support models depends on the complexity and the context of the underlying decision problem. 
In summary, this dissertation provides an analysis based on three different, specific application scenarios that serve as a foundation for further analyses of employing machine learning for decision support in operations management.}, subject = {Operations Management}, language = {en} } @phdthesis{Taigel2020, author = {Taigel, Fabian Michael}, title = {Data-driven Operations Management: From Predictive to Prescriptive Analytics}, doi = {10.25972/OPUS-20651}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-206514}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Autonomous cars and artificial intelligence that beats humans in Jeopardy or Go are glamorous examples of the so-called Second Machine Age that involves the automation of cognitive tasks [Brynjolfsson and McAfee, 2014]. However, the larger impact in terms of increasing the efficiency of industry and the productivity of society might come from computers that improve or take over business decisions by using large amounts of available data. This impact may even exceed that of the First Machine Age, the industrial revolution that started with James Watt's invention of an efficient steam engine in the late eighteenth century. Indeed, the prevalent phrase that calls data "the new oil" indicates the growing awareness of data's importance. However, many companies, especially those in the manufacturing and traditional service industries, still struggle to increase productivity using the vast amounts of data [Organisation for Economic Co-operation and Development, 2018]. One reason for this struggle is that companies stick with a traditional way of using data for decision support in operations management that is not well suited to automated decision-making. In traditional inventory and capacity management, some data - typically just historical demand data - is used to estimate a model that makes predictions about uncertain planning parameters, such as customer demand. The planner then has two tasks: to adjust the prediction with respect to additional information that was not part of the data but still might influence demand and to take the remaining uncertainty into account and determine a safety buffer based on the underage and overage costs. In the best case, the planner determines the safety buffer based on an optimization model that takes the costs and the distribution of historical forecast errors into account; however, these decisions are usually based on a planner's experience and intuition, rather than on solid data analysis. This two-step approach is referred to as separated estimation and optimization (SEO). With SEO, using more data and better models for making the predictions would improve only the first step, which would still improve decisions but would not automate (and, hence, revolutionize) decision-making. Using SEO is like using a stronger horse to pull the plow: one still has to walk behind. The real potential for increasing productivity lies in moving from predictive to prescriptive approaches, that is, from the two-step SEO approach, which uses predictive models in the estimation step, to a prescriptive approach, which integrates the optimization problem with the estimation of a model that then provides a direct functional relationship between the data and the decision. Following Akcay et al. [2011], we refer to this integrated approach as joint estimation-optimization (JEO). JEO approaches prescribe decisions, so they can automate the decision-making process.
Just as the steam engine replaced manual work, JEO approaches replace cognitive work. The overarching objective of this dissertation is to analyze, develop, and evaluate new ways in which data can be used in making planning decisions in operations management to unlock the potential for increasing productivity. In doing so, the thesis comprises five self-contained research articles that forge the bridge from predictive to prescriptive approaches. While the first article focuses on how sensitive data like condition data from machinery can be used to make predictions of spare-parts demand, the remaining articles introduce, analyze, and discuss prescriptive approaches to inventory and capacity management. All five articles consider approaches that use machine learning and data in innovative ways to improve current approaches to solving inventory or capacity management problems. The articles show that, by moving from predictive to prescriptive approaches, we can improve data-driven operations management in two ways: by making decisions more accurate and by automating decision-making. Thus, this dissertation provides examples of how digitization and the Second Machine Age can change decision-making in companies to increase efficiency and productivity.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Guenther2020, author = {G{\"u}nther, Johannes}, title = {Die Unabh{\"a}ngigkeit des Abschlusspr{\"u}fers bei privaten Unternehmen in Deutschland - Eine empirische Analyse im Kontext der Honorare f{\"u}r Pr{\"u}fung und Beratung}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-116-7 (print)}, doi = {10.25972/WUP-978-3-95826-117-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-185814}, school = {W{\"u}rzburg University Press}, pages = {XXX, 315}, year = {2020}, abstract = {Die vorliegende Arbeit untersucht empirisch die Unabh{\"a}ngigkeit des Abschlusspr{\"u}fers bei nicht kapitalmarktorientierten Unternehmen - im Folgenden private Unternehmen genannt - im Kontext von Pr{\"u}fungs- und Nichtpr{\"u}fungshonoraren in Deutschland. Als Surrogat f{\"u}r die Pr{\"u}fungsqualit{\"a}t wird die Wahrscheinlichkeit einer Going-Concern-Modifikation („GCM") herangezogen. GCM k{\"o}nnen als Indikator f{\"u}r die Pr{\"u}fungsqualit{\"a}t besonders geeignet sein, da sie ein direktes Ergebnis der T{\"a}tigkeit des Abschlusspr{\"u}fers sind und von ihm formuliert und verantwortet werden. F{\"u}r das Surrogat GCM existiert f{\"u}r Deutschland im Bereich der privaten Unternehmen bislang keine Studie. International ist lediglich die Untersuchung von HOPE/LANGLI (2010) vorhanden. Die Unabh{\"a}ngigkeit ist von anhaltender Relevanz, wird jedoch immer wieder in Frage gestellt. Der Fokus von Regulierungsbeh{\"o}rden und Forschung liegt auf kapitalmarktorientierten Unternehmen. Die Unabh{\"a}ngigkeit des Abschlusspr{\"u}fers kann besonders gef{\"a}hrdet sein, wenn Schutzmechanismen, wie z.B. die Haftung oder das Risiko eines Reputationsverlustes, besonders schwach ausgepr{\"a}gt sind. Aus vorangegangenen Forschungsarbeiten kann abgeleitet werden, dass bei privaten Unternehmen das Risiko eines Reputationsverlustes im Vergleich zu kapitalmarktorientierten Unternehmen geringer ist. Auch weitere Schutzmechanismen, wie z.B. die Pr{\"u}ferrotation, sind in Deutschland {\"u}berwiegend f{\"u}r kapitalmarktorientierte Unternehmen vorgesehen.
Weiterhin ist das Haftungsrisiko f{\"u}r den Abschlusspr{\"u}fer in Deutschland verglichen mit angels{\"a}chsischen L{\"a}ndern geringer. Damit erfolgt die empirische Analyse in einem Umfeld, in dem die Unabh{\"a}ngigkeit des Abschlusspr{\"u}fers besonders gef{\"a}hrdet ist. Die Untersuchungsgruppe f{\"u}r die multivariate Regressionsanalyse besteht aus 245 Beobachtungen von privaten Unternehmen mit GCM im Zeitraum von 2009 bis 2012. Der Untersuchungsgruppe werden zwei unterschiedlich abgegrenzte Kontrollgruppen mit 1.921 bzw. 396 Beobachtungen von Unternehmen in finanziellen Schwierigkeiten ohne GCM gegen{\"u}bergestellt. Im Ergebnis k{\"o}nnen f{\"u}r die Einflussgr{\"o}ßen, die auf den Pr{\"u}fungs-, Nichtpr{\"u}fungs- und Gesamthonoraren basieren, keine Indizien f{\"u}r die Gef{\"a}hrdung der Unabh{\"a}ngigkeit identifiziert werden. F{\"u}r die Pr{\"u}fungs- und Gesamthonorare wird mit beiden Kontrollgruppen ein signifikant positiver Zusammenhang beobachtet. Der positive Zusammenhang kann auf den h{\"o}heren Pr{\"u}fungsaufwand durch die zus{\"a}tzlichen Pr{\"u}fungshandlungen bei einer GCM zur{\"u}ckgef{\"u}hrt werden. Trotz der geringeren Auspr{\"a}gung bei privaten Unternehmen k{\"o}nnen Reputations- und Haftungsrisiken als alternative Erkl{\"a}rung nicht ausgeschlossen werden. Weniger eindeutig, und abh{\"a}ngig von den Modellspezifikationen, ist der positive Zusammenhang f{\"u}r die Nichtpr{\"u}fungshonorare. Grunds{\"a}tzlich gelten die Ergebnisse auch f{\"u}r die abnormalen Honorare. Die Ergebnisse best{\"a}tigen sich im Wesentlichen in den durchgef{\"u}hrten Sensitivit{\"a}tsanalysen.}, subject = {Pr{\"u}fungsqualit{\"a}t}, language = {de} } @article{RauBredow2019, author = {Rau-Bredow, Hans}, title = {Bigger is not always safer: a critical analysis of the subadditivity assumption for coherent risk measures}, series = {Risks}, volume = {7}, journal = {Risks}, number = {3}, doi = {10.3390/risks7030091}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-201808}, pages = {91}, year = {2019}, abstract = {This paper provides a critical analysis of the subadditivity axiom, which is the key condition for coherent risk measures. Contrary to the subadditivity assumption, bank mergers can create extra risk. We begin with an analysis how a merger affects depositors, junior or senior bank creditors, and bank owners. Next it is shown that bank mergers can result in higher payouts having to be made by the deposit insurance scheme. Finally, we demonstrate that if banks are interconnected via interbank loans, a bank merger could lead to additional contagion risks. We conclude that the subadditivity assumption should be rejected, since a subadditive risk measure, by definition, cannot account for such increased risks.}, language = {en} } @phdthesis{Stein2019, author = {Stein, Nikolai Werner}, title = {Advanced Analytics in Operations Management and Information Systems: Methods and Applications}, doi = {10.25972/OPUS-19266}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-192668}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Die digitale Transformation der Gesellschaft birgt enorme Potenziale f{\"u}r Unternehmen aus allen Sektoren. Diese verf{\"u}gen aufgrund neuer Datenquellen, wachsender Rechenleistung und verbesserter Konnektivit{\"a}t {\"u}ber rasant steigende Datenmengen. 
Um im digitalen Wandel zu bestehen und Wettbewerbsvorteile in Bezug auf Effizienz und Effektivit{\"a}t heben zu k{\"o}nnen m{\"u}ssen Unternehmen die verf{\"u}gbaren Daten nutzen und datengetriebene Entscheidungsprozesse etablieren. Dennoch verwendet die Mehrheit der Firmen lediglich Tools aus dem Bereich „descriptive analytics" und nur ein kleiner Teil der Unternehmen macht bereits heute von den M{\"o}glichkeiten der „predictive analytics" und „prescriptive analytics" Gebrauch. Ziel dieser Dissertation, die aus vier inhaltlich abgeschlossenen Teilen besteht, ist es, Einsatzm{\"o}glichkeiten von „prescriptive analytics" zu identifizieren. Da pr{\"a}diktive Modelle eine wesentliche Voraussetzung f{\"u}r „prescriptive analytics" sind, thematisieren die ersten beiden Teile dieser Arbeit Verfahren aus dem Bereich „predictive analytics." Ausgehend von Verfahren des maschinellen Lernens wird zun{\"a}chst die Entwicklung eines pr{\"a}diktiven Modells am Beispiel der Kapazit{\"a}ts- und Personalplanung bei einem IT-Beratungsunternehmen veranschaulicht. Im Anschluss wird eine Toolbox f{\"u}r Data Science Anwendungen entwickelt. Diese stellt Entscheidungstr{\"a}gern Richtlinien und bew{\"a}hrte Verfahren f{\"u}r die Modellierung, das Feature Engineering und die Modellinterpretation zur Verf{\"u}gung. Der Einsatz der Toolbox wird am Beispiel von Daten eines großen deutschen Industrieunternehmens veranschaulicht. Verbesserten Prognosen, die von leistungsf{\"a}higen Vorhersagemodellen bereitgestellt werden, erlauben es Entscheidungstr{\"a}gern in einigen Situationen bessere Entscheidungen zu treffen und auf diese Weise einen Mehrwert zu generieren. In vielen komplexen Entscheidungssituationen ist die Ableitungen von besseren Politiken aus zur Verf{\"u}gung stehenden Prognosen jedoch oft nicht trivial und erfordert die Entwicklung neuer Planungsalgorithmen. Aus diesem Grund fokussieren sich die letzten beiden Teile dieser Arbeit auf Verfahren aus dem Bereich „prescriptive analytics". Hierzu wird zun{\"a}chst analysiert, wie die Vorhersagen pr{\"a}diktiver Modelle in pr{\"a}skriptive Politiken zur L{\"o}sung eines „Optimal Searcher Path Problem" {\"u}bersetzt werden k{\"o}nnen. Trotz beeindruckender Fortschritte in der Forschung im Bereich k{\"u}nstlicher Intelligenz sind die Vorhersagen pr{\"a}diktiver Modelle auch heute noch mit einer gewissen Unsicherheit behaftet. Der letzte Teil dieser Arbeit schl{\"a}gt einen pr{\"a}skriptiven Ansatz vor, der diese Unsicherheit ber{\"u}cksichtigt. Insbesondere wird ein datengetriebenes Verfahren f{\"u}r die Einsatzplanung im Außendienst entwickelt. Dieser Ansatz integriert Vorhersagen bez{\"u}glich der Erfolgswahrscheinlichkeiten und die Modellqualit{\"a}t des entsprechenden Vorhersagemodells in ein „Team Orienteering Problem."}, subject = {Operations Management}, language = {en} } @phdthesis{Stralla2019, author = {Stralla, Markus Roland}, title = {Managerial incentives, earnings management and regulatory intervention in the banking sector}, doi = {10.25972/OPUS-17268}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-172682}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Die vorliegende Dissertation umfasst drei Forschungspapiere, welche sich mit folgenden Bankenthemen besch{\"a}ftigen: Fehl-/Anreize und Risiko{\"u}bernahme, Ertragssteuerung und die Regulierung von Aufsichtsr{\"a}ten. „Do cooperative banks suffer from moral hazard behaviour? 
Evidence in the context of efficiency and risk": Wir verwenden Granger-Kausalit{\"a}tstechniken, um die intertemporalen Beziehungen zwischen Risiko, Effizienz und Kapital zu bewerten. Wir verwenden zwei verschiedene Maße der Effizienz, die Kosten- und Gewinneffizienz, da diese unterschiedliche Managementf{\"a}higkeiten widerspiegeln. Eine ist die F{\"a}higkeit, Kosten zu steuern, und die andere ist die M{\"o}glichkeit, Gewinne zu maximieren. Wir stellen fest, dass eine niedrigere Kosten- und Gewinneffizienz das Liquidit{\"a}tsrisiko erh{\"o}ht. Wir stellen ebenfalls fest, dass ein Anstieg des Kreditrisikos nachteilig f{\"u}r die Kosten- und Gewinneffizienz ist. Am wichtigsten ist jedoch, dass unsere Ergebnisse eine positive Beziehung zwischen dem Kapital- und Kreditrisiko aufweisen, was zeigt, dass Moral Hazard Verhalten keine Anwendung (aufgrund von Haftungsbeschr{\"a}nkung und Einlagensicherung) bei unserer Stichprobe von Genossenschaftsbanken findet. Im Gegenteil, wir finden Hinweise darauf, dass Banken mit niedrigem Kapital ihre Kreditqualit{\"a}t in den Folgeperioden verbessern k{\"o}nnen. Diese Erkenntnisse k{\"o}nnen f{\"u}r die Regulierungsbeh{\"o}rden von Bedeutung sein, die bei der Einf{\"u}hrung neuer regulatorischer Kapitalbeschr{\"a}nkungen die Gesch{\"a}ftsmodelle der Banken ber{\"u}cksichtigen sollten. „Earnings Management Modelling in the Banking Industry - Evaluating valuable approaches": Die Rechnungslegungsforschung hat den Bereich Earnings Management (EM) f{\"u}r die nichtfinanzielle und finanzielle Industrie gesondert untersucht. Da EM nicht direkt beobachtet werden kann, ist es f{\"u}r jede Forschungsfrage in jedem Umfeld wichtig, eine {\"u}berpr{\"u}fbare Proxy-Gr{\"o}ße f{\"u}r EM zu finden. Grunds{\"a}tzlich fehlt jedoch ein tiefes Verst{\"a}ndnis daf{\"u}r, welche Regressoren den Sch{\"a}tzvorgang verbessern k{\"o}nnen. Diese Studie versucht, diese L{\"u}cke zu schließen, und analysiert vorhandene Modellspezifikationen f{\"u}r diskretion{\"a}re Risikovorsorgen im Bankensektor, um gemeinsame und spezifische Muster zu identifizieren. Hierf{\"u}r verwenden wir einen US-Datensatz, bestehend aus den Jahren 2005-2015, und wenden g{\"a}ngige Testverfahren an, um das Ausmaß von Messfehlern, Verzerrungen aufgrund von Extrembeobachtungen und weggelassenen Variablen sowie die Vorhersagekraft der diskretion{\"a}ren Proxy-Gr{\"o}ßen zu untersuchen. Unsere Ergebnisse zeigen, dass ein gr{\"u}ndliches Verst{\"a}ndnis des methodischen Modellierungsprozesses von EM im Bankensektor wichtig ist. Die derzeit etablierten Modelle zur Sch{\"a}tzung des EM sind angemessen, jedoch optimierbar. Insbesondere identifizieren wir die Variablen der notleidenden Verm{\"o}genswerte als die wichtigste Gruppe, w{\"a}hrend Variablen der Risikovorsorge und Nettoausbuchungen einen gewissen Wert erbringen k{\"o}nnen. Dar{\"u}ber hinaus zeigen unsere Ergebnisse, dass die Nichtlinearit{\"a}t bestimmter Regressoren ein Problem sein kann, das in zuk{\"u}nftigen Untersuchungen angegangen werden sollte, w{\"a}hrend wir weiterhin einige ausgelassene und m{\"o}glicherweise korrelierte Variablen identifizieren, die einen Mehrwert generieren k{\"o}nnten. Die Ergebnisse zeigen auch, dass ein dynamischer, Endogenit{\"a}t ber{\"u}cksichtigender Ansatz nicht unbedingt mit einer besseren Vorhersagekraft verkn{\"u}pft ist.
„Board Regulation and its Impact on Composition and Effects - Evidence from German Cooperative Bank": In dieser Studie wird ein System-GMM-Sch{\"a}tzer verwendet, um die Auswirkungen m{\"o}glicher regulatorischer Eingriffe auf die Besetzung von Aufsichtsratspositionen bei Genossenschaftsbanken zu untersuchen. Hierf{\"u}r werden zwei verschiedene Untersuchungsdesigns angewandt. Zun{\"a}chst untersucht der Autor die {\"A}nderungen der Aufsichtsratsstruktur vor und nach der Einf{\"u}hrung des Gesetzes zur St{\"a}rkung der Finanzmarkt- und Versicherungsaufsicht (FinVAG). Zweitens sch{\"a}tzt der Autor den Einfluss von Doktoren und beruflicher Konzentration auf {\"A}nderungen des Bankrisikos unter Ber{\"u}cksichtigung der Umsetzung des FinVAG. Die untersuchte Stichprobe umfasst dabei 246 deutsche Genossenschaftsbanken in den Jahren von 2006 bis 2011. Bez{\"u}glich des Bankrisikos verwendet der Autor vier verschiedene Maße: das Kredit-, Kapital- und Liquidit{\"a}tsrisiko sowie den Z-Score, wobei die ersten drei ebenfalls im FinVAG adressiert werden. Die Ergebnisse zeigen, dass die Umsetzung des FinVAG zu strukturellen {\"A}nderungen in der Zusammensetzung der Aufsichtsr{\"a}te f{\"u}hrt, insbesondere auf Kosten der Landwirte. Dar{\"u}ber hinaus wirkt sich die Umsetzung risikoreduzierend und damit wie beabsichtigt auf alle Risikokennzahlen und Beziehungen zwischen Risikokennzahlen und Aufsichtsratsmerkmalen aus. Um die komplexe Beziehung zwischen Charakteristika der Aufsichtsr{\"a}te und Risikomessgr{\"o}ßen aufzudecken, verwendet die Studie einen „two-step system-gmm" Sch{\"a}tzer, um nicht beobachtete Heterogenit{\"a}t zu ber{\"u}cksichtigen und Endogenit{\"a}tsprobleme zu reduzieren. Die Ergebnisse k{\"o}nnen f{\"u}r Stakeholder, Aufsichtsbeh{\"o}rden, Vorgesetzte und Manager besonders relevant sein.}, subject = {Kreditgenossenschaft}, language = {en} } @techreport{HermJaniesch2019, type = {Working Paper}, author = {Herm, Lukas-Valentin and Janiesch, Christian}, title = {Anforderungsanalyse f{\"u}r eine Kollaborationsplattform in Blockchain-basierten Wertsch{\"o}pfungsnetzwerken}, doi = {10.25972/OPUS-18886}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188866}, year = {2019}, abstract = {In our globalized world, companies operate on an international market. To concentrate on their main competencies and be more competitive, they integrate into supply chain networks. However, these potentials also entail many risks. The emergence of an international market also creates pressure from competitors, forcing companies to collaborate with new and unknown companies in dynamic supply chain networks. In many cases, this can cause a lack of trust, as the application of illegal practices and the breaking of agreements through complex and nontransparent supply chain networks pose a threat. Blockchain technology provides a transparent, decentralized, and distributed means of chained data storage and thus enables trust through tamper-proof storage, even if there is no trust in the cooperation partners. The use of the blockchain also provides the opportunity to digitize, automate, and monitor processes within supply chain networks in real time. The research project "Plattform f{\"u}r das integrierte Management von Kollaborationen in Wertsch{\"o}pfungsnetzwerken" (PIMKoWe) addresses this issue. The aim of this report is to define requirements for such a collaboration platform. 
We define requirements based on a literature review and expert interviews, which allow for an objective consideration of scientific and practical aspects. An additional survey validates and further classifies these requirements as "essential", "optional", or "irrelevant". In total, we have derived a collection of 45 requirements from different dimensions for the collaboration platform. Employing these requirements, we illustrate a conceptual architecture of the platform as well as introduce a realistic application scenario. The presentation of the platform concept and the application scenario can provide the foundation for implementing and introducing a blockchain-based collaboration platform into existing supply chain networks in the context of the research project PIMKoWe.}, subject = {Blockchain}, language = {de} } @misc{Rhoenisch2019, type = {Master Thesis}, author = {Rh{\"o}nisch, Anna Franziska}, title = {M{\"o}glichkeiten und Strategien der Technologieclusterentwicklung - Eine Analyse der Voraussetzungen f{\"u}r eine erfolgreiche Clusterbildung in der Region Mainfranken}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-179546}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {This paper focuses on the development of technology clusters and, based on this, on two research questions: What are the preconditions for technology cluster development according to cluster research? And does the region Mainfranken fulfill the requirements for technology cluster formation? For this purpose, a qualitative study is conducted that draws on various theoretical concepts of cluster formation. On this basis, the following determinants of cluster development can be deduced: the traffic and infrastructure component, the cluster environment component, the university component, the state component, and the industrial component. The analysis of the parameter values of the separate cluster components shows that the core requirements of technology cluster development in the region of Mainfranken are fulfilled. Nevertheless, it is necessary to improve the infrastructure, the availability of commercial and industrial land, and the availability of capital to form a successful technology cluster. Within the framework of this paper, the potential of technology cluster development in the field of artificial intelligence could also be analyzed.}, subject = {Cluster}, language = {de} } @phdthesis{Bergmann2019, author = {Bergmann, Jonathan}, title = {Carry Trades - Eine empirische Analyse}, doi = {10.25972/OPUS-17955}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-179553}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {Die verfasste Arbeit besch{\"a}ftigt sich mit der Handelsstrategie Carry Trades. Grundlage dieser Strategie ist das Ausnutzen von Zinsunterschieden, welche zwischen zwei W{\"a}hrungsr{\"a}umen vorherrschen, und einer Wechselkursanpassung, die diese Unterschiede nicht komplett kompensiert. Investiert ein Anleger beispielsweise in eine ausl{\"a}ndische W{\"a}hrung mit h{\"o}herem Zinsniveau, so m{\"u}sste sich der Wechselkurs gem{\"a}ß der Zinsparit{\"a}tentheorie in der Folge so anpassen, dass der h{\"o}here Ertrag durch die Zinsen beim R{\"u}cktausch der W{\"a}hrung vollst{\"a}ndig egalisiert wird. 
Ziel dieser Arbeit war eine empirische Untersuchung f{\"u}r die W{\"a}hrungen der G10 auf w{\"o}chentlicher Handelsbasis sowie die Konstruktion und Ber{\"u}cksichtigung von ex ante Sharpe-Ratios als Handelsindikator.}, subject = {Devisenspekulation}, language = {de} } @book{Knoll2019, author = {Knoll, Leonhard}, title = {De exemplis deterrentibus}, edition = {2. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-108-2}, doi = {10.25972/WUP-978-3-95826-109-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-178695}, pages = {xii, 163}, year = {2019}, abstract = {Das vorliegende Buch besch{\"a}ftigt sich anhand einer Sammlung von realen F{\"a}llen, die in Aufgabenform formuliert sind, mit dem leider oft gest{\"o}rten Verh{\"a}ltnis von Theorie und Praxis in der rechtsgepr{\"a}gten Unternehmensbewertung. Es weist {\"a}hnlich wie „normale" Fallsammlungen die jeweiligen Aufgabenstellungen und die zugeh{\"o}rigen L{\"o}sungen aus. Die eigentlichen Fragestellungen in den Aufgabentexten sind durch kurze Erl{\"a}uterungen eingerahmt, damit jeder Fall als solcher von einem mit Bewertungsfragen halbwegs Vertrauten relativ leicht verstanden und in seiner Bedeutung eingeordnet werden kann. Dieses Vorgehen {\"a}hnelt wiederum Lehrb{\"u}chern, die Inhalte {\"u}ber F{\"a}lle vermitteln, nur dass hier nicht hypothetische F{\"a}lle das jeweils idealtypisch richtige Vorgehen zeigen, sondern Praxisf{\"a}lle plakative Verst{\"o}ße contra legem artis.}, subject = {Unternehmensbewertung}, language = {de} } @techreport{ImgrundJanieschFischeretal.2019, author = {Imgrund, Florian and Janiesch, Christian and Fischer, Marcus and Winkelmann, Axel}, title = {Success Factors for Process Modeling Projects: An Empirical Analysis}, doi = {10.25972/OPUS-17924}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-179246}, pages = {68}, year = {2019}, abstract = {Business process modeling is one of the most crucial activities of BPM and enables companies to realize various benefits in terms of communication, coordination, and distribution of organizational knowledge. While numerous techniques support process modeling, companies frequently face challenges when adopting BPM in their organization. Existing techniques are often modified or replaced by self-developed approaches so that companies cannot fully exploit the benefits of standardization. To explore the current state of the art in process modeling as well as emerging challenges and potential success factors, we conducted a large-scale quantitative study. We received feedback from 314 respondents who completed the survey between July 2 and September 6, 2017. Thus, our study provides in-depth insights into the status quo of process modeling and allows us to make three major contributions. Our study suggests that the success of process modeling projects depends on four major factors, which we extracted using exploratory factor analysis. We found employee education, management involvement, usability of project results, and the companies' degree of process orientation to be decisive for the success of a process modeling project. We conclude this report with a summary of results and present potential avenues for future research. 
We thereby emphasize that quantitative and qualitative insights into process modeling are needed to strengthen the quality of process modeling in practice and to react quickly to changing conditions, attitudes, and possible constraints that practitioners face.}, language = {en} }