@phdthesis{Hegmann2024, author = {Hegmann, Reinhold}, title = {Pr{\"u}ferqualifikation und Pr{\"u}fungsqualit{\"a}t - Eine empirische Untersuchung privater pr{\"u}fungspflichtiger Unternehmen in Deutschland}, doi = {10.25972/OPUS-32254}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322546}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {Die Jahresabschlusspr{\"u}fung verfolgt das Ziel, die Verl{\"a}sslichkeit der Rechnungslegung zu best{\"a}tigen. Folglich kann sie einen wesentlichen Beitrag zu einem hohen Informationsniveau an den M{\"a}rkten leisten. Angesichts dieser großen {\"o}konomischen Bedeutung unternimmt der deutsche Gesetzgeber zahlreiche Anstrengungen, um eine hohe Pr{\"u}fungsqualit{\"a}t sicherzustellen. Die Sichtung der Wirtschaftspr{\"u}ferordnung zeigt hierbei, dass regulatorische Maßnahmen ergriffen werden, die am Kern der Jahresabschlusspr{\"u}fung ansetzen, n{\"a}mlich an den Berufsangeh{\"o}rigen selbst. So wurde der Zugang zum Berufsstand der vereidigten Buchpr{\"u}fer mehrmals geschlossen und wiederer{\"o}ffnet. Des Weiteren sind markante Anpassungen des Niveaus des Wirtschaftspr{\"u}fungsexamens im Zeitablauf zu erkennen. Bei der Jahresabschlusspr{\"u}fung der Unternehmen von {\"o}ffentlichem Interesse sind außerdem besondere Berufspflichten zu erf{\"u}llen. Zum einen ist diesen schweren Eingriffen in die Freiheit der Berufswahl und der Berufsaus{\"u}bung gemein, dass sie allesamt die Qualifikation des Abschlusspr{\"u}fers adressieren. Zum anderen werden die entsprechenden Gesetzes{\"a}nderungen mehrheitlich mit einer St{\"a}rkung der Pr{\"u}fungsqualit{\"a}t begr{\"u}ndet. Fraglich ist, inwiefern jene Facetten der Pr{\"u}ferqualifikation tats{\"a}chlich einen Einfluss auf die Pr{\"u}fungsqualit{\"a}t aus{\"u}ben. Aufgrund mangelnder Evidenz ergibt sich die Notwendigkeit, eine empirische Studie am deutschen Pr{\"u}fermarkt durchzuf{\"u}hren und somit den Beginn zur Schließung der identifizierten Forschungsl{\"u}cke zu setzen. Das Ziel der vorliegenden Dissertation besteht folglich darin, den Zusammenhang zwischen der Pr{\"u}ferqualifikation und der Pr{\"u}fungsqualit{\"a}t mittels Regressionsanalysen zu untersuchen. Dazu wurde ein einzigartiger Datensatz zu deutschen privaten pr{\"u}fungspflichtigen Kapitalgesellschaften mit unkonsolidierten Finanz- und Pr{\"u}ferinformationen im Zeitraum 2006-2018 mit insgesamt 217.585 grundlegenden Beobachtungen erhoben, bereinigt und aufbereitet. Da die Pr{\"u}fungsqualit{\"a}t nicht direkt beobachtbar ist, wird zwischen wahrgenommener Pr{\"u}fungsqualit{\"a}t und tats{\"a}chlicher Pr{\"u}fungsqualit{\"a}t unterschieden. Im Rahmen dieser Dissertation wird die wahrgenommene Pr{\"u}fungsqualit{\"a}t {\"u}ber Fremdkapitalkosten und die tats{\"a}chliche Pr{\"u}fungsqualit{\"a}t {\"u}ber absolute diskretion{\"a}re Periodenabgrenzungen approximiert. Die Ergebnisse der Hauptregressionen zeigen {\"u}berwiegend, dass kein Zusammenhang zwischen den Maßgr{\"o}ßen der Pr{\"u}ferqualifikation und der wahrgenommenen und tats{\"a}chlichen Pr{\"u}fungsqualit{\"a}t besteht. Die Zusatz- und Sensitivit{\"a}tsanalysen unterst{\"u}tzen diesen Befund. So k{\"o}nnen mit Blick auf die Berufszugangsregelungen keine Qualit{\"a}tsunterschiede zwischen den Berufsst{\"a}nden der Wirtschaftspr{\"u}fer und der vereidigten Buchpr{\"u}fer nachgewiesen werden. 
Auch innerhalb des Berufsstandes der Wirtschaftspr{\"u}fer ergeben sich keine Hinweise auf ein Qualit{\"a}tsgef{\"a}lle zwischen den Pr{\"u}fergruppen, die unterschiedliche Examensanforderungen durchlebt haben. Hinsichtlich der Berufsaus{\"u}bungsregelungen ist zu beobachten, dass die zus{\"a}tzlichen Anforderungen an die Jahresabschlusspr{\"u}fung der Unternehmen von {\"o}ffentlichem Interesse nicht mit einer anderen Pr{\"u}fungsqualit{\"a}t bei privaten Unternehmen verbunden sind. Die beschriebenen regulatorischen Schritte des Gesetzgebers im Bereich der Pr{\"u}ferqualifikation erscheinen somit im Lichte einer verbesserten Pr{\"u}fungsqualit{\"a}t nicht zwingend gerechtfertigt.}, subject = {Pr{\"u}fungsqualit{\"a}t}, language = {de} } @article{LeimeisterStieglitzMatzneretal.2021, author = {Leimeister, Jan Marco and Stieglitz, Stefan and Matzner, Martin and Kundisch, Dennis and Flath, Christoph and R{\"o}glinger, Maximilian}, title = {Quo Vadis Conferences in the Business and Information Systems Engineering (BISE) Community After Covid}, series = {Business \& Information Systems Engineering}, volume = {63}, journal = {Business \& Information Systems Engineering}, number = {6}, issn = {2363-7005}, doi = {10.1007/s12599-021-00707-x}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-308902}, pages = {741-749}, year = {2021}, language = {en} } @phdthesis{deGraafgebButtler2024, author = {de Graaf [geb. Buttler], Simone Linda}, title = {From Small to Large Data: Leveraging Synthetic Data for Inventory Management}, doi = {10.25972/OPUS-36136}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-361364}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {In a world of constant change, uncertainty has become a daily challenge for businesses. Rapidly shifting market conditions highlight the need for flexible responses to unforeseen events. Operations Management (OM) is crucial for optimizing business processes, including site planning, production control, and inventory management. Traditionally, companies have relied on theoretical models from microeconomics, game theory, optimization, and simulation. However, advancements in machine learning and mathematical optimization have led to a new research field: data-driven OM. Data-driven OM uses real data, especially time series data, to create more realistic models that better capture decision-making complexities. Despite the promise of this new research area, a significant challenge remains: the availability of extensive historical training data. Synthetic data, which mimics real data, has been used to address this issue in other machine learning applications. Therefore, this dissertation explores how synthetic data can be leveraged to improve decisions for data-driven inventory management, focusing on the single-period newsvendor problem, a classic stochastic optimization problem in inventory management. The first article, "A Meta Analysis of Data-Driven Newsvendor Approaches", presents a standardized evaluation framework for data-driven prescriptive approaches, tested through a numerical study. Findings suggest model performance is not robust, emphasizing the need for a standardized evaluation process. The second article, "Application of Generative Adversarial Networks in Inventory Management", examines using synthetic data generated by Generative Adversarial Networks (GANs) for the newsvendor problem. This study shows GANs can model complex demand relationships, offering a promising alternative to traditional methods.
The third article, "Combining Synthetic Data and Transfer Learning for Deep Reinforcement Learning in Inventory Management", proposes a method using Deep Reinforcement Learning (DRL) with synthetic and real data through transfer learning. This approach trains a generative model to learn demand distributions, generates synthetic data, and fine-tunes a DRL agent on a smaller real dataset. This method outperforms traditional approaches in controlled and practical settings, though further research is needed to generalize these findings.}, subject = {Bestandsmanagement}, language = {en} } @article{GriebelSegebarthSteinetal.2023, author = {Griebel, Matthias and Segebarth, Dennis and Stein, Nikolai and Schukraft, Nina and Tovote, Philip and Blum, Robert and Flath, Christoph M.}, title = {Deep learning-enabled segmentation of ambiguous bioimages with deepflash2}, series = {Nature Communications}, volume = {14}, journal = {Nature Communications}, doi = {10.1038/s41467-023-36960-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-357286}, year = {2023}, abstract = {Bioimages frequently exhibit low signal-to-noise ratios due to experimental conditions, specimen characteristics, and imaging trade-offs. Reliable segmentation of such ambiguous images is difficult and laborious. Here we introduce deepflash2, a deep learning-enabled segmentation tool for bioimage analysis. The tool addresses typical challenges that may arise during the training, evaluation, and application of deep learning models on ambiguous data. The tool's training and evaluation pipeline uses multiple expert annotations and deep model ensembles to achieve accurate results. The application pipeline supports various use-cases for expert annotations and includes a quality assurance mechanism in the form of uncertainty measures. Benchmarked against other tools, deepflash2 offers both high predictive accuracy and efficient computational resource usage. The tool is built upon established deep learning libraries and enables sharing of trained model ensembles with the research community. deepflash2 aims to simplify the integration of deep learning into bioimage analysis projects while improving accuracy and reliability.}, language = {en} } @book{Knoll2024, author = {Knoll, Leonhard}, title = {De exemplis deterrentibus: Bemerkenswerte Befunde aus der Praxis der rechtsgepr{\"a}gten Unternehmensbewertung in Aufgabenform}, edition = {4. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-243-0}, doi = {10.25972/WUP-978-3-95826-243-0}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-348840}, pages = {XII, 232}, year = {2024}, abstract = {Das vorliegende Buch besch{\"a}ftigt sich anhand einer Sammlung von realen F{\"a}llen, die in Aufgabenform formuliert sind, mit dem leider oft gest{\"o}rten Verh{\"a}ltnis von Theorie und Praxis in der rechtsgepr{\"a}gten Unternehmensbewertung. Es weist {\"a}hnlich wie „normale" Fallsammlungen die jeweiligen Aufgabenstellungen und die zugeh{\"o}rigen L{\"o}sungen aus. Die eigentlichen Fragestellungen in den Aufgabentexten sind durch kurze Erl{\"a}uterungen eingerahmt, damit jeder Fall als solcher von einem mit Bewertungsfragen halbwegs Vertrauten relativ leicht verstanden und in seiner Bedeutung eingeordnet werden kann.
Dieses Vorgehen {\"a}hnelt wiederum Lehrb{\"u}chern, die Inhalte {\"u}ber F{\"a}lle vermitteln, nur dass hier nicht hypothetische F{\"a}lle das jeweils idealtypisch richtige Vorgehen zeigen, sondern Praxisf{\"a}lle plakative Verst{\"o}ße contra legem artis.}, subject = {Unternehmensbewertung}, language = {de} } @article{HermSteinbachWanneretal.2022, author = {Herm, Lukas-Valentin and Steinbach, Theresa and Wanner, Jonas and Janiesch, Christian}, title = {A nascent design theory for explainable intelligent systems}, series = {Electronic Markets}, volume = {32}, journal = {Electronic Markets}, number = {4}, issn = {1019-6781}, doi = {10.1007/s12525-022-00606-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323809}, pages = {2185-2205}, year = {2022}, abstract = {Due to computational advances in the past decades, so-called intelligent systems can learn from increasingly complex data, analyze situations, and support users in their decision-making to address them. However, in practice, the complexity of these intelligent systems renders the user hardly able to comprehend the inherent decision logic of the underlying machine learning model. As a result, the adoption of this technology, especially for high-stake scenarios, is hampered. In this context, explainable artificial intelligence offers numerous starting points for making the inherent logic explainable to people. While research manifests the necessity for incorporating explainable artificial intelligence into intelligent systems, there is still a lack of knowledge about how to socio-technically design these systems to address acceptance barriers among different user groups. In response, we have derived and evaluated a nascent design theory for explainable intelligent systems based on a structured literature review, two qualitative expert studies, a real-world use case application, and quantitative research. Our design theory includes design requirements, design principles, and design features covering the topics of global explainability, local explainability, personalized interface design, as well as psychological/emotional factors.}, language = {en} } @article{WannerHermHeinrichetal.2022, author = {Wanner, Jonas and Herm, Lukas-Valentin and Heinrich, Kai and Janiesch, Christian}, title = {The effect of transparency and trust on intelligent system acceptance: evidence from a user-based study}, series = {Electronic Markets}, volume = {32}, journal = {Electronic Markets}, number = {4}, issn = {1019-6781}, doi = {10.1007/s12525-022-00593-5}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323829}, pages = {2079-2102}, year = {2022}, abstract = {Contemporary decision support systems are increasingly relying on artificial intelligence technology such as machine learning algorithms to form intelligent systems. These systems have human-like decision capacity for selected applications based on a decision rationale which cannot be looked-up conveniently and constitutes a black box. As a consequence, acceptance by end-users remains somewhat hesitant. While lacking transparency has been said to hinder trust and enforce aversion towards these systems, studies that connect user trust to transparency and subsequently acceptance are scarce. In response, our research is concerned with the development of a theoretical model that explains end-user acceptance of intelligent systems. 
We utilize the unified theory of acceptance and use of technology as well as explanation theory and related theories on initial trust and user trust in information systems. The proposed model is tested in an industrial maintenance workplace scenario using maintenance experts as participants to represent the user group. Results show that acceptance is performance-driven at first sight. However, transparency plays an important indirect role in regulating trust and the perception of performance.}, language = {en} } @article{OberdorfSchaschekWeinzierletal.2023, author = {Oberdorf, Felix and Schaschek, Myriam and Weinzierl, Sven and Stein, Nikolai and Matzner, Martin and Flath, Christoph M.}, title = {Predictive end-to-end enterprise process network monitoring}, series = {Business \& Information Systems Engineering}, volume = {65}, journal = {Business \& Information Systems Engineering}, number = {1}, issn = {2363-7005}, doi = {10.1007/s12599-022-00778-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323814}, pages = {49-64}, year = {2023}, abstract = {Ever-growing data availability combined with rapid progress in analytics has laid the foundation for the emergence of business process analytics. Organizations strive to leverage predictive process analytics to obtain insights. However, current implementations are designed to deal with homogeneous data. Consequently, there is limited practical use in an organization with heterogeneous data sources. The paper proposes a method for predictive end-to-end enterprise process network monitoring leveraging multi-headed deep neural networks to overcome this limitation. A case study performed with a medium-sized German manufacturing company highlights the method's utility for organizations.}, language = {en} } @article{HermJanieschHelmetal.2023, author = {Herm, Lukas-Valentin and Janiesch, Christian and Helm, Alexander and Imgrund, Florian and Hofmann, Adrian and Winkelmann, Axel}, title = {A framework for implementing robotic process automation projects}, series = {Information Systems and e-Business Management}, volume = {21}, journal = {Information Systems and e-Business Management}, number = {1}, issn = {1617-9846}, doi = {10.1007/s10257-022-00553-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323798}, pages = {1-35}, year = {2023}, abstract = {Robotic process automation is a disruptive technology to automate already digital yet manual tasks and subprocesses as well as whole business processes rapidly. In contrast to other process automation technologies, robotic process automation is lightweight and only accesses the presentation layer of IT systems to mimic human behavior. Due to the novelty of robotic process automation and the varying approaches when implementing the technology, there are reports that up to 50\% of robotic process automation projects fail. To tackle this issue, we use a design science research approach to develop a framework for the implementation of robotic process automation projects. We analyzed 35 reports on real-life projects to derive a preliminary sequential model. Then, we performed multiple expert interviews and workshops to validate and refine our model. The result is a framework with variable stages that offers guidelines with enough flexibility to be applicable in complex and heterogeneous corporate environments as well as for small and medium-sized companies. It is structured by the three phases of initialization, implementation, and scaling.
They comprise eleven stages relevant during a project as well as a continuous cycle spanning individual projects. Together they structure how to manage knowledge and support processes for the execution of robotic process automation implementation projects.}, language = {en} } @article{HermJanieschFuchs2022, author = {Herm, Lukas-Valentin and Janiesch, Christian and Fuchs, Patrick}, title = {Der Einfluss von menschlichen Denkmustern auf k{\"u}nstliche Intelligenz - eine strukturierte Untersuchung von kognitiven Verzerrungen}, series = {HMD Praxis der Wirtschaftsinformatik}, volume = {59}, journal = {HMD Praxis der Wirtschaftsinformatik}, number = {2}, issn = {1436-3011}, doi = {10.1365/s40702-022-00844-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323787}, pages = {556-571}, year = {2022}, abstract = {K{\"u}nstliche Intelligenz (KI) dringt vermehrt in sensible Bereiche des allt{\"a}glichen menschlichen Lebens ein. Es werden nicht mehr nur einfache Entscheidungen durch intelligente Systeme getroffen, sondern zunehmend auch komplexe Entscheidungen. So entscheiden z. B. intelligente Systeme, ob Bewerber in ein Unternehmen eingestellt werden sollen oder nicht. Oftmals kann die zugrundeliegende Entscheidungsfindung nur schwer nachvollzogen werden und ungerechtfertigte Entscheidungen k{\"o}nnen dadurch unerkannt bleiben, weshalb die Implementierung einer solchen KI auch h{\"a}ufig als sogenannte Blackbox bezeichnet wird. Folglich steigt die Bedrohung, durch unfaire und diskriminierende Entscheidungen einer KI benachteiligt behandelt zu werden. Resultieren diese Verzerrungen aus menschlichen Handlungen und Denkmustern, spricht man von einer kognitiven Verzerrung oder einem kognitiven Bias. Aufgrund der Neuheit dieser Thematik ist jedoch bisher nicht ersichtlich, welche verschiedenen kognitiven Bias innerhalb eines KI-Projektes auftreten k{\"o}nnen. Ziel dieses Beitrages ist es, anhand einer strukturierten Literaturanalyse eine gesamtheitliche Darstellung zu erm{\"o}glichen. Die gewonnenen Erkenntnisse werden anhand des in der Praxis weit verbreiteten Cross-Industry Standard Process for Data Mining (CRISP-DM) Modells aufgearbeitet und klassifiziert. Diese Betrachtung zeigt, dass der menschliche Einfluss auf eine KI in jeder Entwicklungsphase des Modells gegeben ist und es daher wichtig ist, „mensch-{\"a}hnlichen" Bias in einer KI explizit zu untersuchen.}, language = {de} } @article{FreichelSteegmansWinkelmann2022, author = {Freichel, Chiara and Steegmans, Timo-Christian and Winkelmann, Axel}, title = {Ziele und Gestaltung digitaler Plattformen f{\"u}r Produktionsnetzwerke}, series = {HMD Praxis der Wirtschaftsinformatik}, volume = {59}, journal = {HMD Praxis der Wirtschaftsinformatik}, number = {5}, issn = {1436-3011}, doi = {10.1365/s40702-022-00908-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323779}, pages = {1281-1311}, year = {2022}, abstract = {Die interorganisatorische Zusammenarbeit in Produktionsnetzwerken kann Herausforderungen durch eine hohe Marktdynamik, immer anspruchsvollere Kundenbed{\"u}rfnisse und steigenden Kostendruck entgegenwirken. Neben der klassischen vertikalen Verschiebung von Kapazit{\"a}ten in Richtung geeigneter Zulieferer lassen sich Fertigungskapazit{\"a}ten auch durch eine horizontale Zusammenarbeit zwischen produzierenden Unternehmen handeln. Im Sinne der Sharing Economy bieten digitale Plattformen eine geeignete Infrastruktur zur Verkn{\"u}pfung und Koordination der Marktakteure eines Produktionsnetzwerks.
So k{\"o}nnen Fertigungsunternehmen flexibel Produktionsausf{\"a}llen entgegenwirken und freie Maschinenkapazit{\"a}ten auslasten. Eine wesentliche Voraussetzung f{\"u}r den Erfolg solcher digitalen Plattformen f{\"u}r Produktionsnetzwerke ist die Definition von Zielen, welche bisher in der Literatur nur unzureichend und nicht bezogen auf diese spezifische Plattformart untersucht wurden. In dieser Arbeit wird ein umf{\"a}ngliches konzeptionelles Zielmodell f{\"u}r diese spezifische Plattformart erstellt. Zu spezifischen Zielen digitaler Plattformen f{\"u}r Produktionsnetzwerke z{\"a}hlen neben wirtschaftlichen oder technischen Zielen beispielsweise auch produktionsbezogene Marktleistungsziele wie die Gew{\"a}hrleistung von Produktionsflexibilit{\"a}t. Aufbauend darauf wird gezeigt, wie das Design der beschriebenen Plattformen einen Einfluss auf die Erreichung bestimmter Ziele hat und wie spezielle Mechanismen zur Zielerreichung beitragen.}, language = {de} } @phdthesis{Bauer2023, author = {Bauer, Carsten}, title = {Learning Curve Effects in Hospitals as Highly Specialized Expert Organizations}, doi = {10.25972/OPUS-32871}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-328717}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {The collection at hand is concerned with learning curve effects in hospitals as highly specialized expert organizations and comprises four papers, each focusing on a different aspect of the topic. Three papers are concerned with surgeons, and one is concerned with the staff of the emergency room in a conservative treatment. The preface compactly addresses the steadily increasing health care costs and economic pressure, the hospital landscape in Germany as well as its development. Furthermore, the DRG lump-sum compensation and the characteristics of the health sector, which is strongly regulated by the state and in which ethical aspects must be omnipresent, are outlined. Besides, the benefit of knowing about learning curve effects in order to cut costs and to keep quality stable or even improve it, is addressed. The first paper of the collection investigates the learning effects in a hospital which has specialized on endoprosthetics (total hip and knee replacement). Doing so, the specialized as well as the non-specialized interventions are studied. Costs are not investigated directly, but cost indicators. The indicator of costs in the short term are operating room times. The one of medium- to long-term costs is quality. It is operationalized by complications in the post-anesthesia care unit. The study estimates regression models (OLS and logit). The results indicate that the specialization comes along with advantages due to learning effects in terms of shorter operating room times and lower complication rates in endoprosthetic interventions. For the non-specialized interventions, the results are the same. There are no possibly negative effects of specialization on non-specialized surgeries, but advantageous spillover effects. Altogether, the specialization can be regarded as reasonable, as it cuts costs of all surgeries in the short, medium, and long term. The authors are Carsten Bauer, Nele M{\"o}bs, Oliver Unger, Andrea Szczesny, and Christian Ernst. In the second paper surgeons' learning curves effects in a teamwork vs. an individual work setting are in the focus of interest. Thus, the study combines learning curve effects with teamwork in health care, an issue increasingly discussed in recent literature. 
The investigated intervention is the tonsillectomy (surgical excision of the palatine tonsils), a standard procedure. The indicators of short-term and medium- to long-term costs are again operating room times and complications as a proxy for quality, respectively. Complications are secondary bleedings, which usually occur a few days after surgery. The study estimates regression models (OLS and logit). The results show that operating room times decrease with increasing surgeon experience. Surgeons who also operate in teams learn faster than the ones always operating on their own. Thus, operating room times are shorter for surgeons who also take part in team interventions. As a special feature, the data set contains the costs per case. This makes it possible to verify that the assumed cost indicators are valid. The findings recommend team surgeries especially for resident physicians. The authors are Carsten Bauer, Oliver Unger, and Martin Holderried. The third paper is dedicated to stapes surgery, a therapy for conductive hearing loss caused by otosclerosis (excessive bone growth). It is conceptually simple but technically difficult. Therefore, it is regarded as ideal for studying learning curve effects in surgery. The paper seeks a comprehensive investigation. Thus, operating room times are employed as the short-term cost indicator and quality as the medium- to long-term one. To measure quality, the postoperative difference between air and bone conduction thresholds is used, as well as a combination of this difference and the absence of complications. This paper also estimates different regression models (OLS and logit). Besides investigating the effects at the department level, the study also considers the individual level; that is, operating room times and quality are investigated for individual surgeons. This improves the comparison of learning curves, as the surgeons worked under widely identical conditions. It becomes apparent that the operating room times initially decrease with increasing experience. The marginal effect of additional experience gets smaller until the direction of the effect changes and the operating room times increase with increasing experience, probably caused by the allocation of difficult cases to the most experienced surgeons. Regarding quality, no learning curve effects are observed. The authors are Carsten Bauer, Johannes Taeger, and Kristen Rak. The fourth paper is a systematic literature review on learning effects in the treatment of ischemic strokes. In case of stroke, every minute counts. Therefore, there is the inherent need to reduce the time from symptom onset to treatment. The article is concerned with the reduction of the time from arrival at the hospital to thrombolysis treatment, the so-called "door-to-needle time". In the literature, there are studies on learning in a broader sense caused by a quality improvement program as well as learning in a narrower sense, in which learning curve effects are evaluated. Besides, studies on the time differences between low-volume and high-volume hospitals are considered, as the differences are probably the result of learning and economies of scale. Virtually all the 165 evaluated articles report improvements regarding the time to treatment. Furthermore, the clinical results substantiate the common association of shorter times from arrival to treatment with improved clinical outcomes. The review additionally discusses the economic implications of the results. The author is Carsten Bauer.
The postface points out that, after the measurement of learning curve effects, further efforts are necessary to put them to use and increase efficiency, as the issue does not admit of easy, standardized solutions. Furthermore, it emphasizes the importance of multiperspectivity in research for the patient outcome, the health care system, and society.}, subject = {Lernkurve}, language = {en} } @phdthesis{Herm2023, author = {Herm, Lukas-Valentin}, title = {Algorithmic Decision-Making Facilities: Perception and Design of Explainable AI-based Decision Support Systems}, doi = {10.25972/OPUS-32294}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-322948}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Recent computing advances are driving the integration of artificial intelligence (AI)-based systems into nearly every facet of our daily lives. To this end, AI is becoming a frontier for enabling algorithmic decision-making by mimicking or even surpassing human intelligence. Thereupon, these AI-based systems can function as decision support systems (DSSs) that assist experts in high-stakes use cases where human lives are at risk. All that glitters is not gold, due to the accompanying complexity of the underlying machine learning (ML) models, which apply mathematical and statistical algorithms to autonomously derive nonlinear decision knowledge. One particular subclass of ML models, called deep learning models, accomplishes unsurpassed performance, with the drawback that these models are no longer explainable to humans. This divergence may result in an end-user's unwillingness to utilize this type of AI-based DSS, thus diminishing the end-user's system acceptance. Hence, the explainable AI (XAI) research stream has gained momentum, as it develops techniques to unravel this black box while maintaining system performance. Unsurprisingly, these XAI techniques become necessary for justifying, evaluating, improving, or managing the utilization of AI-based DSSs. This yields a plethora of explanation techniques, creating an XAI jungle from which end-users must choose. In turn, these techniques are preliminarily engineered by developers for developers without ensuring an actual end-user fit. Thus, it remains unknown how an end-user's mental model behaves when encountering such explanation techniques. For this purpose, this cumulative thesis seeks to address this research deficiency by investigating end-user perceptions when encountering intrinsic ML and post-hoc XAI explanations. Drawing on this, the findings are synthesized into design knowledge to enable the deployment of XAI-based DSSs in practice. To this end, this thesis comprises six research contributions that follow the iterative and alternating interplay between behavioral science and design science research employed in information systems (IS) research and thus contribute to the overall research objectives as follows: First, an in-depth study of the impact of transparency and (initial) trust on end-user acceptance is conducted by extending and validating the unified theory of acceptance and use of technology model. This study indicates both factors' strong but indirect effects on system acceptance, validating further research incentives. In particular, this thesis focuses on the overarching concept of transparency.
Herein, a systematization of existing user-centered XAI studies, in the form of a taxonomy and pattern analysis, is derived to structure and guide future research endeavors. This enables the empirical investigation of the theoretical trade-off between performance and explainability in intrinsic ML algorithms, which yields a less gradual trade-off, fragmented into three explainability groups. This includes an empirical investigation of end-users' perceived explainability of post-hoc explanation types, with local explanation types performing best. Furthermore, an empirical investigation emphasizes the correlation between comprehensibility and explainability, indicating almost significant (with outliers) results for the assumed correlation. The final empirical investigation aims at researching the effect of XAI explanation types on end-user cognitive load as well as the effect of cognitive load on end-user task performance and task time; it also positions local explanation types as best and demonstrates the correlations between cognitive load and task performance and, moreover, between cognitive load and task time. Finally, the last research paper utilizes, among other things, the obtained knowledge and derives a nascent design theory for XAI-based DSSs. This design theory encompasses (meta-) design requirements, design principles, and design features in a domain-independent and interdisciplinary fashion, including end-users and developers as potential user groups. This design theory is ultimately tested through a real-world instantiation in a high-stakes maintenance scenario. From an IS research perspective, this cumulative thesis addresses the lack of research on perception and design knowledge for an ensured utilization of XAI-based DSSs. This lays the foundation for future research to obtain a holistic understanding of end-users' heuristic behaviors during decision-making to facilitate the acceptance of XAI-based DSSs in operational practice.}, subject = {K{\"u}nstliche Intelligenz}, language = {en} } @phdthesis{Hubmann2023, author = {Hubmann, Maximilian}, title = {Steuervermeidung und grenz{\"u}berschreitende Besteuerung - eine betriebswirtschaftliche, dogmatische und wissenschaftstheoretische Analyse}, doi = {10.25972/OPUS-30369}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-303698}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {In dieser Dissertation werden ausgew{\"a}hlte Aspekte der Steuervermeidung und grenz{\"u}berschreitenden Besteuerung betrachtet. Im Teil B liegt der Fokus auf der Empirie zu Steuervermeidung und Gewinnverlagerung multinationaler Unternehmen mit drei einzelnen Aufs{\"a}tzen. Der Teil C untersucht die unterschiedliche Besteuerung von Human- und Sachverm{\"o}gen anhand der beiden fundamentalen Besteuerungsprinzipien des {\"A}quivalenz- und des Leistungsf{\"a}higkeitsprinzips. Der letzte Aufsatz (Teil D) analysiert das Werturteilsfreiheitspostulat im Stakeholder-Ansatz und zeigt mithilfe eines Fallbeispiels, wie die Unternehmensbesteuerung in unterschiedliche Stakeholder-Ans{\"a}tze integriert werden kann. Eine abschließende Gesamtw{\"u}rdigung geht auf verbleibende Forschungsfragen ein (Teil E). Somit wird in der vorliegenden Dissertation grenz{\"u}berschreitende Besteuerung anhand betriebswirtschaftlicher, besteuerungsprinzipiengest{\"u}tzter bzw.
dogmatischer und wissenschaftstheoretischer Gesichtspunkte untersucht.}, subject = {Steuervermeidung}, language = {de} } @phdthesis{Siller2023, author = {Siller, Benjamin}, title = {Influence of Lead Time and Emission Policies on the Design of Supply Chains - Insights from Supply Chain Design Models}, doi = {10.25972/OPUS-29671}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-296713}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Companies are expected to act as international players and to use their capabilities to provide customized products and services quickly and efficiently. Today, consumers expect their requirements to be met within a short time and at a favorable price. Order-to-delivery lead time has steadily gained in importance for consumers. Furthermore, governments can use various emissions policies to force companies and customers to reduce their greenhouse gas emissions. This thesis investigates the influence of order-to-delivery lead time and different emission policies on the design of a supply chain. Within this work, different supply chain design models are developed to examine these different influences. The first model incorporates lead times and total costs, and various emission policies are implemented to illustrate the trade-off between the different measures. The second model reflects the influence of order-to-delivery lead time sensitive consumers, and different emission policies are implemented to study their impacts. The analysis shows that the share of order-to-delivery lead time sensitive consumers has a significant impact on the design of a supply chain. Demand uncertainty and uncertainty in the design of different emission policies are investigated by developing an appropriate robust mathematical optimization model. Results show that especially uncertainties in the design of an emission policy can significantly impact the total cost of a supply chain. The effects of differently designed emission policies in various countries are investigated in the fourth model. The analyses highlight that both lead times and emission policies can strongly influence companies' offshoring and nearshoring strategies.}, subject = {Supply Chain Management}, language = {en} } @article{RodriguezEntrenaSchuberthGelhard2018, author = {Rodr{\´i}guez-Entrena, Macario and Schuberth, Florian and Gelhard, Carsten}, title = {Assessing statistical differences between parameters estimates in Partial Least Squares path modeling}, series = {Quality \& Quantity}, volume = {52}, journal = {Quality \& Quantity}, number = {1}, doi = {10.1007/s11135-016-0400-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-226403}, pages = {57-69}, year = {2018}, abstract = {Structural equation modeling using partial least squares (PLS-SEM) has become a mainstream modeling approach in various disciplines. Nevertheless, prior literature still lacks practical guidance on how to properly test for differences between parameter estimates. Whereas existing techniques such as parametric and non-parametric approaches in PLS multi-group analysis solely allow for assessing differences between parameters that are estimated for different subpopulations, the study at hand introduces a technique that also allows assessing whether two parameter estimates that are derived from the same sample are statistically different.
To illustrate this advancement to PLS-SEM, we particularly refer to a reduced version of the well-established technology acceptance model.}, language = {en} } @phdthesis{Oberdorf2022, author = {Oberdorf, Felix}, title = {Design and Evaluation of Data-Driven Enterprise Process Monitoring Systems}, doi = {10.25972/OPUS-29853}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-298531}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Increasing global competition forces organizations to improve their processes to gain a competitive advantage. In the manufacturing sector, this is facilitated through tremendous digital transformation. Fundamental components in such digitalized environments are process-aware information systems that record the execution of business processes, assist in process automation, and unlock the potential to analyze processes. However, most enterprise information systems focus on informational aspects, process automation, or data collection but do not tap into predictive or prescriptive analytics to foster data-driven decision-making. Therefore, this dissertation sets out to investigate the design of analytics-enabled information systems in five independent parts, which step-wise introduce analytics capabilities and assess potential opportunities for process improvement in real-world scenarios. To set up and extend analytics-enabled information systems, an essential prerequisite is identifying success factors, which we identify in the context of process mining as a descriptive analytics technique. We combine an established process mining framework and a success model to provide a structured approach for assessing success factors and identifying challenges, motivations, and perceived business value of process mining from employees across organizations as well as process mining experts and consultants. We extend the existing success model and provide lessons for business value generation through process mining based on the derived findings. To assist the realization of process mining enabled business value, we design an artifact for context-aware process mining. The artifact combines standard process logs with additional context information to assist the automated identification of process realization paths associated with specific context events. Yet, realizing business value is a challenging task, as transforming processes based on informational insights is time-consuming. To overcome this, we showcase the development of a predictive process monitoring system for disruption handling in a production environment. The system leverages state-of-the-art machine learning algorithms for disruption type classification and duration prediction. It combines the algorithms with additional organizational data sources and a simple assignment procedure to assist the disruption handling process. The design of such a system and analytics models is a challenging task, which we address by engineering a five-phase method for predictive end-to-end enterprise process network monitoring leveraging multi-headed deep neural networks. The method facilitates the integration of heterogeneous data sources through dedicated neural network input heads, which are concatenated for a prediction. An evaluation based on a real-world use case highlights the superior performance of the resulting multi-headed network. Even the improved model provides no perfect results, and thus decisions about assigning agents to solve disruptions have to be made under uncertainty.
Mathematical models can assist here, but due to complex real-world conditions, the number of potential scenarios massively increases and limits the solution of assignment models. To overcome this and tap into the potential of prescriptive process monitoring systems, we set out a data-driven approximate dynamic stochastic programming approach, which incorporates multiple uncertainties for an assignment decision. The resulting model achieves a significant performance improvement and ultimately highlights the particular importance of analytics-enabled information systems for organizational process improvement.}, subject = {Operations Management}, language = {en} } @article{RademakerSchuberthDijkstra2019, author = {Rademaker, Manuel E. and Schuberth, Florian and Dijkstra, Theo K.}, title = {Measurement error correlation within blocks of indicators in consistent partial least squares: Issues and remedies}, series = {Internet Research}, volume = {29}, journal = {Internet Research}, number = {3}, doi = {10.1108/IntR-12-2017-0525}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-224901}, pages = {448-463}, year = {2019}, abstract = {Purpose The purpose of this paper is to enhance consistent partial least squares (PLSc) to yield consistent parameter estimates for population models whose indicator blocks contain a subset of correlated measurement errors. Design/methodology/approach Correction for attenuation as originally applied by PLSc is modified to include a priori assumptions on the structure of the measurement error correlations within blocks of indicators. To assess the efficacy of the modification, a Monte Carlo simulation is conducted. Findings In the presence of population measurement error correlation, estimated parameter bias is generally small for original and modified PLSc, with the latter outperforming the former for large sample sizes. In terms of the root mean squared error, the results are virtually identical for both original and modified PLSc. Only for relatively large sample sizes, high population measurement error correlation, and low population composite reliability are the increased standard errors associated with the modification outweighed by a smaller bias. These findings are regarded as initial evidence that original PLSc is comparatively robust with respect to misspecification of the structure of measurement error correlations within blocks of indicators.
Originality/value Introducing and investigating a new approach to address measurement error correlation within blocks of indicators in PLSc, this paper contributes to the ongoing development and assessment of recent advancements in partial least squares path modeling.}, language = {en} } @techreport{BaumgartBredebachHermetal.2022, author = {Baumgart, Michael and Bredebach, Patrick and Herm, Lukas-Valentin and Hock, David and Hofmann, Adrian and Janiesch, Christian and Jankowski, Leif Ole and Kampik, Timotheus and Keil, Matthias and Kolb, Julian and Kr{\"o}hn, Michael and Pytel, Norman and Schaschek, Myriam and St{\"u}bs, Oliver and Winkelmann, Axel and Zeiß, Christian}, title = {Plattform f{\"u}r das integrierte Management von Kollaborationen in Wertsch{\"o}pfungsnetzwerken (PIMKoWe)}, editor = {Winkelmann, Axel and Janiesch, Christian}, issn = {2199-0328}, doi = {10.25972/OPUS-29335}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-293354}, pages = {248}, year = {2022}, abstract = {Das Verbundprojekt „Plattform f{\"u}r das integrierte Management von Kollaborationen in Wertsch{\"o}pfungsnetzwerken" (PIMKoWe - F{\"o}rderkennzeichen „02P17D160") ist ein Forschungsvorhaben im Rahmen des Forschungsprogramms „Innovationen f{\"u}r die Produktion, Dienstleistung und Arbeit von morgen" der Bekanntmachung „Industrie 4.0 - Intelligente Kollaborationen in dynamischen Wertsch{\"o}pfungsnetzwerken" (InKoWe). Das Forschungsvorhaben wurde mit Mitteln des Bundesministeriums f{\"u}r Bildung und Forschung (BMBF) gef{\"o}rdert und durch den Projekttr{\"a}ger des Karlsruher Instituts f{\"u}r Technologie (PTKA) betreut. Ziel des Forschungsprojekts PIMKoWe ist die Entwicklung und Bereitstellung einer Plattforml{\"o}sung zur Flexibilisierung, Automatisierung und Absicherung von Kooperationen in Wertsch{\"o}pfungsnetzwerken des industriellen Sektors.}, subject = {Blockchain}, language = {de} } @phdthesis{Hornung2022, author = {Hornung, Vanessa}, title = {Leading by Purpose and Employer Attractiveness - Eine konzeptionelle und empirische Analyse am Beispiel von Unilever}, doi = {10.25972/OPUS-28894}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-288941}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Die Welt befindet sich in einem tiefgreifenden Wandlungsprozess von einer Industrie- zu einer Wissensgesellschaft. Die Automatisierung sowohl physischer als auch kognitiver Arbeit verlagert die Nachfrage des Arbeitsmarktes zunehmend zu hoch qualifizierten Mitarbeitern, die als High Potentials bezeichnet werden. Diese zeichnen sich neben ihrer Intelligenz durch vielf{\"a}ltige F{\"a}higkeiten wie Empathieverm{\"o}gen, Kreativit{\"a}t und Probleml{\"o}sungskompetenzen aus. Humankapital gilt als Wettbewerbsfaktor der Zukunft, jedoch beklagten Unternehmen bereits Ende des 20. Jahrhunderts einen Mangel an Fach- und F{\"u}hrungspersonal, der durch die Pandemie weiter verst{\"a}rkt wird. Aus diesem Grund r{\"u}cken Konzepte zur Rekrutierung und Mitarbeiterbindung in den Fokus der Unternehmen. Da ethisches und {\"o}kologisches Bewusstsein in der Bev{\"o}lkerung an Bedeutung gewinnt, l{\"a}sst sich annehmen, dass Bewerber zuk{\"u}nftig verantwortungsbewusste Arbeitgeber bevorzugen. Nachhaltigkeit bzw. Corporate Responsibility wird damit zum Wettbewerbsfaktor zur Gewinnung und Bindung von Talenten. Mit Hilfe des Ansatzes der identit{\"a}tsorientierten Markenf{\"u}hrung wird ein Verst{\"a}ndnis davon hergestellt, wie es Unternehmen gelingt, eine starke Arbeitgebermarke aufzubauen.
Anhand einer konzeptionellen, praktischen und empirischen Untersuchung am Unternehmensbeispiel Unilever werden die Auswirkungen von umfassendem {\"o}kologischem und gesellschaftlichem Engagement auf die Arbeitgeberattraktivit{\"a}t analysiert. Es zeigt sich, dass Nachhaltigkeit - konkretisiert {\"u}ber die 17 Sustainable Development Goals (SDGs) und verankert im Kern der Marke - die erfolgreiche F{\"u}hrung einer Employer Brand erm{\"o}glicht. Dieses Ergebnis resultiert sowohl aus dem theoretischen als auch aus dem empirischen Teil dieser Arbeit. Im letzteren konnten unter Einsatz eines Strukturgleichungsmodells drei generelle positive Wirkzusammenh{\"a}nge best{\"a}tigt werden: Bewerber f{\"u}hlen sich zu verantwortungsbewussten Unternehmen hingezogen, weshalb sie einen Person-Organization-Fit (P-O-F) empfinden. Diese wahrgenommene Passung mit dem Unternehmen steigert die Arbeitgeberattraktivit{\"a}t aus Sicht der potenziellen Bewerber, wodurch sich wiederum die Wahrscheinlichkeit f{\"u}r eine Bewerbungsabsicht und die Akzeptanz eines Arbeitsplatzangebotes erh{\"o}ht. Es wird damit die Annahme best{\"a}tigt, dass den Herausforderungen der Personalbeschaffung {\"u}ber eine konsequente nachhaltige Ausrichtung der Gesch{\"a}ftst{\"a}tigkeit und deren glaubhafte Kommunikation {\"u}ber die Arbeitgebermarke begegnet werden kann.}, subject = {Personalmarketing}, language = {de} }