@techreport{AlbersKerstingKosse2023, type = {Working Paper}, author = {Albers, Thilo N. H. and Kersting, Felix and Kosse, Fabian}, title = {Income misperception and populism}, doi = {10.25972/OPUS-32169}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-321696}, pages = {38}, year = {2023}, abstract = {We propose that false beliefs about one's own current economic status are an important factor for explaining populist attitudes. Eliciting subjects' receptiveness to right-wing populism and their perceived relative income positions in a representative survey of German households, we find that people with pessimistic beliefs about their income position are more attuned to populist statements. Key to understanding the misperception-populism relationship are strong gender differences in the mechanism: men are much more likely to channel their discontent into affection for populist ideas. A simple information provision neither sustainably reduces misperception nor curbs populism.}, subject = {Populismus}, language = {en} } @phdthesis{Blank2021, author = {Blank, Felix}, title = {The use of the Hypercube Queueing Model for the location optimization decision of Emergency Medical Service systems}, doi = {10.25972/OPUS-24909}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-249093}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {The strategic planning of Emergency Medical Service systems is directly linked to the survival probability of the patients concerned. Research has developed numerous indicators and evaluation parameters that can be used for assessment, including, for example, the response time, the system utilization, various waiting-time parameters, and the share of demand that cannot be served immediately. The Hypercube Queueing Model is one of the most frequently used models in this context. Owing to its theoretical background and the high computation times associated with it, the Hypercube Queueing Model has only recently been used more frequently for the optimization of Emergency Medical Service systems. Likewise, only a few system parameters have been computed with the model, and its full potential has therefore not yet been exploited. Most of the existing optimization studies that employ a Hypercube Queueing Model use the expected response time of the system as the objective parameter. Although this objective usually leads to a balanced system configuration, other objective parameters have been identified. Applications of the Hypercube Queueing Model within robust optimization and robust goal programming models have attempted to offer a more holistic view by considering different times of day. These applications showed that the behavior of Emergency Medical Service systems, as well as their parameters, depends strongly on the time of day.
The analysis and, where appropriate, the optimization of these systems should therefore take different demand distributions, in terms of both their volume and their spatial distribution, into account in order to guarantee a decision basis that is as holistic as possible.}, subject = {Warteschlangentheorie}, language = {en} } @phdthesis{Demmer2019, author = {Demmer, Claudia}, title = {Merger-specific Efficiency Gains}, doi = {10.25972/OPUS-18392}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-183928}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The present thesis analyzes whether and - if so - under which conditions mergers result in merger-specific efficiency gains. The analysis concentrates on manufacturing firms in Europe that participate in horizontal mergers as either buyer or target in the years 2005 to 2014. The result of the present study is that mergers are idiosyncratic processes. Thus, the possibilities of defining general conditions that predict merger-specific efficiency gains are limited. However, the results of the present study indicate that efficiency gains are possible as a direct consequence of a merger. Efficiency changes can be measured by a Total Factor Productivity (TFP) approach. Significant merger-specific efficiency gains are more likely for targets than for buyers. Moreover, mergers of firms that mainly operate in the same segment are likely to generate efficiency losses. Efficiency gains most likely result from reductions in material and labor costs, especially in the short and medium term. The analysis of conditions that predict efficiency gains indicates that firms that announce the merger themselves are capable of generating efficiency gains in the short and medium term. Furthermore, buyers that are mid-sized firms are more likely to generate efficiency gains than small or large buyers. Results also indicate that capital-intensive firms are likely to generate efficiency gains after a merger. The present study is structured as follows. Chapter 1 motivates the analysis of merger-specific efficiency gains. Defining conditions that predict with reasonable likelihood when and to what extent mergers will result in merger-specific efficiency gains would improve the merger approval or denial process. Chapter 2 gives a literature review of relevant empirical studies that have analyzed merger-specific efficiency gains. None of these empirical studies has analyzed horizontal mergers of European firms in the manufacturing sector in the years 2005 to 2014. Thus, the present study contributes to the existing literature by analyzing efficiency gains from those mergers. Chapter 3 focuses on the identification of mergers. The merger term is defined according to the EC Merger Regulation and the Horizontal Merger Guidelines. The definition and the requirements of mergers according to legislation provide the framework of merger identification. Chapter 4 concentrates on the efficiency measurement methodology. Most empirical studies apply a Total Factor Productivity (TFP) approach to estimate efficiency. The TFP approach uses linear regression in combination with a control function approach. The estimation of coefficients is done by a Generalized Method of Moments approach. The resulting efficiency estimates are used in the analysis of merger-specific efficiency gains in chapter 5. This analysis is done separately for buyers and targets by applying a Difference-in-Differences (DID) approach.
Chapter 6 concentrates on an alternative approach to estimating efficiency, namely a Stochastic Frontier Analysis (SFA) approach. Comparable to the TFP approach, the SFA approach is a stochastic efficiency estimation methodology. In contrast to TFP, SFA estimates the production function as a frontier function instead of an average function. The frontier function allows efficiency to be estimated in percentage terms. Chapter 7 analyses the impact of different merger- and firm-specific characteristics on the efficiency changes of buyers and targets. The analysis is based on a multiple regression, which is applied to short-, mid- and long-term efficiency changes of buyers and targets. Chapter 8 concludes.}, subject = {Verarbeitende Industrie}, language = {en} } @techreport{FischerHrsg2017, author = {Fischer (Hrsg.), Doris}, title = {Tourism in W{\"u}rzburg: Suggestions on how to enhance the travel experience for Chinese tourists}, edition = {1. Auflage}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-143898}, pages = {64}, year = {2017}, abstract = {This report provides suggestions on how to enhance the travel experience for Chinese tourists in the German city of W{\"u}rzburg. Based on a user experience survey and market research, this work includes a quantitative and competitive analysis. It further provides concrete, hands-on measures for the city council to improve the experience of Chinese visitors coming to W{\"u}rzburg.}, subject = {China}, language = {en} } @article{KuemmelLindenberger2014, author = {K{\"u}mmel, Reiner and Lindenberger, Dietmar}, title = {How energy conversion drives economic growth far from the equilibrium of neoclassical economics}, series = {New Journal of Physics}, volume = {16}, journal = {New Journal of Physics}, number = {125008}, issn = {1367-2630}, doi = {10.1088/1367-2630/16/12/125008}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-118102}, year = {2014}, abstract = {Energy conversion in the machines and information processors of the capital stock drives the growth of modern economies. This is exemplified for Germany, Japan, and the USA during the second half of the 20th century: econometric analyses reveal that the output elasticity, i.e. the economic weight, of energy is much larger than energy's share in total factor cost, while for labor just the opposite is true. This is at variance with mainstream economic theory, according to which an economy should operate in the neoclassical equilibrium, where output elasticities equal factor cost shares. The standard derivation of the neoclassical equilibrium from the maximization of profit or of time-integrated utility disregards technological constraints. We show that the inclusion of these constraints in our nonlinear-optimization calculus results in equilibrium conditions, where generalized shadow prices destroy the equality of output elasticities and cost shares. Consequently, at the prices of capital, labor, and energy we have known so far, industrial economies have evolved far from the neoclassical equilibrium. This is illustrated by the example of the German industrial sector evolving on the mountain of factor costs before and during the first and the second oil price explosion. It indicates the influence of the 'virtually binding' technological constraints on entrepreneurial decisions, and the existence of 'soft constraints' as well.
Implications for employment and future economic growth are discussed.}, language = {en} } @phdthesis{Meller2020, author = {Meller, Jan Maximilian}, title = {Data-driven Operations Management: Combining Machine Learning and Optimization for Improved Decision-making}, doi = {10.25972/OPUS-20604}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-206049}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {This dissertation consists of three independent, self-contained research papers that investigate how state-of-the-art machine learning algorithms can be used in combination with operations management models to consider high dimensional data for improved planning decisions. More specifically, the thesis focuses on the question concerning how the underlying decision support models change structurally and how those changes affect the resulting decision quality. Over the past years, the volume of globally stored data has experienced tremendous growth. Rising market penetration of sensor-equipped production machinery, advanced ways to track user behavior, and the ongoing use of social media lead to large amounts of data on production processes, user behavior, and interactions, as well as condition information about technical gear, all of which can provide valuable information to companies in planning their operations. In the past, two generic concepts have emerged to accomplish this. The first concept, separated estimation and optimization (SEO), uses data to forecast the central inputs (i.e., the demand) of a decision support model. The forecast and a distribution of forecast errors are then used in a subsequent stochastic optimization model to determine optimal decisions. In contrast to this sequential approach, the second generic concept, joint estimation-optimization (JEO), combines the forecasting and optimization step into a single optimization problem. Following this approach, powerful machine learning techniques are employed to approximate highly complex functional relationships and hence relate feature data directly to optimal decisions. The first article, "Machine learning for inventory management: Analyzing two concepts to get from data to decisions", chapter 2, examines performance differences between implementations of these concepts in a single-period Newsvendor setting. The paper first proposes a novel JEO implementation based on the random forest algorithm to learn optimal decision rules directly from a data set that contains historical sales and auxiliary data. Going forward, we analyze structural properties that lead to these performance differences. Our results show that the JEO implementation achieves significant cost improvements over the SEO approach. These differences are strongly driven by the decision problem's cost structure and the amount and structure of the remaining forecast uncertainty. The second article, "Prescriptive call center staffing", chapter 3, applies the logic of integrating data analysis and optimization to a more complex problem class, an employee staffing problem in a call center. We introduce a novel approach to applying the JEO concept that augments historical call volume data with features like the day of the week, the beginning of the month, and national holiday periods. We employ a regression tree to learn the ex-post optimal staffing levels based on similarity structures in the data and then generalize these insights to determine future staffing levels. 
This approach, relying on only a few modeling assumptions, significantly outperforms a state-of-the-art benchmark that uses considerably more model structure and assumptions. The third article, "Data-driven sales force scheduling", chapter 4, is motivated by the problem of how a company should allocate limited sales resources. We propose a novel approach based on the SEO concept that involves a machine learning model to predict the probability of winning a specific project. We develop a methodology that uses this prediction model to estimate the "uplift", that is, the incremental value of an additional visit to a particular customer location. To account for the remaining uncertainty at the subsequent optimization stage, we adapt the decision support model in such a way that it can control for the level of trust in the predicted uplifts. This novel policy dominates both a benchmark that relies completely on the uplift information and a robust benchmark that optimizes the sum of potential profits while neglecting any uplift information. The results of this thesis show that decision support models in operations management can be transformed fundamentally by considering additional data and can benefit from better decision quality and, correspondingly, lower mismatch costs. The way in which machine learning algorithms can be integrated into these decision support models depends on the complexity and the context of the underlying decision problem. In summary, this dissertation provides an analysis based on three different, specific application scenarios that serve as a foundation for further analyses of employing machine learning for decision support in operations management.}, subject = {Operations Management}, language = {en} } @phdthesis{Notz2021, author = {Notz, Pascal Markus}, title = {Prescriptive Analytics for Data-driven Capacity Management}, doi = {10.25972/OPUS-24042}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240423}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Digitization and artificial intelligence are radically changing virtually all areas across business and society. These developments are mainly driven by the technology of machine learning (ML), which is enabled by the coming together of large amounts of training data, statistical learning theory, and sufficient computational power. This technology forms the basis for the development of new approaches to solve classical planning problems of Operations Research (OR): prescriptive analytics approaches integrate ML prediction and OR optimization into a single prescription step, so they learn from historical observations of demand and a set of features (covariates) and provide a model that directly prescribes future decisions. These novel approaches provide enormous potential to improve planning decisions, as first case reports have shown, and, consequently, constitute a new field of research in Operations Management (OM). First works in this new field of research have studied approaches to solving comparatively simple planning problems in the area of inventory management. However, common OM planning problems often have a more complex structure, and many of these complex planning problems are within the domain of capacity planning. Therefore, this dissertation focuses on developing new prescriptive analytics approaches for complex capacity management problems. This dissertation consists of three independent articles that develop new prescriptive approaches and use these to solve realistic capacity planning problems.
The first article, "Prescriptive Analytics for Flexible Capacity Management", develops two prescriptive analytics approaches, weighted sample average approximation (wSAA) and kernelized empirical risk minimization (kERM), to solve a complex two-stage capacity planning problem that has been studied extensively in the literature: a logistics service provider sorts daily incoming mail items on three service lines that must be staffed on a weekly basis. This article is the first to develop a kERM approach to solve a complex two-stage stochastic capacity planning problem with matrix-valued observations of demand and vector-valued decisions. The article develops out-of-sample performance guarantees for kERM and various kernels, and shows the universal approximation property when using a universal kernel. The results of the numerical study suggest that prescriptive analytics approaches may lead to significant improvements in performance compared to traditional two-step approaches or SAA and that their performance is more robust to variations in the exogenous cost parameters. The second article, "Prescriptive Analytics for a Multi-Shift Staffing Problem", uses prescriptive analytics approaches to solve the (queuing-type) multi-shift staffing problem (MSSP) of an aviation maintenance provider that receives customer requests of uncertain number and at uncertain arrival times throughout each day and plans staff capacity for two shifts. This planning problem is particularly complex because the order inflow and processing are modelled as a queuing system, and the demand in each day is non-stationary. The article addresses this complexity by deriving an approximation of the MSSP that enables the planning problem to be solved using wSAA, kERM, and a novel Optimization Prediction approach. A numerical evaluation shows that wSAA leads to the best performance in this particular case. The solution method developed in this article builds a foundation for solving queuing-type planning problems using prescriptive analytics approaches, so it bridges the "worlds" of queuing theory and prescriptive analytics. The third article, "Explainable Subgradient Tree Boosting for Prescriptive Analytics in Operations Management" proposes a novel prescriptive analytics approach to solve the two capacity planning problems studied in the first and second articles that allows decision-makers to derive explanations for prescribed decisions: Subgradient Tree Boosting (STB). STB combines the machine learning method Gradient Boosting with SAA and relies on subgradients because the cost function of OR planning problems often cannot be differentiated. A comprehensive numerical analysis suggests that STB can lead to a prescription performance that is comparable to that of wSAA and kERM. The explainability of STB prescriptions is demonstrated by breaking exemplary decisions down into the impacts of individual features. The novel STB approach is an attractive choice not only because of its prescription performance, but also because of the explainability that helps decision-makers understand the causality behind the prescriptions. 
The results presented in these three articles demonstrate that using prescriptive analytics approaches, such as wSAA, kERM, and STB, to solve complex planning problems can lead to significantly better decisions compared to traditional approaches that neglect feature data or rely on a parametric distribution estimation.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Oberdorf2022, author = {Oberdorf, Felix}, title = {Design and Evaluation of Data-Driven Enterprise Process Monitoring Systems}, doi = {10.25972/OPUS-29853}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-298531}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Increasing global competition forces organizations to improve their processes to gain a competitive advantage. In the manufacturing sector, this is facilitated through tremendous digital transformation. Fundamental components in such digitalized environments are process-aware information systems that record the execution of business processes, assist in process automation, and unlock the potential to analyze processes. However, most enterprise information systems focus on informational aspects, process automation, or data collection but do not tap into predictive or prescriptive analytics to foster data-driven decision-making. Therefore, this dissertation sets out to investigate the design of analytics-enabled information systems in five independent parts, which stepwise introduce analytics capabilities and assess potential opportunities for process improvement in real-world scenarios. An essential prerequisite for setting up and extending analytics-enabled information systems is identifying success factors, which we examine in the context of process mining as a descriptive analytics technique. We combine an established process mining framework and a success model to provide a structured approach for assessing success factors and identifying challenges, motivations, and the perceived business value of process mining from employees across organizations as well as process mining experts and consultants. We extend the existing success model and provide lessons for business value generation through process mining based on the derived findings. To assist the realization of process-mining-enabled business value, we design an artifact for context-aware process mining. The artifact combines standard process logs with additional context information to assist the automated identification of process realization paths associated with specific context events. Yet, realizing business value is a challenging task, as transforming processes based on informational insights is time-consuming. To overcome this, we showcase the development of a predictive process monitoring system for disruption handling in a production environment. The system leverages state-of-the-art machine learning algorithms for disruption type classification and duration prediction. It combines the algorithms with additional organizational data sources and a simple assignment procedure to assist the disruption handling process. The design of such a system and analytics models is a challenging task, which we address by engineering a five-phase method for predictive end-to-end enterprise process network monitoring leveraging multi-headed deep neural networks. The method facilitates the integration of heterogeneous data sources through dedicated neural network input heads, which are concatenated for a prediction.
An evaluation based on a real-world use case highlights the superior performance of the resulting multi-headed network. Even with the improved model performance, the results are not perfect, and thus decisions about assigning agents to solve disruptions have to be made under uncertainty. Mathematical models can assist here, but due to complex real-world conditions, the number of potential scenarios increases massively and limits the solvability of assignment models. To overcome this and tap into the potential of prescriptive process monitoring systems, we propose a data-driven approximate dynamic stochastic programming approach, which incorporates multiple uncertainties for an assignment decision. The resulting model yields a significant performance improvement and ultimately highlights the particular importance of analytics-enabled information systems for organizational process improvement.}, subject = {Operations Management}, language = {en} } @book{Schubert2013, author = {Schubert, Fabian}, title = {Lagequalit{\"a}t, Lagequalit{\"a}t, Lagequalit{\"a}t - Standortbewertungsmethoden f{\"u}r den Einzelhandel und Lagewertigkeitsver{\"a}nderungen durch Business Improvement Districts - am Beispiel der Stadt Gießen}, publisher = {Verlag MetaGIS Infosysteme}, address = {Mannheim}, isbn = {978-3-936438-64-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-180730}, publisher = {Universit{\"a}t W{\"u}rzburg}, pages = {317}, year = {2013}, abstract = {Location quality is the most important factor for the success of a retail site! This has held true at least since the emergence of the first pedestrian zones in the 1950s and the development of prime (1A) locations into sought-after inner-city business sites. It is surprising, however, that despite the widespread familiarity of the concept of location quality, or of 1A, B, and C locations, theory and practice currently feature not only a multitude of terms for describing and classifying inner-city retail locations but also a wide range of criteria and methods used to determine their quality. In view of currently tight municipal budgets, rising competitive pressure in retailing, the growing susceptibility of the economic, financial, and real estate sectors to crises, and the resulting increase in the importance of well-founded location analyses, the question arises as to which criteria are suitable from a scientific point of view for determining location quality and how an instrument based on these criteria should be designed. Moreover, against the background of the growing efforts in recent years to revitalize city centers, it must also be examined whether such a location quality instrument could be used to create a solid data basis that serves as an essential foundation for evaluating various inner-city revitalization measures.
The present work addresses these and other questions arising in the context of current inner-city and retail development.}, subject = {Gießen}, language = {de} } @phdthesis{Siller2023, author = {Siller, Benjamin}, title = {Influence of Lead Time and Emission Policies on the Design of Supply Chains - Insights from Supply Chain Design Models}, doi = {10.25972/OPUS-29671}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-296713}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Companies are expected to act as international players and to use their capabilities to provide customized products and services quickly and efficiently. Today, consumers expect their requirements to be met within a short time and at a favorable price. Order-to-delivery lead time has steadily gained in importance for consumers. Furthermore, governments can use various emission policies to force companies and customers to reduce their greenhouse gas emissions. This thesis investigates the influence of order-to-delivery lead time and different emission policies on the design of a supply chain. Within this work, different supply chain design models are developed to examine these influences. The first model incorporates lead times and total costs, and various emission policies are implemented to illustrate the trade-off between the different measures. The second model reflects the influence of consumers who are sensitive to order-to-delivery lead time, and different emission policies are implemented to study their impacts. The analysis shows that the share of consumers who are sensitive to order-to-delivery lead time has a significant impact on the design of a supply chain. Demand uncertainty and uncertainty in the design of different emission policies are investigated by developing an appropriate robust mathematical optimization model. Results show that uncertainty about the design of an emission policy in particular can significantly impact the total cost of a supply chain. The effects of differently designed emission policies in various countries are investigated in the fourth model. The analyses highlight that both lead times and emission policies can strongly influence companies' offshoring and nearshoring strategies.}, subject = {Supply Chain Management}, language = {en} } @phdthesis{Stein2019, author = {Stein, Nikolai Werner}, title = {Advanced Analytics in Operations Management and Information Systems: Methods and Applications}, doi = {10.25972/OPUS-19266}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-192668}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2019}, abstract = {The digital transformation of society holds enormous potential for companies in all sectors. Thanks to new data sources, growing computing power, and improved connectivity, these companies have access to rapidly increasing amounts of data. To succeed in the digital transformation and to realize competitive advantages in terms of efficiency and effectiveness, companies must use the available data and establish data-driven decision processes. Nevertheless, the majority of firms only use tools from the field of descriptive analytics, and only a small share of companies already takes advantage of the possibilities offered by predictive and prescriptive analytics. The objective of this dissertation, which consists of four self-contained parts, is to identify opportunities for applying prescriptive analytics.
Da pr{\"a}diktive Modelle eine wesentliche Voraussetzung f{\"u}r „prescriptive analytics" sind, thematisieren die ersten beiden Teile dieser Arbeit Verfahren aus dem Bereich „predictive analytics." Ausgehend von Verfahren des maschinellen Lernens wird zun{\"a}chst die Entwicklung eines pr{\"a}diktiven Modells am Beispiel der Kapazit{\"a}ts- und Personalplanung bei einem IT-Beratungsunternehmen veranschaulicht. Im Anschluss wird eine Toolbox f{\"u}r Data Science Anwendungen entwickelt. Diese stellt Entscheidungstr{\"a}gern Richtlinien und bew{\"a}hrte Verfahren f{\"u}r die Modellierung, das Feature Engineering und die Modellinterpretation zur Verf{\"u}gung. Der Einsatz der Toolbox wird am Beispiel von Daten eines großen deutschen Industrieunternehmens veranschaulicht. Verbesserten Prognosen, die von leistungsf{\"a}higen Vorhersagemodellen bereitgestellt werden, erlauben es Entscheidungstr{\"a}gern in einigen Situationen bessere Entscheidungen zu treffen und auf diese Weise einen Mehrwert zu generieren. In vielen komplexen Entscheidungssituationen ist die Ableitungen von besseren Politiken aus zur Verf{\"u}gung stehenden Prognosen jedoch oft nicht trivial und erfordert die Entwicklung neuer Planungsalgorithmen. Aus diesem Grund fokussieren sich die letzten beiden Teile dieser Arbeit auf Verfahren aus dem Bereich „prescriptive analytics". Hierzu wird zun{\"a}chst analysiert, wie die Vorhersagen pr{\"a}diktiver Modelle in pr{\"a}skriptive Politiken zur L{\"o}sung eines „Optimal Searcher Path Problem" {\"u}bersetzt werden k{\"o}nnen. Trotz beeindruckender Fortschritte in der Forschung im Bereich k{\"u}nstlicher Intelligenz sind die Vorhersagen pr{\"a}diktiver Modelle auch heute noch mit einer gewissen Unsicherheit behaftet. Der letzte Teil dieser Arbeit schl{\"a}gt einen pr{\"a}skriptiven Ansatz vor, der diese Unsicherheit ber{\"u}cksichtigt. Insbesondere wird ein datengetriebenes Verfahren f{\"u}r die Einsatzplanung im Außendienst entwickelt. Dieser Ansatz integriert Vorhersagen bez{\"u}glich der Erfolgswahrscheinlichkeiten und die Modellqualit{\"a}t des entsprechenden Vorhersagemodells in ein „Team Orienteering Problem."}, subject = {Operations Management}, language = {en} } @phdthesis{Taigel2020, author = {Taigel, Fabian Michael}, title = {Data-driven Operations Management: From Predictive to Prescriptive Analytics}, doi = {10.25972/OPUS-20651}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-206514}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Autonomous cars and artificial intelligence that beats humans in Jeopardy or Go are glamorous examples of the so-called Second Machine Age that involves the automation of cognitive tasks [Brynjolfsson and McAfee, 2014]. However, the larger impact in terms of increasing the efficiency of industry and the productivity of society might come from computers that improve or take over business decisions by using large amounts of available data. This impact may even exceed that of the First Machine Age, the industrial revolution that started with James Watt's invention of an efficient steam engine in the late eighteenth century. Indeed, the prevalent phrase that calls data "the new oil" indicates the growing awareness of data's importance. However, many companies, especially those in the manufacturing and traditional service industries, still struggle to increase productivity using the vast amounts of data [for Economic Co-operation and Development, 2018]. 
One reason for this struggle is that companies stick with a traditional way of using data for decision support in operations management that is not well suited to automated decision-making. In traditional inventory and capacity management, some data - typically just historical demand data - is used to estimate a model that makes predictions about uncertain planning parameters, such as customer demand. The planner then has two tasks: to adjust the prediction with respect to additional information that was not part of the data but still might influence demand, and to take the remaining uncertainty into account and determine a safety buffer based on the underage and overage costs. In the best case, the planner determines the safety buffer based on an optimization model that takes the costs and the distribution of historical forecast errors into account; however, these decisions are usually based on a planner's experience and intuition, rather than on solid data analysis. This two-step approach is referred to as separated estimation and optimization (SEO). With SEO, using more data and better models for making the predictions would improve only the first step, which would still improve decisions but would not automate (and, hence, revolutionize) decision-making. Using SEO is like using a stronger horse to pull the plow: one still has to walk behind. The real potential for increasing productivity lies in moving from predictive to prescriptive approaches, that is, from the two-step SEO approach, which uses predictive models in the estimation step, to a prescriptive approach, which integrates the optimization problem with the estimation of a model that then provides a direct functional relationship between the data and the decision. Following Akcay et al. [2011], we refer to this integrated approach as joint estimation-optimization (JEO). JEO approaches prescribe decisions, so they can automate the decision-making process. Just as the steam engine replaced manual work, JEO approaches replace cognitive work. The overarching objective of this dissertation is to analyze, develop, and evaluate new ways in which data can be used in making planning decisions in operations management to unlock the potential for increasing productivity. In doing so, the thesis comprises five self-contained research articles that forge the bridge from predictive to prescriptive approaches. While the first article focuses on how sensitive data like condition data from machinery can be used to make predictions of spare-parts demand, the remaining articles introduce, analyze, and discuss prescriptive approaches to inventory and capacity management. All five articles consider approaches that use machine learning and data in innovative ways to improve current approaches to solving inventory or capacity management problems. The articles show that, by moving from predictive to prescriptive approaches, we can improve data-driven operations management in two ways: by making decisions more accurate and by automating decision-making.
Thus, this dissertation provides examples of how digitization and the Second Machine Age can change decision-making in companies to increase efficiency and productivity.}, subject = {Maschinelles Lernen}, language = {en} } @book{Wieland2015, author = {Wieland, Thomas}, title = {R{\"a}umliches Einkaufsverhalten und Standortpolitik im Einzelhandel unter Ber{\"u}cksichtigung von Agglomerationseffekten - Theoretische Erkl{\"a}rungsans{\"a}tze, modellanalytische Zug{\"a}nge und eine empirisch-{\"o}konometrische Marktgebietsanalyse anhand eines Fallbeispiels aus dem l{\"a}ndlichen Raum Ostwestfalens/S{\"u}dniedersachsens}, publisher = {Verlag MetaGIS Infosysteme}, address = {Mannheim}, isbn = {978-3-936438-73-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-180753}, publisher = {Universit{\"a}t W{\"u}rzburg}, pages = {X, 289}, year = {2015}, abstract = {The rising relevance of retail agglomerations is one of the central spatial elements of structural change in retailing. Both planned shopping centers and locational cooperation between store formats that are actually in inter-format competition increasingly shape the location structures of the retail sector. The present study deals with the spatial shopping behavior of consumers in connection with such phenomena. First, those positive agglomeration effects in retailing that are based on customer behavior are derived from different theoretical perspectives (microeconomics, spatial economics, behavioral marketing research); in doing so, several types of multi-purpose and comparison shopping can be identified as relevant shopping strategies. The assumed (positive) effect of retail agglomerations is tested by means of an econometric market area model - the Multiplicative Competitive Interaction (MCI) Model - on the basis of market areas collected through primary empirical research. The results of the analysis show predominantly positive influences of the potential for multi-purpose and comparison shopping on the customer inflows of individual suppliers, although these influences differ in their intensity and form. The study demonstrates the relevance of agglomeration effects in retailing and formulates a quantitative model, based on the frequently used Huff model, with which these effects can be analyzed. Concrete applications can be found in company location analysis and in the impact assessment of retail developments.}, subject = {Einzelhandel}, language = {de} }