@phdthesis{Kurz2017, author = {Kurz, Julian Frederick}, title = {Capacity Planning and Control with Advanced Information}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-154097}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2017}, abstract = {Die Dissertation „Capacity Planning and Control with Advanced Information“ besteht aus drei inhaltlich abgeschlossenen Teilen, die ein {\"u}bergeordnetes Thema zur Grundlage haben: Wie k{\"o}nnen Daten {\"u}ber zuk{\"u}nftige Bedarfe zur Kapazit{\"a}tsplanung und -steuerung genutzt werden? Im Rahmen von Industrie 4.0 werden zunehmend Daten erzeugt und f{\"u}r pr{\"a}diktive Analysen genutzt. Zum Beispiel werden Flugzeugtriebwerke mit Sensoren ausgestattet, die verschiedene Parameter in Echtzeit ermitteln und {\"u}bertragen. In Kombination mit Flugpl{\"a}nen k{\"o}nnen diese Daten, unter Einsatz geeigneter Machine Learning Algorithmen, zur Vorhersage des Zeitpunkts der n{\"a}chsten Wartung und des Wartungsbedarfs genutzt werden. In dieser Arbeit werden diese Vorhersagedaten zur optimalen Planung und Steuerung der Kapazit{\"a}t eines MRO (Maintenance, Repair and Overhaul) Dienstleisters genutzt. Im ersten Artikel, "Capacity Planning for a Maintenance Service Provider with Advanced Information", wird die aus mehreren Stationen bestehende Produktionsst{\"a}tte des MRO Dienstleisters mit Hilfe eines Netzwerks aus GI/G/1 Warteschlangen beschrieben. Durch L{\"o}sung eines Optimierungsproblems werden die Kapazit{\"a}ten der einzelnen Stationen so ermittelt, dass Kapazit{\"a}ts- und Strafkosten f{\"u}r eine zu lange Durchlaufzeit minimiert werden. Dar{\"u}ber hinaus wird untersucht, wie Vorhersagedaten bez{\"u}glich des Eintreffens und Wartungsaufwands zuk{\"u}nftiger Auftr{\"a}ge genutzt werden k{\"o}nnen, um die Gesamtkosten zu reduzieren. Der Artikel "Flexible Capacity Management with Future Information" nutzt Informationen hinsichtlich zuk{\"u}nftiger Wartungsbedarfe f{\"u}r die Steuerung einer flexiblen Kapazit{\"a}t. 
Die Produktionsst{\"a}tte des MRO Dienstleisters wird als M/M/1 Warteschlange beschrieben, die zwischen einer Basiskapazit{\"a}t und einer erh{\"o}hten Kapazit{\"a}t wechseln kann. Allerdings kann die hohe Kapazit{\"a}t nur einen definierten Zeitanteil genutzt werden. In dem Artikel werden Politiken entwickelt, welche die erwartete Warteschlangenl{\"a}nge minimieren, falls keine Informationen bez{\"u}glich des Eintreffens zuk{\"u}nftiger Auftr{\"a}ge verf{\"u}gbar sind beziehungsweise alle Informationen in einem unendlich langen Zeitfenster vorliegen. Es zeigt sich, dass die erwartete Warteschlangenl{\"a}nge drastisch reduziert werden kann, falls Informationen {\"u}ber zuk{\"u}nftige Bedarfe genutzt werden k{\"o}nnen. Im dritten Artikel, "Queueing with Limited Future Information", wird neben der Steuerung einer flexiblen Kapazit{\"a}t auch die Zulassungskontrolle behandelt: Welche Auftr{\"a}ge sollten umgeleitet werden, zum Beispiel an einen Subdienstleister, falls ein definierter Anteil aller ankommenden Triebwerke nicht angenommen werden muss? Es werden Politiken zur Steuerung der flexiblen Kapazit{\"a}t und f{\"u}r die Zulassungskontrolle entwickelt, die zuk{\"u}nftige Informationen in verschieden langen Zeitfenstern ber{\"u}cksichtigen: keine Informationen, endlich und unendlich lange Zeitfenster. Numerische Analysen zeigen, dass die Ber{\"u}cksichtigung von Informationen {\"u}ber die Zukunft im Vergleich zu reaktiven Politiken zu einer Verringerung der mittleren Warteschlangenl{\"a}nge f{\"u}hrt. Andererseits wird ersichtlich, dass die Nutzung von k{\"u}rzeren Zeitfenstern unter bestimmten Umst{\"a}nden vorteilhaft sein kann. Den inhaltlichen Rahmen der Dissertation bilden die Einleitung im ersten Kapitel sowie ein Ausblick in Kapitel 5. 
Beweise werden im Anhang zusammengefasst.}, language = {en} } @phdthesis{Meller2020, author = {Meller, Jan Maximilian}, title = {Data-driven Operations Management: Combining Machine Learning and Optimization for Improved Decision-making}, doi = {10.25972/OPUS-20604}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-206049}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {This dissertation consists of three independent, self-contained research papers that investigate how state-of-the-art machine learning algorithms can be used in combination with operations management models to consider high dimensional data for improved planning decisions. More specifically, the thesis focuses on the question concerning how the underlying decision support models change structurally and how those changes affect the resulting decision quality. Over the past years, the volume of globally stored data has experienced tremendous growth. Rising market penetration of sensor-equipped production machinery, advanced ways to track user behavior, and the ongoing use of social media lead to large amounts of data on production processes, user behavior, and interactions, as well as condition information about technical gear, all of which can provide valuable information to companies in planning their operations. In the past, two generic concepts have emerged to accomplish this. The first concept, separated estimation and optimization (SEO), uses data to forecast the central inputs (i.e., the demand) of a decision support model. The forecast and a distribution of forecast errors are then used in a subsequent stochastic optimization model to determine optimal decisions. In contrast to this sequential approach, the second generic concept, joint estimation-optimization (JEO), combines the forecasting and optimization step into a single optimization problem. 
Following this approach, powerful machine learning techniques are employed to approximate highly complex functional relationships and hence relate feature data directly to optimal decisions. The first article, "Machine learning for inventory management: Analyzing two concepts to get from data to decisions", chapter 2, examines performance differences between implementations of these concepts in a single-period Newsvendor setting. The paper first proposes a novel JEO implementation based on the random forest algorithm to learn optimal decision rules directly from a data set that contains historical sales and auxiliary data. Going forward, we analyze structural properties that lead to these performance differences. Our results show that the JEO implementation achieves significant cost improvements over the SEO approach. These differences are strongly driven by the decision problem's cost structure and the amount and structure of the remaining forecast uncertainty. The second article, "Prescriptive call center staffing", chapter 3, applies the logic of integrating data analysis and optimization to a more complex problem class, an employee staffing problem in a call center. We introduce a novel approach to applying the JEO concept that augments historical call volume data with features like the day of the week, the beginning of the month, and national holiday periods. We employ a regression tree to learn the ex-post optimal staffing levels based on similarity structures in the data and then generalize these insights to determine future staffing levels. This approach, relying on only a few modeling assumptions, significantly outperforms a state-of-the-art benchmark that uses considerably more model structure and assumptions. The third article, "Data-driven sales force scheduling", chapter 4, is motivated by the problem of how a company should allocate limited sales resources. 
We propose a novel approach based on the SEO concept that involves a machine learning model to predict the probability of winning a specific project. We develop a methodology that uses this prediction model to estimate the "uplift", that is, the incremental value of an additional visit to a particular customer location. To account for the remaining uncertainty at the subsequent optimization stage, we adapt the decision support model in such a way that it can control for the level of trust in the predicted uplifts. This novel policy dominates both a benchmark that relies completely on the uplift information and a robust benchmark that optimizes the sum of potential profits while neglecting any uplift information. The results of this thesis show that decision support models in operations management can be transformed fundamentally by considering additional data and benefit through better decision quality and correspondingly lower mismatch costs. The way in which machine learning algorithms can be integrated into these decision support models depends on the complexity and the context of the underlying decision problem. In summary, this dissertation provides an analysis based on three different, specific application scenarios that serve as a foundation for further analyses of employing machine learning for decision support in operations management.}, subject = {Operations Management}, language = {en} } @phdthesis{Kloos2020, author = {Kloos, Konstantin}, title = {Allocation Planning in Sales Hierarchies}, doi = {10.25972/OPUS-19373}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-193734}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Allocation planning describes the process of allocating scarce supply to individual customers in order to prioritize demands from more important customers, i.e. because they request a higher service-level target. 
A common assumption across publications is that allocation planning is performed by a single planner with the ability to decide on the allocations to all customers simultaneously. In many companies, however, there does not exist such a central planner and, instead, allocation planning is a decentral and iterative process aligned with the company's multi-level hierarchical sales organization. This thesis provides a rigorous analytical and numerical analysis of allocation planning in such hierarchical settings. It studies allocation methods currently used in practice and shows that these approaches typically lead to suboptimal allocations associated with significant performance losses. Therefore, this thesis provides multiple new allocation approaches which show a much higher performance, but still are simple enough to lend themselves to practical application. The findings in this thesis can guide decision makers when to choose which allocation approach and what factors are decisive for their performance. In general, our research suggests that with a suitable hierarchical allocation approach, decision makers can expect a similar performance as under centralized planning.}, subject = {Supply Chain Management}, language = {en} } @phdthesis{Taigel2020, author = {Taigel, Fabian Michael}, title = {Data-driven Operations Management: From Predictive to Prescriptive Analytics}, doi = {10.25972/OPUS-20651}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-206514}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Autonomous cars and artificial intelligence that beats humans in Jeopardy or Go are glamorous examples of the so-called Second Machine Age that involves the automation of cognitive tasks [Brynjolfsson and McAfee, 2014]. However, the larger impact in terms of increasing the efficiency of industry and the productivity of society might come from computers that improve or take over business decisions by using large amounts of available data. 
This impact may even exceed that of the First Machine Age, the industrial revolution that started with James Watt's invention of an efficient steam engine in the late eighteenth century. Indeed, the prevalent phrase that calls data "the new oil" indicates the growing awareness of data's importance. However, many companies, especially those in the manufacturing and traditional service industries, still struggle to increase productivity using the vast amounts of data [Organisation for Economic Co-operation and Development, 2018]. One reason for this struggle is that companies stick with a traditional way of using data for decision support in operations management that is not well suited to automated decision-making. In traditional inventory and capacity management, some data - typically just historical demand data - is used to estimate a model that makes predictions about uncertain planning parameters, such as customer demand. The planner then has two tasks: to adjust the prediction with respect to additional information that was not part of the data but still might influence demand and to take the remaining uncertainty into account and determine a safety buffer based on the underage and overage costs. In the best case, the planner determines the safety buffer based on an optimization model that takes the costs and the distribution of historical forecast errors into account; however, these decisions are usually based on a planner's experience and intuition, rather than on solid data analysis. This two-step approach is referred to as separated estimation and optimization (SEO). With SEO, using more data and better models for making the predictions would improve only the first step, which would still improve decisions but would not automize (and, hence, revolutionize) decision-making. Using SEO is like using a stronger horse to pull the plow: one still has to walk behind. 
The real potential for increasing productivity lies in moving from predictive to prescriptive approaches, that is, from the two-step SEO approach, which uses predictive models in the estimation step, to a prescriptive approach, which integrates the optimization problem with the estimation of a model that then provides a direct functional relationship between the data and the decision. Following Akcay et al. [2011], we refer to this integrated approach as joint estimation-optimization (JEO). JEO approaches prescribe decisions, so they can automate the decision-making process. Just as the steam engine replaced manual work, JEO approaches replace cognitive work. The overarching objective of this dissertation is to analyze, develop, and evaluate new ways in which data can be used in making planning decisions in operations management to unlock the potential for increasing productivity. In doing so, the thesis comprises five self-contained research articles that forge the bridge from predictive to prescriptive approaches. While the first article focuses on how sensitive data like condition data from machinery can be used to make predictions of spare-parts demand, the remaining articles introduce, analyze, and discuss prescriptive approaches to inventory and capacity management. All five articles consider approaches that use machine learning and data in innovative ways to improve current approaches to solving inventory or capacity management problems. The articles show that, by moving from predictive to prescriptive approaches, we can improve data-driven operations management in two ways: by making decisions more accurate and by automating decision-making. 
Thus, this dissertation provides examples of how digitization and the Second Machine Age can change decision-making in companies to increase efficiency and productivity.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Lauton2021, author = {Lauton, Felix}, title = {Three Essays on the Procurement of Essential Medicines in Developing Countries}, doi = {10.25972/OPUS-22063}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-220631}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {The first problem is that of the optimal volume allocation in procurement. The choice of this problem was motivated by a study whose objective was to support decision-making at two procurement organizations for the procurement of Depot Medroxyprogesterone Acetate (DMPA), an injectable contraceptive. At the time of this study, only one supplier that had undergone the costly and lengthy process of WHO pre-qualification was available to these organizations. However, a new entrant supplier was expected to receive WHO qualification within the next year, thus becoming a viable second source for DMPA procurement. When deciding how to allocate the procurement volume between the two suppliers, the buyers had to consider the impact on price as well as risk. Higher allocations to one supplier yield lower prices but expose a buyer to higher supply risks, while an even allocation will result in lower supply risk but also reduce competitive pressure, resulting in higher prices. Our research investigates this single- versus dual-sourcing problem and quantifies in one model the impact of the procurement volume on competition and risk. To support decision-makers, we develop a mathematical framework that accounts for the characteristics of donor-funded global health markets and models the effects of an entrant on purchasing costs and supply risks. 
Our in-depth analysis provides insights into how the optimal allocation decision is affected by various parameters and explores the trade-off between competition and supply risk. For example, we find that, even if the entrant supplier introduces longer lead times and a higher default risk, the buyer still benefits from dual sourcing. However, these risk-diversification benefits depend heavily on the entrant's in-country registration: If the buyer can ship the entrant's product to only a selected number of countries, the buyer does not benefit from dual sourcing as much as it would if the entrant's product could be shipped to all supplied countries. We show that the buyer should be interested in qualifying the entrant's product in countries with high demand first. In the second problem we explore a new tendering mechanism called the postponement tender, which can be useful when buyers in the global health industry want to contract new generics suppliers with uncertain product quality. The mechanism allows a buyer to postpone part of the procurement volume's allocation so the buyer can learn about the unknown quality before allocating the remaining volume to the best supplier in terms of both price and quality. We develop a mathematical model to capture the decision-maker's trade-offs in setting the right split between the initial volume and the postponed volume. Our analysis shows that a buyer can benefit from this mechanism more than it can from a single-sourcing format, as it can decrease the risk of receiving poor quality (in terms of product quality and logistics performance) and even increase competitive pressure between the suppliers, thereby lowering the purchasing costs. 
By considering market parameters like the buyer's size, the suppliers' value (difference between quality and cost), quality uncertainty, and minimum order volumes, we derive optimal sourcing strategies for various market structures and explore how competition is affected by the buyer's learning about the suppliers' quality through the initial volume. The third problem considers the repeated procurement problem of pharmacies in Kenya that have multi-product inventories. Coordinating orders allows pharmacies to achieve lower procurement prices by using the quantity discounts manufacturers offer and sharing fixed ordering costs, such as logistics costs. However, coordinating and optimizing orders for multiple products is complex and costly. To solve the coordinated procurement problem, also known as the Joint Replenishment Problem (JRP) with quantity discounts, a novel, data-driven inventory policy using sample-average approximation is proposed. The inventory policy is developed based on renewal theory and is evaluated using real-world sales data from Kenyan pharmacies. Multiple benchmarks are used to evaluate the performance of the approach. First, it is compared to the theoretically optimal policy --- that is, a dynamic-programming policy --- in the single-product setting without quantity discounts to show that the proposed policy results in comparable inventory costs. Second, the policy is evaluated for the original multi-product setting with quantity discounts and compared to ex-post optimal costs. 
The evaluation shows that the policy's performance in the multi-product setting is similar to its performance in the single-product setting (with respect to ex-post optimal costs), suggesting that the proposed policy offers a promising, data-driven solution to these types of multi-product inventory problems.}, subject = {Entwicklungsl{\"a}nder}, language = {en} } @phdthesis{Notz2021, author = {Notz, Pascal Markus}, title = {Prescriptive Analytics for Data-driven Capacity Management}, doi = {10.25972/OPUS-24042}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240423}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Digitization and artificial intelligence are radically changing virtually all areas across business and society. These developments are mainly driven by the technology of machine learning (ML), which is enabled by the coming together of large amounts of training data, statistical learning theory, and sufficient computational power. This technology forms the basis for the development of new approaches to solve classical planning problems of Operations Research (OR): prescriptive analytics approaches integrate ML prediction and OR optimization into a single prescription step, so they learn from historical observations of demand and a set of features (co-variates) and provide a model that directly prescribes future decisions. These novel approaches provide enormous potential to improve planning decisions, as first case reports showed, and, consequently, constitute a new field of research in Operations Management (OM). First works in this new field of research have studied approaches to solving comparatively simple planning problems in the area of inventory management. However, common OM planning problems often have a more complex structure, and many of these complex planning problems are within the domain of capacity planning. 
Therefore, this dissertation focuses on developing new prescriptive analytics approaches for complex capacity management problems. This dissertation consists of three independent articles that develop new prescriptive approaches and use these to solve realistic capacity planning problems. The first article, "Prescriptive Analytics for Flexible Capacity Management", develops two prescriptive analytics approaches, weighted sample average approximation (wSAA) and kernelized empirical risk minimization (kERM), to solve a complex two-stage capacity planning problem that has been studied extensively in the literature: a logistics service provider sorts daily incoming mail items on three service lines that must be staffed on a weekly basis. This article is the first to develop a kERM approach to solve a complex two-stage stochastic capacity planning problem with matrix-valued observations of demand and vector-valued decisions. The article develops out-of-sample performance guarantees for kERM and various kernels, and shows the universal approximation property when using a universal kernel. The results of the numerical study suggest that prescriptive analytics approaches may lead to significant improvements in performance compared to traditional two-step approaches or SAA and that their performance is more robust to variations in the exogenous cost parameters. The second article, "Prescriptive Analytics for a Multi-Shift Staffing Problem", uses prescriptive analytics approaches to solve the (queuing-type) multi-shift staffing problem (MSSP) of an aviation maintenance provider that receives customer requests of uncertain number and at uncertain arrival times throughout each day and plans staff capacity for two shifts. This planning problem is particularly complex because the order inflow and processing are modelled as a queuing system, and the demand in each day is non-stationary. 
The article addresses this complexity by deriving an approximation of the MSSP that enables the planning problem to be solved using wSAA, kERM, and a novel Optimization Prediction approach. A numerical evaluation shows that wSAA leads to the best performance in this particular case. The solution method developed in this article builds a foundation for solving queuing-type planning problems using prescriptive analytics approaches, so it bridges the "worlds" of queuing theory and prescriptive analytics. The third article, "Explainable Subgradient Tree Boosting for Prescriptive Analytics in Operations Management" proposes a novel prescriptive analytics approach to solve the two capacity planning problems studied in the first and second articles that allows decision-makers to derive explanations for prescribed decisions: Subgradient Tree Boosting (STB). STB combines the machine learning method Gradient Boosting with SAA and relies on subgradients because the cost function of OR planning problems often cannot be differentiated. A comprehensive numerical analysis suggests that STB can lead to a prescription performance that is comparable to that of wSAA and kERM. The explainability of STB prescriptions is demonstrated by breaking exemplary decisions down into the impacts of individual features. The novel STB approach is an attractive choice not only because of its prescription performance, but also because of the explainability that helps decision-makers understand the causality behind the prescriptions. The results presented in these three articles demonstrate that using prescriptive analytics approaches, such as wSAA, kERM, and STB, to solve complex planning problems can lead to significantly better decisions compared to traditional approaches that neglect feature data or rely on a parametric distribution estimation.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{deGraafgebButtler2024, author = {de Graaf [geb. 
Buttler], Simone Linda}, title = {From Small to Large Data: Leveraging Synthetic Data for Inventory Management}, doi = {10.25972/OPUS-36136}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-361364}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2024}, abstract = {In a world of constant change, uncertainty has become a daily challenge for businesses. Rapidly shifting market conditions highlight the need for flexible responses to unforeseen events. Operations Management (OM) is crucial for optimizing business processes, including site planning, production control, and inventory management. Traditionally, companies have relied on theoretical models from microeconomics, game theory, optimization, and simulation. However, advancements in machine learning and mathematical optimization have led to a new research field: data-driven OM. Data-driven OM uses real data, especially time series data, to create more realistic models that better capture decision-making complexities. Despite the promise of this new research area, a significant challenge remains: the availability of extensive historical training data. Synthetic data, which mimics real data, has been used to address this issue in other machine learning applications. Therefore, this dissertation explores how synthetic data can be leveraged to improve decisions for data-driven inventory management, focusing on the single-period newsvendor problem, a classic stochastic optimization problem in inventory management. The first article, "A Meta Analysis of Data-Driven Newsvendor Approaches", presents a standardized evaluation framework for data-driven prescriptive approaches, tested through a numerical study. Findings suggest model performance is not robust, emphasizing the need for a standardized evaluation process. 
The second article, "Application of Generative Adversarial Networks in Inventory Management", examines using synthetic data generated by Generative Adversarial Networks (GANs) for the newsvendor problem. This study shows GANs can model complex demand relationships, offering a promising alternative to traditional methods. The third article, "Combining Synthetic Data and Transfer Learning for Deep Reinforcement Learning in Inventory Management", proposes a method using Deep Reinforcement Learning (DRL) with synthetic and real data through transfer learning. This approach trains a generative model to learn demand distributions, generates synthetic data, and fine-tunes a DRL agent on a smaller real dataset. This method outperforms traditional approaches in controlled and practical settings, though further research is needed to generalize these findings.}, subject = {Bestandsmanagement}, language = {en} }