@phdthesis{Marquardt2023, author = {Marquardt, Andr{\'e}}, title = {Machine-Learning-Based Identification of Tumor Entities, Tumor Subgroups, and Therapy Options}, doi = {10.25972/OPUS-32954}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-329548}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2023}, abstract = {Molecular genetic analyses, such as mutation analyses, are becoming increasingly important in the tumor field, especially in the context of therapy stratification. The identification of the underlying tumor entity is crucial, but can sometimes be difficult, for example in the case of metastases or the so-called Cancer of Unknown Primary (CUP) syndrome. In recent years, machine learning (ML) approaches utilizing methylome and transcriptome data have been developed to enable fast and reliable tumor and tumor subtype identification. However, so far only methylome analyses have become widely used in routine diagnostics. The present work addresses the utility of publicly available RNA-sequencing data to determine the underlying tumor entity, possible subgroups, and potential therapy options. Identification of these by ML - in particular random forest (RF) models - was the first task. The results with test accuracies of up to 99\% provided new, previously unknown insights into the trained models and the corresponding entity prediction. Reducing the input data to the top 100 mRNA transcripts resulted in a minimal loss of prediction quality and could potentially enable application in clinical or real-world settings. By introducing the ratios of these top 100 genes to each other as a new database for RF models, a novel method was developed enabling the use of trained RF models on data from other sources. Further analysis of the transcriptomic differences of metastatic samples by visual clustering showed that there were no differences specific for the site of metastasis. Similarly, no distinct clusters were detectable when investigating primary tumors and metastases of skin cutaneous melanoma (SKCM). Subsequently, more than half of the validation datasets had a prediction accuracy of at least 80\%, with many datasets even achieving a prediction accuracy of - or close to - 100\%. To investigate the applicability of the used methods for subgroup identification, the TCGA-KIPAN dataset, consisting of the three major kidney cancer subgroups, was used. The results revealed a new, previously unknown subgroup consisting of all histopathological groups with clinically relevant characteristics, such as significantly different survival. Based on significant differences in gene expression, potential therapeutic options of the identified subgroup could be proposed. In conclusion, in exploring the potential applicability of RNA-sequencing data as a basis for therapy prediction, it was shown that this type of data is suitable to predict entities as well as subgroups with high accuracy. Clinical relevance was also demonstrated for a novel subgroup in renal cell carcinoma.
Reducing the number of genes required for entity prediction to 100 enables panel sequencing and thus demonstrates potential applicability in a real-life setting.}, subject = {Maschinelles Lernen}, language = {en} } @phdthesis{Demirbas2022, author = {Demirbas, Senem}, title = {Prognostischer Wert neuer laborchemischer Biomarker bei diagnostisch naiven Patienten mit Verdacht auf Herzinsuffizienz - Follow-Up-II-Untersuchung zur randomisierten klinischen Studie „Objektivierung der kardiovaskul{\"a}ren Dysfunktion im ambulanten und haus{\"a}rztlichen Bereich mittels handgehaltener Echokardiographie und dem BNP-Schnelltest" (Handheld-BNP-Studie)}, doi = {10.25972/OPUS-28162}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-281622}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {Herzinsuffizienz ist eine sehr h{\"a}ufige Erkrankung im hohen Lebensalter mit zudem signifikant hoher Mortalit{\"a}t - vergleichbar mit der Mortalit{\"a}t h{\"a}ufiger Krebsarten. Biomarker wie die natriuretischen Peptide sind von großer Wichtigkeit hinsichtlich der Diagnosestellung und Prognoseabsch{\"a}tzung. Auch inflammatorische Marker, Copeptin sowie Mid-regionales Adrenomedullin (MR-proADM) haben eine wichtige Rolle sowohl in der Diagnosestellung der Herzinsuffizienz als auch in der Prognoseabsch{\"a}tzung eingenommen. Die Aussagekraft der Biomarker in einem diagnostisch naiven Kollektiv mit dem klinisch-anamnestischen Verdacht auf das Vorliegen einer Herzinsuffizienz ist jedoch bisher kaum untersucht worden. Die Handheld-BNP-Studie schloss diagnostisch naive Patienten ein, die sich mit Symptomen passend zu einer Herzinsuffizienz beim Hausarzt vorstellten. Binnen 14 Tagen erfolgte die Referenzdiagnose durch einen niedergelassenen Kardiologen. Ziel war es, die diagnostische Aussagekraft von BNP und der miniaturisierten Echokardiographie im prim{\"a}r{\"a}rztlichen Bereich zu {\"u}berpr{\"u}fen. Die vorliegende Follow-Up-II-Untersuchung untersuchte die prognostische Aussagekraft moderner Biomarker (N-terminales B-natriuretisches Peptid (NT-proBNP), Mid-regionales atriales natriuretisches Peptid (MR-proANP), Mid-regionales Adrenomedullin (MR-proADM), Copeptin, Tumornekrosefaktor Alpha (TNF-α) und hochsensitives C-reaktives Protein (hsCRP)). Die Endpunkte waren Tod jeder Ursache sowie kardiovaskul{\"a}rer Tod. Insgesamt traten in unseren Analysen die natriuretischen Peptide mit ihrer prognostischen Aussagekraft hervor. In den univariaten Analysen zeigte sich das NT-proBNP als wichtigster Biomarker und in den multivariaten Analysen das MR-proANP. Bei diagnostisch naiven Patienten, die sich mit Herzinsuffizienzsymptomen bei ihrem Hausarzt vorstellen, besteht ein hohes Mortalit{\"a}tsrisiko. Um diese Patienten ad{\"a}quat zu selektieren, eine leitliniengerechte Therapie einzuleiten und um das Fortschreiten der Erkrankung aufzuhalten, ist eine fr{\"u}hzeitige Diagnosestellung beim Kardiologen wichtig. Natriuretische Peptide sind pr{\"a}diktiv, jedoch stellt das MR-proANP aufgrund fehlender generalisierter Verf{\"u}gbarkeit keine realistische Option im prim{\"a}r{\"a}rztlichen Bereich dar.
Das NT-proBNP hat eine fl{\"a}chendeckende Verf{\"u}gbarkeit und wird mittlerweile in den Herzinsuffizienz-Leitlinien der ESC bei der Verdachtsdiagnose Herzinsuffizienz standardm{\"a}ßig empfohlen.}, subject = {Biomarker}, language = {de} } @phdthesis{Zuefle2022, author = {Z{\"u}fle, Marwin Otto}, title = {Proactive Critical Event Prediction based on Monitoring Data with Focus on Technical Systems}, doi = {10.25972/OPUS-25575}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-255757}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2022}, abstract = {The importance of proactive and timely prediction of critical events is steadily increasing, whether in the manufacturing industry or in private life. In the past, machines in the manufacturing industry were often maintained based on a regular schedule or threshold violations, which is no longer competitive as it causes unnecessary costs and downtime. In contrast, the predictions of critical events in everyday life are often much more concealed and hardly noticeable to the private individual, unless the critical event occurs. For instance, our electricity provider has to ensure that we, as end users, are always supplied with sufficient electricity, or our favorite streaming service has to guarantee that we can watch our favorite series without interruptions. For this purpose, they have to constantly analyze what the current situation is, how it will develop in the near future, and how they have to react in order to cope with future conditions without causing power outages or video stalling. In order to analyze the performance of a system, monitoring mechanisms are often integrated to observe characteristics that describe the workload and the state of the system and its environment. Reactive systems typically employ thresholds, utility functions, or models to determine the current state of the system. However, such reactive systems cannot anticipate future events; they can only detect them as they occur. In the case of critical events, reactive determination of the current system state is futile, whereas a proactive system could have predicted this event in advance and enabled timely countermeasures. To achieve proactivity, the system requires estimates of future system states. Given the gap between design time and runtime, it is typically not possible to use expert knowledge to a priori model all situations a system might encounter at runtime. Therefore, prediction methods must be integrated into the system. Depending on the available monitoring data and the complexity of the prediction task, either time series forecasting in combination with thresholding or more sophisticated machine and deep learning models have to be trained. Although numerous forecasting methods have been proposed in the literature, these methods have their advantages and disadvantages depending on the characteristics of the time series under consideration. Therefore, expert knowledge is required to decide which forecasting method to choose. However, since the time series observed at runtime cannot be known at design time, such expert knowledge cannot be implemented in the system. In addition to selecting an appropriate forecasting method, several time series preprocessing steps are required to achieve satisfactory forecasting accuracy. In the literature, this preprocessing is often done manually, which is not practical for autonomous computing systems, such as Self-Aware Computing Systems.
Several approaches have also been presented in the literature for predicting critical events based on multivariate monitoring data using machine and deep learning. However, these approaches are typically highly domain-specific, such as financial failures, bearing failures, or product failures. Therefore, they require in-depth expert knowledge. For this reason, these approaches cannot be fully automated and are not transferable to other use cases. Thus, the literature lacks generalizable end-to-end workflows for modeling, detecting, and predicting failures that require only little expert knowledge. To overcome these shortcomings, this thesis presents a system model for meta-self-aware prediction of critical events based on the LRA-M loop of Self-Aware Computing Systems. Building upon this system model, this thesis provides six further contributions to critical event prediction. While the first two contributions address critical event prediction based on univariate data via time series forecasting, the three subsequent contributions address critical event prediction for multivariate monitoring data using machine and deep learning algorithms. Finally, the last contribution addresses the update procedure of the system model. Specifically, the seven main contributions of this thesis can be summarized as follows: First, we present a system model for meta self-aware prediction of critical events. To handle both univariate and multivariate monitoring data, it offers univariate time series forecasting for use cases where a single observed variable is representative of the state of the system, and machine learning algorithms combined with various preprocessing techniques for use cases where a large number of variables are observed to characterize the system's state. However, the two different modeling alternatives are not disjoint, as univariate time series forecasts can also be included to estimate future monitoring data as additional input to the machine learning models. Finally, a feedback loop is incorporated to monitor the achieved prediction quality and trigger model updates. We propose a novel hybrid time series forecasting method for univariate, seasonal time series, called Telescope. To this end, Telescope automatically preprocesses the time series, performs a kind of divide-and-conquer technique to split the time series into multiple components, and derives additional categorical information. It then forecasts the components and categorical information separately using a specific state-of-the-art method for each component. Finally, Telescope recombines the individual predictions. As Telescope performs both preprocessing and forecasting automatically, it represents a complete end-to-end approach to univariate seasonal time series forecasting. Experimental results show that Telescope achieves enhanced forecast accuracy, more reliable forecasts, and a substantial speedup. Furthermore, we apply Telescope to the scenario of predicting critical events for virtual machine auto-scaling. Here, results show that Telescope considerably reduces the average response time and significantly reduces the number of service level objective violations. For the automatic selection of a suitable forecasting method, we introduce two frameworks for recommending forecasting methods. The first framework extracts various time series characteristics to learn the relationship between them and forecast accuracy. 
In contrast, the other framework divides the historical observations into internal training and validation parts to estimate the most appropriate forecasting method. Moreover, this framework also includes time series preprocessing steps. Comparisons between the proposed forecasting method recommendation frameworks and the individual state-of-the-art forecasting methods and the state-of-the-art forecasting method recommendation approach show that the proposed frameworks considerably improve the forecast accuracy. With regard to multivariate monitoring data, we first present an end-to-end workflow to detect critical events in technical systems in the form of anomalous machine states. The end-to-end design includes raw data processing, phase segmentation, data resampling, feature extraction, and machine tool anomaly detection. In addition, the workflow does not rely on profound domain knowledge or specific monitoring variables, but merely assumes standard machine monitoring data. We evaluate the end-to-end workflow using data from a real CNC machine. The results indicate that conventional frequency analysis does not detect the critical machine conditions well, while our workflow detects the critical events very well with an F1-score of almost 91\%. To predict critical events rather than merely detecting them, we compare different modeling alternatives for critical event prediction in the use case of time-to-failure prediction of hard disk drives. Given that failure records are typically significantly less frequent than instances representing the normal state, we employ different oversampling strategies. Next, we compare the prediction quality of binary class modeling with downscaled multi-class modeling. Furthermore, we integrate univariate time series forecasting into the feature generation process to estimate future monitoring data. Finally, we model the time-to-failure using not only classification models but also regression models. The results suggest that multi-class modeling provides the overall best prediction quality with respect to practical requirements. In addition, we prove that forecasting the features of the prediction model significantly improves the critical event prediction quality. We propose an end-to-end workflow for predicting critical events of industrial machines. Again, this approach does not rely on expert knowledge except for the definition of monitoring data, and therefore represents a generalizable workflow for predicting critical events of industrial machines. The workflow includes feature extraction, feature handling, target class mapping, and model learning with integrated hyperparameter tuning via a grid-search technique. Drawing on the result of the previous contribution, the workflow models the time-to-failure prediction in terms of multiple classes, where we compare different labeling strategies for multi-class classification. The evaluation using real-world production data of an industrial press demonstrates that the workflow is capable of predicting six different time-to-failure windows with a macro F1-score of 90\%. When scaling the time-to-failure classes down to a binary prediction of critical events, the F1-score increases to above 98\%. Finally, we present four update triggers to assess when critical event prediction models should be re-trained during on-line application. Such re-training is required, for instance, due to concept drift. 
The update triggers introduced in this thesis take into account the elapsed time since the last update, the prediction quality achieved on the current test data, and the prediction quality achieved on the preceding test data. We compare the different update strategies with each other and with the static baseline model. The results demonstrate the necessity of model updates during on-line application and suggest that the update triggers that consider both the prediction quality of the current and preceding test data achieve the best trade-off between prediction quality and number of updates required. We are convinced that the contributions of this thesis provide significant impetus for the academic research community as well as for practitioners. First of all, to the best of our knowledge, we are the first to propose a fully automated, end-to-end, hybrid, component-based forecasting method for seasonal time series that also includes time series preprocessing. Due to the combination of reliably high forecast accuracy and reliably low time-to-result, it offers many new opportunities in applications requiring accurate forecasts within a fixed time period in order to take timely countermeasures. In addition, the promising results of the forecasting method recommendation systems provide new opportunities to enhance forecasting performance for all types of time series, not just seasonal ones. Furthermore, we are the first to expose the deficiencies of the prior state-of-the-art forecasting method recommendation system. Concerning the contributions to critical event prediction based on multivariate monitoring data, we have already collaborated closely with industrial partners, which supports the practical relevance of the contributions of this thesis. The automated end-to-end design of the proposed workflows that do not demand profound domain or expert knowledge represents a milestone in bridging the gap between academic theory and industrial application. Finally, the workflow for predicting critical events in industrial machines is currently being operationalized in a real production system, underscoring the practical impact of this thesis.}, subject = {Prognose}, language = {en} } @phdthesis{Bauer2021, author = {Bauer, Andr{\'e}}, title = {Automated Hybrid Time Series Forecasting: Design, Benchmarking, and Use Cases}, doi = {10.25972/OPUS-22025}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-220255}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {These days, we are living in a digitalized world. Both our professional and private lives are pervaded by various IT services, which are typically operated using distributed computing systems (e.g., cloud environments). Due to the high level of digitalization, the operators of such systems are confronted with fast-paced and changing requirements. In particular, cloud environments have to cope with load fluctuations and respective rapid and unexpected changes in the computing resource demands. To face this challenge, so-called auto-scalers, such as the threshold-based mechanism in Amazon Web Services EC2, can be employed to enable elastic scaling of the computing resources. However, despite this opportunity, business-critical applications are still run with highly overprovisioned resources to guarantee a stable and reliable service operation. This strategy is pursued due to the lack of trust in auto-scalers and the concern that inaccurate or delayed adaptations may result in financial losses.
To adapt the resource capacity in time, the future resource demands must be "foreseen", as reacting to changes once they are observed introduces an inherent delay. In other words, accurate forecasting methods are required to adapt systems proactively. A powerful approach in this context is time series forecasting, which is also applied in many other domains. The core idea is to examine past values and predict how these values will evolve as time progresses. According to the "No-Free-Lunch Theorem", there is no algorithm that performs best for all scenarios. Therefore, selecting a suitable forecasting method for a given use case is a crucial task. Simply put, each method has its benefits and drawbacks, depending on the specific use case. The choice of the forecasting method is usually based on expert knowledge, which cannot be fully automated, or on trial-and-error. In both cases, this is expensive and prone to error. Although auto-scaling and time series forecasting are established research fields, existing approaches cannot fully address the mentioned challenges: (i) In our survey on time series forecasting, we found that publications on time series forecasting typically consider only a small set of (mostly related) methods and evaluate their performance on a small number of time series with only a few error measures while providing no information on the execution time of the studied methods. Therefore, such articles cannot be used to guide the choice of an appropriate method for a particular use case; (ii) Existing open-source hybrid forecasting methods that take advantage of at least two methods to tackle the "No-Free-Lunch Theorem" are computationally intensive, poorly automated, designed for a particular data set, or they lack a predictable time-to-result. Methods exhibiting a high variance in the time-to-result cannot be applied for time-critical scenarios (e.g., auto-scaling), while methods tailored to a specific data set introduce restrictions on the possible use cases (e.g., forecasting only annual time series); (iii) Auto-scalers typically scale an application either proactively or reactively. Even though some hybrid auto-scalers exist, they lack sophisticated solutions to combine reactive and proactive scaling. For instance, resources are only released proactively while resource allocation is entirely done in a reactive manner (inherently delayed); (iv) The majority of existing mechanisms do not take the provider's pricing scheme into account while scaling an application in a public cloud environment, which often results in excessive charged costs. Even though some cost-aware auto-scalers have been proposed, they only consider the current resource demands, neglecting their development over time. For example, resources are often shut down prematurely, even though they might be required again soon. To address the mentioned challenges and the shortcomings of existing work, this thesis presents three contributions: (i) The first contribution, a forecasting benchmark, addresses the problem of limited comparability between existing forecasting methods; (ii) The second contribution, Telescope, provides an automated hybrid time series forecasting method addressing the challenge posed by the "No-Free-Lunch Theorem"; (iii) The third contribution, Chamulteon, provides a novel hybrid auto-scaler for coordinated scaling of applications comprising multiple services, leveraging Telescope to forecast the workload intensity as a basis for proactive resource provisioning.
In the following, the three contributions of the thesis are summarized: Contribution I - Forecasting Benchmark To establish a level playing field for evaluating the performance of forecasting methods in a broad setting, we propose a novel benchmark that automatically evaluates and ranks forecasting methods based on their performance in a diverse set of evaluation scenarios. The benchmark comprises four different use cases, each covering 100 heterogeneous time series taken from different domains. The data set was assembled from publicly available time series and was designed to exhibit much higher diversity than existing forecasting competitions. Besides proposing a new data set, we introduce two new measures that describe different aspects of a forecast. We applied the developed benchmark to evaluate Telescope. Contribution II - Telescope To provide a generic forecasting method, we introduce a novel machine learning-based forecasting approach that automatically retrieves relevant information from a given time series. More precisely, Telescope automatically extracts intrinsic time series features and then decomposes the time series into components, building a forecasting model for each of them. Each component is forecast by applying a different method and then the final forecast is assembled from the forecast components by employing a regression-based machine learning algorithm. In more than 1300 hours of experiments benchmarking 15 competing methods (including approaches from Uber and Facebook) on 400 time series, Telescope outperformed all methods, exhibiting the best forecast accuracy coupled with a low and reliable time-to-result. Compared to the competing methods that exhibited, on average, a forecast error (more precisely, the symmetric mean absolute forecast error) of 29\%, Telescope exhibited an error of 20\% while being 2556 times faster. In particular, the methods from Uber and Facebook exhibited an error of 48\% and 36\%, and were 7334 and 19 times slower than Telescope, respectively. Contribution III - Chamulteon To enable reliable auto-scaling, we present a hybrid auto-scaler that combines proactive and reactive techniques to scale distributed cloud applications comprising multiple services in a coordinated and cost-effective manner. More precisely, proactive adaptations are planned based on forecasts of Telescope, while reactive adaptations are triggered based on actual observations of the monitored load intensity. To solve occurring conflicts between reactive and proactive adaptations, a complex conflict resolution algorithm is implemented. Moreover, when deployed in public cloud environments, Chamulteon reviews adaptations with respect to the cloud provider's pricing scheme in order to minimize the charged costs. In more than 400 hours of experiments evaluating five competing auto-scaling mechanisms in scenarios covering five different workloads, four different applications, and three different cloud environments, Chamulteon exhibited the best auto-scaling performance and reliability while at the same time reducing the charged costs. The competing methods provided insufficient resources for (on average) 31\% of the experimental time; in contrast, Chamulteon cut this time to 8\% and the SLO (service level objective) violations from 18\% to 6\% while using up to 15\% less resources and reducing the charged costs by up to 45\%. The contributions of this thesis can be seen as major milestones in the domain of time series forecasting and cloud resource management. 
(i) This thesis is the first to present a forecasting benchmark that covers a variety of different domains with a high diversity between the analyzed time series. Based on the provided data set and the automatic evaluation procedure, the proposed benchmark contributes to enhance the comparability of forecasting methods. The benchmarking results for different forecasting methods enable the selection of the most appropriate forecasting method for a given use case. (ii) Telescope provides the first generic and fully automated time series forecasting approach that delivers both accurate and reliable forecasts while making no assumptions about the analyzed time series. Hence, it eliminates the need for expensive, time-consuming, and error-prone procedures, such as trial-and-error searches or consulting an expert. This opens up new possibilities especially in time-critical scenarios, where Telescope can provide accurate forecasts with a short and reliable time-to-result. Although Telescope was applied for this thesis in the field of cloud computing, there is absolutely no limitation regarding the applicability of Telescope in other domains, as demonstrated in the evaluation. Moreover, Telescope, which was made available on GitHub, is already used in a number of interdisciplinary data science projects, for instance, predictive maintenance in an Industry 4.0 context, heart failure prediction in medicine, or as a component of predictive models of beehive development. (iii) In the context of cloud resource management, Chamulteon is a major milestone for increasing the trust in cloud auto-scalers. The complex resolution algorithm enables reliable and accurate scaling behavior that reduces losses caused by excessive resource allocation or SLO violations. In other words, Chamulteon provides reliable online adaptations minimizing charged costs while at the same time maximizing user experience.}, subject = {Zeitreihenanalyse}, language = {en} } @phdthesis{Chifu2021, author = {Chifu, Irina}, title = {Expression und prognostische Bedeutung der Chemokinrezeptoren CXCR4 und CXCR7 bei malignen Nebennierentumoren}, doi = {10.25972/OPUS-21722}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-217225}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {Zusammenfassung: Unsere Arbeit best{\"a}tigt die aus kleineren Studien bekannte hohe Expression der Chemokinrezeptoren CXCR4 und CXCR7 in der normalen Nebenniere und in der Mehrheit der Nebennierenkarzinome. Das auf mRNA Ebene best{\"a}tigte Vorkommen beider Chemokinrezeptoren im gesunden Nebennierengewebe deutet auf eine {\"u}berwiegend f{\"u}r die normale Nebennierenphysiologie wichtige Rolle dieser Chemokinrezeptoren hin. Eine eventuell dennoch bestehende pathophysiologische Relevanz der Rezeptoren wurde erg{\"a}nzend {\"u}berpr{\"u}ft und ergab keinen signifikanten Einfluss auf die Prognose des Nebennierenkarzinoms.}, subject = {Nebennierenrindencarcinom}, language = {de} } @phdthesis{Stonawski2020, author = {Stonawski, Saskia}, title = {Emotionale Informationsverarbeitungsprozesse als Pr{\"a}diktoren und Korrelate des Therapieoutcomes bei Patienten mit Depression}, doi = {10.25972/OPUS-18869}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-188691}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {Depressionen geh{\"o}ren zu den h{\"a}ufigsten psychischen Erkrankungen. 
Neben Symptomen wie Niedergeschlagenheit, Interessenlosigkeit oder Schlafst{\"o}rungen sind Depressionen auch durch Defizite in kognitiven Funktionen, wie z.B. Aufmerksamkeitsprozessen oder der Wahrnehmung, und eine negativ verzerrte Informationsverarbeitung gekennzeichnet. Aufgrund der hohen Pr{\"a}valenz, der starken psychosozialen Funktionseinschr{\"a}nkungen durch depressive Erkrankungen und deren rezidivierenden Charakter besteht die Notwendigkeit, die therapeutischen Interventionen zur Behandlung affektiver St{\"o}rungen zu verbessern, dadurch die Krankheitsphase der Patienten zu verk{\"u}rzen und letztendlich auch die Kosten f{\"u}r das Gesundheitssystem zu reduzieren. In diesem Zusammenhang werden in den letzten Jahren verst{\"a}rkt m{\"o}gliche Pr{\"a}diktoren und Korrelate des Therapieerfolgs untersucht. Hierf{\"u}r k{\"o}nnten negativ verzerrte Informationsverarbeitungsprozesse und Defizite in kognitiven Funktionen objektive Marker darstellen. Im ersten Teil dieser Arbeit wurde der Covariation Bias, der als {\"U}bersch{\"a}tzung des Zusammenhangs zwischen einem krankheitsrelevanten Stimulus und einer aversiven Konsequenz definiert wird, in einem Querschnittsdesign bei schwer depressiven Patienten zu Behandlungsbeginn im Vergleich zu einer Gruppe von Patienten nach einer sechsw{\"o}chigen Behandlung sowie einer gesunden Kontrollgruppe untersucht. Diese kognitive Verzerrung war bei Patienten mit schwererer Symptomatik unabh{\"a}ngig vom Behandlungszeitpunkt st{\"a}rker ausgepr{\"a}gt. Zudem pr{\"a}dizierte der Covariation Bias zu Behandlungsbeginn das Therapieoutcome nach sechs Behandlungswochen dahingehend, dass Patienten mit einer st{\"a}rkeren kognitiven Verzerrung ein schlechteres Ansprechen auf die Therapie zeigten. Im zweiten Teil dieser Arbeit wurde das Emotional Processing Paradigma, das aus Aufgaben zur Emotionserkennung und zur Aufmerksamkeitslenkung besteht, zum ersten Mal bei schwer depressiven Patienten im intraindividuellen Verlauf der Behandlung eingesetzt und in Zusammenhang mit dem Therapieerfolg gestellt. Neben Hinweisen darauf, dass Patienten, bei denen sich in den ersten Behandlungswochen unter anderem die Salienz negativer Emotionen verringerte, mit h{\"o}herer Wahrscheinlichkeit remittierten, zeigten sich vor allem zeitlich stabile Unterschiede im Sinne einer Trait-Variablen zwischen Patienten, die auf die initiale Therapie ansprachen, und Patienten, die keine bedeutsame Verbesserung erfuhren, in den globalen kognitiven Funktionen: Patienten, bei denen es zu keiner klinisch relevanten Verbesserung durch die Therapie kam, wiesen st{\"a}rkere Defizite auf. Zusammengenommen weisen die Ergebnisse der vorliegenden Arbeit auf ein stabiles Muster von Defiziten in globalen kognitiven Funktionen bei Patienten mit Depressionen hin. Diese Abweichungen liegen jedoch nicht bei allen schwer depressiven Patienten gleichermaßen vor. Bei Patienten mit Defiziten scheint das Therapieoutcome schlechter zu sein. Somit k{\"o}nnten diese Prozesse der Informationsverarbeitung und kognitive Defizite auf neuropsychologischer Ebene Pr{\"a}diktoren und Korrelate des Therapieoutcomes darstellen. 
Im Sinne der personalisierten Medizin k{\"o}nnte in Zukunft die Diagnostik um die Parameter der Informationsverarbeitungsprozesse erg{\"a}nzt werden und so die Prognose des Therapieerfolgs verbessert und die Behandlung der Patienten individualisiert werden.}, subject = {Depression}, language = {de} } @phdthesis{Kordsmeyer2020, author = {Kordsmeyer, Maren}, title = {Pr{\"a}valenz und prognostischer Einfluss von An{\"a}mie, Niereninsuffizienz und Eisenmangel bei Herzinsuffizienz}, doi = {10.25972/OPUS-21051}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-210516}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2020}, abstract = {An{\"a}mie (A), Niereninsuffizienz (RI) und Eisenmangel (ID) sind h{\"a}ufige Komorbidit{\"a}ten der Herzinsuffizienz. Zum ersten Mal wurden in dieser Analyse die Pr{\"a}valenz sowie der Einfluss auf Mortalit{\"a}t aller drei Komorbidit{\"a}ten einzeln sowie koinzident in einer Population aus akut dekompensierten Herzinsuffizienzpatienten untersucht. Ebenso fehlten in der Literatur bisher Studien {\"u}ber die Pr{\"a}valenz und den Einfluss auf die Mortalit{\"a}t von An{\"a}mie und Niereninsuffizienz abh{\"a}ngig von den vier AHA/ACC-Stadien bzw. von den verschiedenen Herzinsuffizienztypen HFrEF, HFpEF und dem hinsichtlich Herzinsuffizienz bisher asymptomatischen AHA/ACC-Stadium A/B. A, RI und ID sind h{\"a}ufig und treten bei {\"U}berlebenden nach Hospitalisierung mit akut dekompensierter HFrEF oft zusammen auf. Patienten mit A und RI mit oder ohne ID haben das h{\"o}chste Risiko f{\"u}r Mortalit{\"a}t. Die Definition und prognostische Rolle des ID nach akuter kardialer Dekompensation erfordert weitere Forschungsbem{\"u}hungen. Die Pr{\"a}valenz von A und insbesondere von RI ist bereits in den asymptomatischen AHA/ACC-Stadien A und B hoch und nimmt mit dem Schweregrad der Herzinsuffizienz zu. Sowohl A als auch RI haben einen individuellen und kumulativen prognostischen Einfluss {\"u}ber das gesamte AHA/ACC-Spektrum. A und RI waren bei allen Herzinsuffizienztypen h{\"a}ufig. Mehr als 20\% der asymptomatischen AHA/ACC-Patienten im Stadium A und B hatten bereits RI. A und RI zeigten einen negativen individuellen und kumulativen prognostischen Einfluss bei allen Herzinsuffizienztypen, einschließlich der asymptomatischen Patienten (bei HFpEF gab es nur einen Trend, h{\"o}chstwahrscheinlich aufgrund der geringeren Patientenzahl). Bei Bestehen von A, RI oder ID ist eine sorgf{\"a}ltige Ursachenforschung indiziert im Rahmen eines ganzheitlichen Managements der Herzinsuffizienz mit dem Ziel, die Prognose der Herzinsuffizienz zu verbessern.}, subject = {Herzinsuffizienz}, language = {de} } @phdthesis{Lurz2015, author = {Lurz, Kristina}, title = {Confidence and Prediction under Covariates and Prior Information}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-122748}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2015}, abstract = {The purpose of confidence and prediction intervals is to provide an interval estimate for an unknown distribution parameter or the future value of a phenomenon. In many applications, prior knowledge about the distribution parameter is available, but rarely made use of, except in a Bayesian framework. This thesis provides exact frequentist confidence intervals of minimal volume exploiting prior information. The scheme is applied to distribution parameters of the binomial and the Poisson distribution.
The Bayesian approach to obtain intervals on a distribution parameter in the form of credibility intervals is considered, with particular emphasis on the binomial distribution. An application of interval estimation is found in auditing, where two-sided intervals of Stringer type are meant to contain the mean of a zero-inflated population. In the context of time series analysis, covariates are expected to improve the prediction of future values. Exponential smoothing with covariates, an extension of the popular exponential smoothing forecasting method, is considered in this thesis. A double-seasonality version of it is applied to forecast hourly electricity load using meteorological covariates. Different kinds of prediction intervals for exponential smoothing with covariates are formulated.}, subject = {Konfidenzintervall}, language = {en} } @phdthesis{Paul2014, author = {Paul, Jens Christian}, title = {Retrospektive Analyse von 119 Patienten mit kutanen Sarkomen an der Universit{\"a}ts-Hautklinik W{\"u}rzburg aus den Jahren 1999-2009: Diagnostik, Therapie und Prognose}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-116846}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2014}, abstract = {In dieser Studie werden demographische, diagnostische und therapeutische Faktoren einer an kutanen Sarkomen erkrankten Patientengruppe der W{\"u}rzburger Hautklinik untersucht. Der prognostische Wert dieser Faktoren wird mit Hilfe verschiedener statistischer Tests und Vergleiche untersucht. Die Studienpopulation umfasst 119 Patienten, die im Zeitraum von 1999 bis 2009 behandelt wurden.}, subject = {Kutane Sarkome}, language = {de} } @article{ShityakovFoersterRethwilmetal.2014, author = {Shityakov, Sergey and F{\"o}rster, Carola and Rethwilm, Axel and Dandekar, Thomas}, title = {Evaluation and Prediction of the HIV-1 Central Polypurine Tract Influence on Foamy Viral Vectors to Transduce Dividing and Growth-Arrested Cells}, doi = {10.1155/2014/487969}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-112763}, year = {2014}, abstract = {Retroviral vectors are potent tools for gene delivery and various biomedical applications. To accomplish a gene transfer task successfully, retroviral vectors must effectively transduce diverse cell cultures at different phases of a cell cycle. However, very promising retroviral vectors based on the foamy viral (FV) backbone lack the capacity to efficiently transduce quiescent cells. It is hypothesized that this phenomenon might be explained by the inability of foamy viruses to form a pre-integration complex (PIC) with nuclear import activity in growth-arrested cells, which is characteristic of lentiviruses (HIV-1). In this process, the HIV-1 central polypurine tract (cPPT) serves as a primer for plus-strand synthesis to produce a "flap" element and is believed to be crucial for the subsequent double-stranded cDNA formation of all retroviral RNA genomes. In this study, the effects of the lentiviral cPPT element on the FV transduction potential in dividing and growth-arrested (G1/S phase) adenocarcinomic human alveolar basal epithelial (A549) cells are investigated by experimental and theoretical methods. The results indicated that the HIV-1 cPPT element in a foamy viral vector background will lead to a significant reduction of the FV transduction and viral titre in growth-arrested cells due to the absence of PICs with nuclear import activity.}, subject = {Evaluation}, language = {en} }