@article{KuemmelLindenberger2020, author = {K{\"u}mmel, Reiner and Lindenberger, Dietmar}, title = {Energy, entropy, constraints, and creativity in economic growth and crises}, series = {Entropy}, volume = {22}, journal = {Entropy}, number = {10}, issn = {1099-4300}, doi = {10.3390/e22101156}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-216275}, year = {2020}, abstract = {The neoclassical mainstream theory of economic growth does not care about the First and the Second Law of Thermodynamics. It usually considers only capital and labor as the factors that produce the wealth of modern industrial economies. If energy is taken into account as a factor of production, its economic weight, that is its output elasticity, is assigned a meager magnitude of roughly 5 percent, according to the neoclassical cost-share theorem. Because of that, neoclassical economics has the problems of the "Solow Residual", which is the large difference between observed and computed economic growth, and of the failure to explain the economic recessions since World War 2 by the variations of the production factors. Having recalled these problems, we point out that technological constraints on factor combinations have been overlooked in the derivation of the cost-share theorem. Biophysical analyses of economic growth that disregard this theorem and mend the neoclassical deficiencies are sketched. They show that energy's output elasticity is much larger than its cost share and elucidate the existence of bidirectional causality between energy conversion and economic growth. This helps to understand how economic crises have been triggered and overcome by supply-side and demand-side actions. Human creativity changes the state of economic systems. We discuss the challenges posed to it by the risks from politics and markets in conjunction with energy sources and technologies, and by the constraints that the emissions of particles and heat from entropy production impose on industrial growth in the biosphere.}, language = {en} } @article{TerekhovElabyadSchreiber2021, author = {Terekhov, Maxim and Elabyad, Ibrahim A. and Schreiber, Laura M.}, title = {Global optimization of default phases for parallel transmit coils for ultra-high-field cardiac MRI}, series = {PLoS One}, volume = {16}, journal = {PLoS One}, number = {8}, doi = {10.1371/journal.pone.0255341}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-265737}, year = {2021}, abstract = {The development of novel multiple-element transmit-receive arrays is an essential factor for improving B\(_1\)\(^+\) field homogeneity in cardiac MRI at ultra-high magnetic field strength (B\(_0\) ≥ 7.0 T). One of the key steps in the design and fine-tuning of such arrays during the development process is finding the default driving phases for individual coil elements that provide the best possible homogeneity of the combined B\(_1\)\(^+\)-field achievable without (or before) subject-specific B\(_1\)\(^+\)-adjustment in the scanner. This task is often solved either by time-consuming brute-force search or by optimization methods of limited efficiency. In this work, we propose a robust technique to find phase vectors providing optimization of the B\(_1\)\(^+\)-homogeneity in the default setup of multiple-element transceiver arrays. The key point of the described method is the pre-selection of starting vectors for the iterative solver-based search to maximize the probability of finding a global extremum for a cost function optimizing the homogeneity of a shaped B\(_1\)\(^+\)-field.
This strategy allows for (i) a drastic reduction of the computation time in comparison to a brute-force method and (ii) finding phase vectors providing a combined B\(_1\)\(^+\)-field with homogeneity characteristics superior to those provided by the random-multi-start optimization approach. The method was used efficiently for optimizing the default phase settings in the in-house-built 8Tx/16Rx arrays designed for cMRI in pigs at 7T.}, language = {en} } @article{AtaeeMaghsoudiLatifietal.2019, author = {Ataee, Mohammad Sadegh and Maghsoudi, Yasser and Latifi, Hooman and Fadaie, Farhad}, title = {Improving estimation accuracy of growing stock by multi-frequency SAR and multi-spectral data over Iran's heterogeneously-structured broadleaf Hyrcanian forests}, series = {Forests}, volume = {10}, journal = {Forests}, number = {8}, issn = {1999-4907}, doi = {10.3390/f10080641}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197212}, year = {2019}, abstract = {By providing various ecosystem services, the old-growth Hyrcanian forests play a crucial role in the environment and in anthropogenic aspects of Iran and beyond. Growing stock volume (GSV) is a forest biophysical parameter of great importance for issues such as the economy, environmental protection, and adaptation to climate change. Thus, accurate and unbiased estimation of GSV is also crucial across the Hyrcanian forests. Our goal was to investigate the potential of ALOS-2 and Sentinel-1's polarimetric features in combination with Sentinel-2 multi-spectral features for GSV estimation in a portion of the heterogeneously-structured and mountainous Hyrcanian forests. We used five different kernels with support vector regression (nu-SVR) for the GSV estimation. Because each kernel models the parameters differently, we selected features separately for each kernel using a binary genetic algorithm (GA). We simultaneously optimized R\(^2\) and RMSE in a proposed GA fitness function. We calculated R\(^2\) and RMSE to evaluate the models. We additionally calculated the standard deviation of the validation metrics to estimate each model's stability. To analyze over-fitting or under-fitting of the models, we used the mean difference (MD) index. The results suggested the use of the polynomial kernel as the final model. Despite multiple methodological challenges arising from the composition and structure of the study site, we conclude that the combined use of polarimetric features (both dual and full) with spectral bands and indices can improve the GSV estimation over mixed broadleaf forests. This was partially supported by the use of the proposed evaluation criterion within the GA, which helped to avoid the curse of dimensionality for the applied SVR and to achieve the lowest over- or underestimation.}, language = {en} } @phdthesis{Herrmann2021, author = {Herrmann, Marc}, title = {The Total Variation on Surfaces and of Surfaces}, doi = {10.25972/OPUS-24073}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-240736}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {This thesis is concerned with applying the total variation (TV) regularizer to surfaces and different types of shape optimization problems.
The resulting problems are challenging since they suffer from the non-differentiability of the TV-seminorm; unlike most other priors, however, it favors piecewise constant solutions, which results in piecewise flat geometries for shape optimization problems. The first part of this thesis deals with an analogue of the TV image reconstruction approach [Rudin, Osher, Fatemi (Physica D, 1992)] for images on smooth surfaces. A rigorous analytical framework is developed for this model and its Fenchel predual, which is a quadratic optimization problem with pointwise inequality constraints on the surface. A function space interior point method is proposed to solve it. Afterwards, a discrete variant (DTV) based on a nodal quadrature formula is defined for piecewise polynomial, globally discontinuous and continuous finite element functions on triangulated surface meshes. DTV has favorable properties, which include a convenient dual representation. Next, an analogue of the total variation prior for the normal vector field along the boundary of smooth shapes in 3D is introduced. Its analysis is based on a differential geometric setting in which the unit normal vector is viewed as an element of the two-dimensional sphere manifold. Shape calculus is used to characterize the relevant derivatives, and a variant of the split Bregman method for manifold-valued functions is proposed. This is followed by an extension of the total variation prior for the normal vector field to piecewise flat surfaces, and the previous variant of the split Bregman method is adapted accordingly. Numerical experiments confirm that the new prior favors polyhedral shapes.}, subject = {Gestaltoptimierung}, language = {en} } @article{KonijnenbergHerrmannKobeetal.2021, author = {Konijnenberg, Mark and Herrmann, Ken and Kobe, Carsten and Verburg, Frederik and Hindorf, Cecilia and Hustinx, Roland and Lassmann, Michael}, title = {EANM position paper on article 56 of the Council Directive 2013/59/Euratom (basic safety standards) for nuclear medicine therapy}, series = {European Journal of Nuclear Medicine and Molecular Imaging}, volume = {48}, journal = {European Journal of Nuclear Medicine and Molecular Imaging}, issn = {1619-7070}, doi = {10.1007/s00259-020-05038-9}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-235280}, pages = {67-72}, year = {2021}, abstract = {The EC Directive 2013/59/Euratom states in article 56 that exposures of target volumes in nuclear medicine treatments shall be individually planned and their delivery appropriately verified. The Directive also mentions that medical physics experts should always be appropriately involved in those treatments. Although it is obvious that, in nuclear medicine practice, every nuclear medicine physician and physicist should follow national rules and legislation, the EANM considered it necessary to provide guidance on how to interpret the Directive statements for nuclear medicine treatments. For this purpose, the EANM proposes to distinguish three levels of compliance with the optimization principle in the Directive, inspired by the indication of levels in prescribing, recording and reporting of absorbed doses after radiotherapy defined by the International Commission on Radiation Units and Measurements (ICRU): Most nuclear medicine treatments currently applied in Europe are standardized.
The minimum requirement for those treatments is ICRU level 1 ("activity-based prescription and patient-averaged dosimetry"), which is defined by administering the activity within 10\% of the intended activity, typically according to the package insert or to the respective EANM guidelines, followed by verification of the therapy delivery, if applicable. Non-standardized treatments are essentially those in the developmental phase or approved radiopharmaceuticals being used off-label with significantly (> 25\% more than in the label) higher activities. These treatments should comply with ICRU level 2 ("activity-based prescription and patient-specific dosimetry"), which implies recording and reporting of the absorbed dose to organs at risk and optionally the absorbed dose to treatment regions. The EANM strongly encourages fostering research that eventually leads to treatment planning according to ICRU level 3 ("dosimetry-guided patient-specific prescription and verification"), whenever possible and relevant. Evidence for the superiority of therapy prescription on the basis of patient-specific dosimetry has not been obtained. However, the authors believe that a better understanding of therapy dosimetry, i.e., how much and where the energy is delivered, and radiobiology, i.e., radiation-related processes in tissues, are key to the long-term improvement of our treatments.}, language = {en} } @phdthesis{Moldovan2021, author = {Moldovan, Christian}, title = {Performance Modeling of Mobile Video Streaming}, issn = {1432-8801}, doi = {10.25972/OPUS-22871}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-228715}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2021}, abstract = {In the past two decades, there has been a trend to move from traditional television to Internet-based video services. With video streaming becoming one of the most popular applications on the Internet and the current state of the art in media consumption, quality expectations of consumers are increasing. In contrast to some years ago, low-quality videos are no longer considered acceptable due to the increased size and resolution of devices. If the high expectations of the users are not met and a video is delivered in poor quality, they often abandon the service. Therefore, Internet Service Providers (ISPs) and video service providers are facing the challenge of providing seamless multimedia delivery in high quality. Currently, during peak hours, video streaming causes almost 58\% of the downstream traffic on the Internet. With higher mobile bandwidth, mobile video streaming has also become commonplace. According to the 2019 Cisco Visual Networking Index, in 2022 79\% of mobile traffic will be video traffic and, according to Ericsson, by 2025 video is forecast to make up 76\% of total Internet traffic. Ericsson further predicts that in 2024 over 1.4 billion devices will be subscribed to 5G, which will offer a downlink data rate of 100 Mbit/s in dense urban environments. One of the most important goals of ISPs and video service providers is for their users to have a high Quality of Experience (QoE). The QoE describes the degree of delight or annoyance a user experiences when using a service or application. In video streaming, the QoE depends on how seamlessly a video is played and whether there are stalling events or quality degradations. These characteristics of a transmitted video are described as the application layer Quality of Service (QoS).
In general, the QoS is defined by the ITU as "the totality of characteristics of a telecommunications service that bear on its ability to satisfy stated and implied needs of the user of the service". The network layer QoS describes the performance of the network and is decisive for the application layer QoS. In Internet video, a buffer is typically used to store downloaded video segments to compensate for network fluctuations. If the available bandwidth decreases temporarily, the video can still be played out from the buffer without interruption; if the buffer runs empty, stalling occurs. There are different policies and parameters that determine how large the buffer is, at what buffer level to start the video, and at what buffer level to resume playout after stalling. These have to be finely tuned to achieve the highest QoE for the user. If the bandwidth decreases for a longer time period, a limited buffer will deplete and stalling cannot be avoided. An important research question is how to configure the buffer optimally for different users and situations. In this work, we tackle this question using analytic models and measurement studies. With HTTP Adaptive Streaming (HAS), the video players have the capability to adapt the video bit rate at the client side according to the available network capacity. This way, the depletion of the video buffer and thus stalling can be avoided. In HAS, the quality in which the video is played and the number of quality switches also have an impact on the QoE. Thus, an important problem is the adaptation of video streaming so that these parameters are optimized. In a shared WiFi network, multiple video users share a single bottleneck link and compete for bandwidth. In such a scenario, it is important that resources are allocated to users in a way that all can have a similar QoE. In this work, we therefore investigate the possible fairness gain when moving from network fairness towards application-layer QoS fairness. In mobile scenarios, the energy and data consumption of the user device are limited resources and must be managed alongside the QoE. Therefore, it is also necessary to investigate solutions that conserve these resources on mobile devices. But how can resources be conserved without sacrificing application layer QoS? As an example of such a solution, this work presents a new probabilistic adaptation algorithm that uses abandonment statistics for its decision making, aiming at minimizing the resource consumption while maintaining high QoS. With current protocol developments such as 5G, bandwidths are increasing, latencies are decreasing and networks are becoming more stable, leading to higher QoS. This allows new real-time, data-intensive applications such as cloud gaming, virtual reality, and augmented reality to become feasible on mobile devices, which poses completely new research questions. The high energy consumption of such applications still remains an issue, as the energy capacity of devices is currently not increasing as quickly as the available data rates.
In this work, we compare the optimal performance of different strategies for adaptive 360-degree video streaming.}, subject = {Video{\"u}bertragung}, language = {en} } @article{DittmannBallesZabler2018, author = {Dittmann, Jonas and Balles, Andreas and Zabler, Simon}, title = {Optimization based evaluation of grating interferometric phase stepping series and analysis of mechanical setup instabilities}, series = {Journal of Imaging}, volume = {4}, journal = {Journal of Imaging}, number = {6}, issn = {2313-433X}, doi = {10.3390/jimaging4060077}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-197723}, pages = {77}, year = {2018}, abstract = {The diffraction contrast modalities accessible by X-ray grating interferometers are not imaged directly but have to be inferred from sine-like signal variations occurring in a series of images acquired at varying relative positions of the interferometer's gratings. The absolute spatial translations involved in the acquisition of these phase stepping series usually lie in the range of only a few hundred nanometers, so that positioning errors as small as 10 nm will already translate into signal uncertainties of 1-10\% in the final images if not accounted for. Classically, the relative grating positions in the phase stepping series are considered input parameters to the analysis and are, for the Fast Fourier Transform that is typically employed, required to be equidistantly distributed over multiples of the gratings' period. In the following, a fast-converging optimization scheme is presented that simultaneously determines the phase stepping curves' parameters as well as the actually performed motions of the stepped grating, including erroneous rotational motions, which are commonly neglected. While the correction of solely the translational errors along the stepping direction is found to be sufficient with regard to the reduction of image artifacts, the possibility to also detect minute rotations about all axes proves to be a valuable tool for system calibration and monitoring. The simplicity of the provided algorithm, in particular when only translational errors are considered, makes it well suited as a standard evaluation procedure, also for large image series.}, language = {en} } @phdthesis{Fleszar2018, author = {Fleszar, Krzysztof}, title = {Network-Design Problems in Graphs and on the Plane}, edition = {1. Auflage}, publisher = {W{\"u}rzburg University Press}, address = {W{\"u}rzburg}, isbn = {978-3-95826-076-4 (Print)}, doi = {10.25972/WUP-978-3-95826-077-1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-154904}, school = {Universit{\"a}t W{\"u}rzburg}, pages = {xi, 204}, year = {2018}, abstract = {A network design problem defines an infinite set whose elements, called instances, describe relationships and network constraints. It asks for an algorithm that, given an instance of this set, designs a network that respects the given constraints and at the same time optimizes some given criterion. In my thesis, I develop algorithms whose solutions are optimum or close to an optimum value within some guaranteed bound. I also examine the computational complexity of these problems. Problems from two vast areas are considered: graphs and the Euclidean plane. In the Maximum Edge Disjoint Paths problem, we are given a graph and a subset of vertex pairs that are called terminal pairs. We are asked for a set of paths where the endpoints of each path form a terminal pair. The constraint is that any two paths share at most one inner vertex.
The optimization criterion is to maximize the cardinality of the set. In the hard-capacitated k-Facility Location problem, we are given an integer k and a complete graph where the distances obey a given metric and where each node has two numerical values: a capacity and an opening cost. We are asked for a subset of k nodes, called facilities, and an assignment of all the nodes, called clients, to the facilities. The constraint is that the number of clients assigned to a facility cannot exceed the facility's capacity value. The optimization criterion is to minimize the total cost, which consists of the total opening cost of the facilities and the total distance between the clients and the facilities they are assigned to. In the Stabbing problem, we are given a set of axis-aligned rectangles in the plane. We are asked for a set of horizontal line segments such that, for every rectangle, there is a line segment crossing its left and right edge. The optimization criterion is to minimize the total length of the line segments. In the k-Colored Non-Crossing Euclidean Steiner Forest problem, we are given an integer k and a finite set of points in the plane where each point has one of k colors. For every color, we are asked for a drawing that connects all the points of the same color. The constraint is that drawings of different colors are not allowed to cross each other. The optimization criterion is to minimize the total length of the drawings. In the Minimum Rectilinear Polygon for Given Angle Sequence problem, we are given an angle sequence of left (+90°) turns and right (-90°) turns. We are asked for an axis-parallel simple polygon where the angles of the vertices yield the given sequence when walking around the polygon in a counter-clockwise manner. The optimization criteria considered are to minimize the perimeter, the area, and the size of the axis-parallel bounding box of the polygon.}, subject = {Euklidische Ebene}, language = {en} } @article{BratengeierGaineyFlentje2011, author = {Bratengeier, Klaus and Gainey, Mark B. and Flentje, Michael}, title = {Fast IMRT by increasing the beam number and reducing the number of segments}, series = {Radiation Oncology}, volume = {6}, journal = {Radiation Oncology}, number = {170}, doi = {10.1186/1748-717X-6-170}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-137994}, year = {2011}, abstract = {Purpose: The purpose of this work is to develop a fast deliverable step-and-shoot IMRT technique. A reduction in the number of segments should theoretically be possible, whilst simultaneously maintaining plan quality, provided that the reduction is accompanied by an increased number of gantry angles. A benefit of this method is that the segment shaping could be performed during gantry motion, thereby reducing the delivery time. The aim was to find classes of such solutions whose plan quality can compete with conventional IMRT. Materials/Methods: A planning study was performed. Step-and-shoot IMRT plans were created using direct machine parameter optimization (DMPO) as a reference. DMPO plans were compared to an IMRT variant having only one segment per angle ("2-Step Fast"). 2-Step Fast is based on a geometrical analysis of the topology of the planning target volume (PTV) and the organs at risk (OAR). A prostate/rectum case, a spine metastasis/spinal cord case, a breast/lung case, and an artificial PTV/OAR combination of the ESTRO-Quasimodo phantom were used for the study. The composite objective value (COV), a quality score, and the plan delivery time were compared.
The delivery time for the DMPO reference plan and the 2-Step Fast IMRT technique was measured and calculated for two different linacs: a twelve-year-old Siemens Primus™ ("old" linac) and two Elekta Synergy™ "S" linacs ("new" linacs). Results: 2-Step Fast had comparable or better quality than the reference DMPO plan. The number of segments was smaller than for the reference plan, and the number of gantry angles was between 23 and 34. For the modern linac, the delivery time was always smaller than that for the reference plan. The calculated (measured) values showed a mean delivery time reduction of 21\% (21\%) for the new linac, and of 7\% (3\%) for the old linac compared to the respective DMPO reference plans. For the old linac, the data handling time per beam was the limiting factor for the treatment time reduction. Conclusions: 2-Step Fast plans are suited to reduce the delivery time, especially if the data handling time per beam is short. The plan quality can be retained or even increased for fewer segments, provided more gantry angles are used.}, language = {en} } @article{BillerCholiBlaimeretal.2014, author = {Biller, Armin and Choli, Morwan and Blaimer, Martin and Breuer, Felix A. and Jakob, Peter M. and Bartsch, Andreas J.}, title = {Combined Acquisition Technique (CAT) for Neuroimaging of Multiple Sclerosis at Low Specific Absorption Rates (SAR)}, series = {PLOS ONE}, volume = {9}, journal = {PLOS ONE}, number = {3}, issn = {1932-6203}, doi = {10.1371/journal.pone.0091030}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-117179}, pages = {e91030}, year = {2014}, abstract = {Purpose: To compare a novel combined acquisition technique (CAT) of turbo-spin-echo (TSE) and echo-planar-imaging (EPI) with conventional TSE. CAT reduces the electromagnetic energy load transmitted for spin excitation. This radiofrequency (RF) burden is limited by the specific absorption rate (SAR) for patient safety. SAR limits restrict high-field MRI applications, in particular. Material and Methods: The study was approved by the local Medical Ethics Committee. Written informed consent was obtained from all participants. T2- and PD-weighted brain images of n = 40 Multiple Sclerosis (MS) patients were acquired by CAT and TSE at 3 Tesla. Lesions were recorded by two blinded, board-certified neuroradiologists. Diagnostic equivalence of CAT and TSE to detect MS lesions was evaluated along with their SAR, sound pressure level (SPL) and sensations of acoustic noise, heating, vibration and peripheral nerve stimulation. Results: Every MS lesion revealed on TSE was detected by CAT according to both raters (Cohen's kappa of within-rater/across-CAT/TSE lesion detection \(\kappa_{CAT}\) = 1.00, at an inter-rater lesion detection agreement of \(\kappa_{LES}\) = 0.82). CAT reduced the SAR burden significantly compared to TSE (p<0.001). Mean SAR differences between TSE and CAT were 29.0 (± 5.7) \% for the T2-contrast and 32.7 (± 21.9) \% for the PD-contrast (expressed as percentages of the effective SAR limit of 3.2 W/kg for head examinations). The average SPL during CAT was no higher than during TSE. Sensations of CAT- vs. TSE-induced heating, noise and scanning vibrations did not differ. Conclusion: T2-/PD-CAT is diagnostically equivalent to TSE for MS lesion detection yet substantially reduces the RF exposure. Such SAR reduction facilitates high-field MRI applications at 3 Tesla or above and corresponding protocol standardizations, but CAT can also be used to scan faster, at higher resolution, or with more slices.
According to our data, CAT is no more uncomfortable than TSE scanning.}, language = {en} } @phdthesis{Klug2006, author = {Klug, Andreas}, title = {Affine-Scaling Methods for Nonlinear Minimization Problems and Nonlinear Systems of Equations with Bound Constraints}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-18851}, school = {Universit{\"a}t W{\"u}rzburg}, year = {2006}, abstract = {In this thesis, affine-scaling methods for two different types of mathematical problems are considered. The first type consists of nonlinear optimization problems subject to bound constraints. A class of new affine-scaling Newton-type methods is introduced. The methods are shown to be locally quadratically convergent without assuming strict complementarity of the solution. The new methods differ from previous ones mainly in the choice of the scaling matrix. The second type consists of semismooth systems of equations with bound constraints. A new affine-scaling trust-region method for these problems is developed. The method is shown to have strong global and local convergence properties under suitable assumptions. Numerical results are presented for a number of problems arising from different areas.}, subject = {Skalierungsfunktion}, language = {en} }