@article{StaigerCadotKooteretal.2012, author = {Staiger, Christine and Cadot, Sidney and Kooter, Raul and Dittrich, Marcus and M{\"u}ller, Tobias and Klau, Gunnar W. and Wessels, Lodewyk F. A.}, title = {A Critical Evaluation of Network and Pathway-Based Classifiers for Outcome Prediction in Breast Cancer}, series = {PLoS One}, volume = {7}, journal = {PLoS One}, number = {4}, doi = {10.1371/journal.pone.0034796}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-131323}, pages = {e34796}, year = {2012}, abstract = {Recently, several classifiers that combine primary tumor data, like gene expression data, and secondary data sources, such as protein-protein interaction networks, have been proposed for predicting outcome in breast cancer. In these approaches, new composite features are typically constructed by aggregating the expression levels of several genes. The secondary data sources are employed to guide this aggregation. Although many studies claim that these approaches improve classification performance over single genes classifiers, the gain in performance is difficult to assess. This stems mainly from the fact that different breast cancer data sets and validation procedures are employed to assess the performance. Here we address these issues by employing a large cohort of six breast cancer data sets as benchmark set and by performing an unbiased evaluation of the classification accuracies of the different approaches. Contrary to previous claims, we find that composite feature classifiers do not outperform simple single genes classifiers. We investigate the effect of (1) the number of selected features; (2) the specific gene set from which features are selected; (3) the size of the training set and (4) the heterogeneity of the data set on the performance of composite feature and single genes classifiers. 
Strikingly, we find that randomization of secondary data sources, which destroys all biological information in these sources, does not result in a deterioration in performance of composite feature classifiers. Finally, we show that when a proper correction for gene set size is performed, the stability of single genes sets is similar to the stability of composite feature sets. Based on these results there is currently no reason to prefer prognostic classifiers based on composite features over single genes classifiers for predicting outcome in breast cancer.}, language = {en} } @article{SchmidSchindelinCardonaetal.2010, author = {Schmid, Benjamin and Schindelin, Johannes and Cardona, Albert and Longair, Martin and Heisenberg, Martin}, title = {A high-level 3D visualization API for Java and ImageJ}, series = {BMC Bioinformatics}, volume = {11}, journal = {BMC Bioinformatics}, doi = {10.1186/1471-2105-11-274}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-67851}, pages = {274}, year = {2010}, abstract = {Background: Current imaging methods such as Magnetic Resonance Imaging (MRI), Confocal microscopy, Electron Microscopy (EM) or Selective Plane Illumination Microscopy (SPIM) yield three-dimensional (3D) data sets in need of appropriate computational methods for their analysis. The reconstruction, segmentation and registration are best approached from the 3D representation of the data set. Results: Here we present a platform-independent framework based on Java and Java 3D for accelerated rendering of biological images. Our framework is seamlessly integrated into ImageJ, a free image processing package with a vast collection of community-developed biological image analysis tools. Our framework enriches the ImageJ software libraries with methods that greatly reduce the complexity of developing image analysis tools in an interactive 3D visualization environment. In particular, we provide high-level access to volume rendering, volume editing, surface extraction, and image annotation. 
The ability to rely on a library that removes the low-level details enables concentrating software development efforts on the algorithm implementation parts. Conclusions: Our framework enables biomedical image software development to be built with 3D visualization capabilities with very little effort. We offer the source code and convenient binary packages along with extensive documentation at http://3dviewer.neurofly.de.}, subject = {Visualisierung}, language = {en} } @article{PrantlZeckBaueretal.2022, author = {Prantl, Thomas and Zeck, Timo and Bauer, Andre and Ten, Peter and Prantl, Dominik and Yahya, Ala Eddine Ben and Ifflaender, Lukas and Dmitrienko, Alexandra and Krupitzer, Christian and Kounev, Samuel}, title = {A Survey on Secure Group Communication Schemes With Focus on IoT Communication}, series = {IEEE Access}, volume = {10}, journal = {IEEE Access}, doi = {10.1109/ACCESS.2022.3206451}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300257}, pages = {99944--99962}, year = {2022}, abstract = {A key feature for Internet of Things (IoT) is to control what content is available to each user. To handle this access management, encryption schemes can be used. Due to the diverse usage of encryption schemes, there are various realizations of 1-to-1, 1-to-n, and n-to-n schemes in the literature. This multitude of encryption methods with a wide variety of properties presents developers with the challenge of selecting the optimal method for a particular use case, which is further complicated by the fact that there is no overview of existing encryption schemes. To fill this gap, we envision a cryptography encyclopedia providing such an overview of existing encryption schemes. In this survey paper, we take a first step towards such an encyclopedia by creating a sub-encyclopedia for secure group communication (SGC) schemes, which belong to the n-to-n category. We extensively surveyed the state-of-the-art and classified 47 different schemes. 
More precisely, we provide (i) a comprehensive overview of the relevant security features, (ii) a set of relevant performance metrics, (iii) a classification for secure group communication schemes, and (iv) workflow descriptions of the 47 schemes. Moreover, we perform a detailed performance and security evaluation of the 47 secure group communication schemes. Based on this evaluation, we create a guideline for the selection of secure group communication schemes.}, language = {en} } @article{HeinWienrichLatoschik2021, author = {Hein, Rebecca M. and Wienrich, Carolin and Latoschik, Marc E.}, title = {A systematic review of foreign language learning with immersive technologies (2001-2020)}, series = {AIMS Electronics and Electrical Engineering}, volume = {5}, journal = {AIMS Electronics and Electrical Engineering}, number = {2}, doi = {10.3934/electreng.2021007}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-268811}, pages = {117--145}, year = {2021}, abstract = {This study provides a systematic literature review of research (2001-2020) in the field of teaching and learning a foreign language and intercultural learning using immersive technologies. Based on 2507 sources, 54 articles were selected according to a predefined selection criteria. The review is aimed at providing information about which immersive interventions are being used for foreign language learning and teaching and where potential research gaps exist. The papers were analyzed and coded according to the following categories: (1) investigation form and education level, (2) degree of immersion, and technology used, (3) predictors, and (4) criterions. The review identified key research findings relating the use of immersive technologies for learning and teaching a foreign language and intercultural learning at cognitive, affective, and conative levels. The findings revealed research gaps in the area of teachers as a target group, and virtual reality (VR) as a fully immersive intervention form. 
Furthermore, the studies reviewed rarely examined behavior, and implicit measurements related to inter- and trans-cultural learning and teaching. Inter- and transcultural learning and teaching especially is an underrepresented investigation subject. Finally, concrete suggestions for future research are given. The systematic review contributes to the challenge of interdisciplinary cooperation between pedagogy, foreign language didactics, and Human-Computer Interaction to achieve innovative teaching-learning formats and a successful digital transformation.}, language = {en} } @article{HalbigLatoschik2021, author = {Halbig, Andreas and Latoschik, Marc Erich}, title = {A systematic review of physiological measurements, factors, methods, and applications in virtual reality}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.694567}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260503}, year = {2021}, abstract = {Measurements of physiological parameters provide an objective, often non-intrusive, and (at least semi-)automatic evaluation and utilization of user behavior. In addition, specific hardware devices of Virtual Reality (VR) often ship with built-in sensors, i.e. eye-tracking and movements sensors. Hence, the combination of physiological measurements and VR applications seems promising. Several approaches have investigated the applicability and benefits of this combination for various fields of applications. However, the range of possible application fields, coupled with potentially useful and beneficial physiological parameters, types of sensor, target variables and factors, and analysis approaches and techniques is manifold. This article provides a systematic overview and an extensive state-of-the-art review of the usage of physiological measurements in VR. We identified 1,119 works that make use of physiological measurements in VR. 
Within these, we identified 32 approaches that focus on the classification of characteristics of experience, common in VR applications. The first part of this review categorizes the 1,119 works by field of application, i.e. therapy, training, entertainment, and communication and interaction, as well as by the specific target factors and variables measured by the physiological parameters. An additional category summarizes general VR approaches applicable to all specific fields of application since they target typical VR qualities. In the second part of this review, we analyze the target factors and variables regarding the respective methods used for an automatic analysis and, potentially, classification. For example, we highlight which measurement setups have been proven to be sensitive enough to distinguish different levels of arousal, valence, anxiety, stress, or cognitive workload in the virtual realm. This work may prove useful for all researchers wanting to use physiological data in VR and who want to have a good overview of prior approaches taken, their benefits and potential drawbacks.}, language = {en} } @article{GrohmannHerbstChalbanietal.2020, author = {Grohmann, Johannes and Herbst, Nikolas and Chalbani, Avi and Arian, Yair and Peretz, Noam and Kounev, Samuel}, title = {A Taxonomy of Techniques for SLO Failure Prediction in Software Systems}, series = {Computers}, volume = {9}, journal = {Computers}, number = {1}, issn = {2073-431X}, doi = {10.3390/computers9010010}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-200594}, pages = {10}, year = {2020}, abstract = {Failure prediction is an important aspect of self-aware computing systems. Therefore, a multitude of different approaches has been proposed in the literature over the past few years. In this work, we propose a taxonomy for organizing works focusing on the prediction of Service Level Objective (SLO) failures. 
Our taxonomy classifies related work along the dimensions of the prediction target (e.g., anomaly detection, performance prediction, or failure prediction), the time horizon (e.g., detection or prediction, online or offline application), and the applied modeling type (e.g., time series forecasting, machine learning, or queueing theory). The classification is derived based on a systematic mapping of relevant papers in the area. Additionally, we give an overview of different techniques in each sub-group and address remaining challenges in order to guide future research.}, language = {en} } @article{BartlWenningerWolfetal.2021, author = {Bartl, Andrea and Wenninger, Stephan and Wolf, Erik and Botsch, Mario and Latoschik, Marc Erich}, title = {Affordable but not cheap: a case study of the effects of two 3D-reconstruction methods of virtual humans}, series = {Frontiers in Virtual Reality}, volume = {2}, journal = {Frontiers in Virtual Reality}, doi = {10.3389/frvir.2021.694617}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-260492}, year = {2021}, abstract = {Realistic and lifelike 3D-reconstruction of virtual humans has various exciting and important use cases. Our and others' appearances have notable effects on ourselves and our interaction partners in virtual environments, e.g., on acceptance, preference, trust, believability, behavior (the Proteus effect), and more. Today, multiple approaches for the 3D-reconstruction of virtual humans exist. They significantly vary in terms of the degree of achievable realism, the technical complexities, and finally, the overall reconstruction costs involved. This article compares two 3D-reconstruction approaches with very different hardware requirements. The high-cost solution uses a typical complex and elaborated camera rig consisting of 94 digital single-lens reflex (DSLR) cameras. The recently developed low-cost solution uses a smartphone camera to create videos that capture multiple views of a person. 
Both methods use photogrammetric reconstruction and template fitting with the same template model and differ in their adaptation to the method-specific input material. Each method generates high-quality virtual humans ready to be processed, animated, and rendered by standard XR simulation and game engines such as Unreal or Unity. We compare the results of the two 3D-reconstruction methods in an immersive virtual environment against each other in a user study. Our results indicate that the virtual humans from the low-cost approach are perceived similarly to those from the high-cost approach regarding the perceived similarity to the original, human-likeness, beauty, and uncanniness, despite significant differences in the objectively measured quality. The perceived feeling of change of the own body was higher for the low-cost virtual humans. Quality differences were perceived more strongly for one's own body than for other virtual humans.}, language = {en} } @article{KammererGoesterReichertetal.2021, author = {Kammerer, Klaus and G{\"o}ster, Manuel and Reichert, Manfred and Pryss, R{\"u}diger}, title = {Ambalytics: a scalable and distributed system architecture concept for bibliometric network analyses}, series = {Future Internet}, volume = {13}, journal = {Future Internet}, number = {8}, issn = {1999-5903}, doi = {10.3390/fi13080203}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-244916}, year = {2021}, abstract = {A deep understanding about a field of research is valuable for academic researchers. In addition to technical knowledge, this includes knowledge about subareas, open research questions, and social communities (networks) of individuals and organizations within a given field. With bibliometric analyses, researchers can acquire quantitatively valuable knowledge about a research area by using bibliographic information on academic publications provided by bibliographic data providers. 
Bibliometric analyses include the calculation of bibliometric networks to describe affiliations or similarities of bibliometric entities (e.g., authors) and group them into clusters representing subareas or communities. Calculating and visualizing bibliometric networks is a nontrivial and time-consuming data science task that requires highly skilled individuals. In addition to domain knowledge, researchers must often provide statistical knowledge and programming skills or use software tools having limited functionality and usability. In this paper, we present the ambalytics bibliometric platform, which reduces the complexity of bibliometric network analysis and the visualization of results. It accompanies users through the process of bibliometric analysis and eliminates the need for individuals to have programming skills and statistical knowledge, while preserving advanced functionality, such as algorithm parameterization, for experts. As a proof-of-concept, and as an example of bibliometric analyses outcomes, the calculation of research fronts networks based on a hybrid similarity approach is shown. Being designed to scale, ambalytics makes use of distributed systems concepts and technologies. It is based on the microservice architecture concept and uses the Kubernetes framework for orchestration. 
This paper presents the initial building block of a comprehensive bibliometric analysis platform called ambalytics, which aims at a high usability for users as well as scalability.}, language = {en} } @article{TsouliasJoerissenNuechter2022, author = {Tsoulias, Nikos and J{\"o}rissen, Sven and N{\"u}chter, Andreas}, title = {An approach for monitoring temperature on fruit surface by means of thermal point cloud}, series = {MethodsX}, volume = {9}, journal = {MethodsX}, issn = {2215-0161}, doi = {10.1016/j.mex.2022.101712}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-300270}, year = {2022}, abstract = {Heat and excessive solar radiation can produce abiotic stresses during apple maturation, resulting fruit quality. Therefore, the monitoring of temperature on fruit surface (FST) over the growing period can allow to identify thresholds, above of which several physiological disorders such as sunburn may occur in apple. The current approaches neglect spatial variation of FST and have reduced repeatability, resulting in unreliable predictions. In this study, LiDAR laser scanning and thermal imaging were employed to detect the temperature on fruit surface by means of 3D point cloud. A process for calibrating the two sensors based on an active board target and producing a 3D thermal point cloud was suggested. After calibration, the sensor system was utilised to scan the fruit trees, while temperature values assigned in the corresponding 3D point cloud were based on the extrinsic calibration. Whereas a fruit detection algorithm was performed to segment the FST from each apple. • The approach allows the calibration of LiDAR laser scanner with thermal camera in order to produce a 3D thermal point cloud. • The method can be applied in apple trees for segmenting FST in 3D. 
Whereas the approach can be utilised to predict several physiological disorders including sunburn on fruit surface.}, language = {en} } @article{GageikStrohmeierMontenegro2013, author = {Gageik, Nils and Strohmeier, Michael and Montenegro, Sergio}, title = {An Autonomous UAV with an Optical Flow Sensor for Positioning and Navigation}, series = {International Journal of Advanced Robotic Systems}, journal = {International Journal of Advanced Robotic Systems}, doi = {10.5772/56813}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-96368}, year = {2013}, abstract = {A procedure to control all six DOF (degrees of freedom) of a UAV (unmanned aerial vehicle) without an external reference system and to enable fully autonomous flight is presented here. For 2D positioning the principle of optical flow is used. Together with the output of height estimation, fusing ultrasonic, infrared and inertial and pressure sensor data, the 3D position of the UAV can be computed, controlled and steered. All data processing is done on the UAV. An external computer with a pathway planning interface is for commanding purposes only. The presented system is part of the AQopterI8 project, which aims to develop an autonomous flying quadrocopter for indoor application. The focus of this paper is 2D positioning using an optical flow sensor. As a result of the performed evaluation, it can be concluded that for position hold, the standard deviation of the position error is 10cm and after landing the position error is about 30cm.}, language = {en} }