@article{DhillonDahmsKuebertFlocketal.2023,
  author   = {Dhillon, Maninder Singh and Dahms, Thorsten and K{\"u}bert-Flock, Carina and Liepa, Adomas and Rummler, Thomas and Arnault, Joel and Steffan-Dewenter, Ingolf and Ullmann, Tobias},
  title    = {Impact of STARFM on crop yield predictions: fusing MODIS with Landsat 5, 7, and 8 NDVIs in Bavaria Germany},
  journal  = {Remote Sensing},
  volume   = {15},
  number   = {6},
  issn     = {2072-4292},
  doi      = {10.3390/rs15061651},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-311092},
  year     = {2023},
  abstract = {Rapid and accurate yield estimates at both field and regional levels remain the goal of sustainable agriculture and food security. The identification of consistent and reliable methodologies that provide accurate yield predictions is therefore one of the hot topics in agricultural research. This study investigated the impact of spatiotemporal fusion modelling using STARFM on crop yield prediction for winter wheat (WW) and oil-seed rape (OSR) using a semi-empirical light use efficiency (LUE) model for the Free State of Bavaria (70,550 km\(^2\)), Germany, from 2001 to 2019. A synthetic normalised difference vegetation index (NDVI) time series was generated and validated by fusing the high spatial resolution (30 m, 16 days) Landsat 5 Thematic Mapper (TM) (2001 to 2012), Landsat 7 Enhanced Thematic Mapper Plus (ETM+) (2012), and Landsat 8 Operational Land Imager (OLI) (2013 to 2019) with the coarse-resolution MOD13Q1 (250 m, 16 days) from 2001 to 2019. Except for some temporal periods (i.e., 2001, 2002, and 2012), the study obtained an R\(^2\) of more than 0.65 and an RMSE of less than 0.11, which shows that the Landsat 8 OLI fused products are of higher accuracy than the Landsat 5 TM products. Moreover, the accuracies of the NDVI fusion data were found to correlate with the total number of available Landsat scenes per year (N), with a correlation coefficient (R) of +0.83 (between the R\(^2\) of the yearly synthetic NDVIs and N) and -0.84 (between the RMSEs and N). For crop yield prediction, the synthetic NDVI time series and climate elements (such as minimum temperature, maximum temperature, relative humidity, evaporation, transpiration, and solar radiation) are input into the LUE model, resulting in an average R\(^2\) of 0.75 (WW) and 0.73 (OSR), and RMSEs of 4.33 dt/ha and 2.19 dt/ha. The yield prediction results confirm the consistency and stability of the LUE model for yield estimation. Using the LUE model, accurate crop yield predictions were obtained for WW (R\(^2\) = 0.88) and OSR (R\(^2\) = 0.74). Lastly, the study observed a high positive correlation of R = 0.81 and R = 0.77 between the yearly R\(^2\) of the synthetic accuracy and the modelled yield accuracy for WW and OSR, respectively.},
  language = {en}
}
@article{DhillonDahmsKuebertFlocketal.2022,
  author   = {Dhillon, Maninder Singh and Dahms, Thorsten and K{\"u}bert-Flock, Carina and Steffan-Dewenter, Ingolf and Zhang, Jie and Ullmann, Tobias},
  title    = {Spatiotemporal Fusion Modelling Using STARFM: Examples of Landsat 8 and Sentinel-2 NDVI in Bavaria},
  journal  = {Remote Sensing},
  volume   = {14},
  number   = {3},
  issn     = {2072-4292},
  doi      = {10.3390/rs14030677},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-323471},
  year     = {2022},
  abstract = {The increasing availability and variety of global satellite products provide a new level of data with different spatial, temporal, and spectral resolutions; however, identifying the resolution best suited for a specific application consumes increasingly more time and computational effort. A region's cloud coverage additionally influences the choice of the best trade-off between spatial and temporal resolution, and the different pixel sizes of remote sensing (RS) data may hinder the accurate monitoring of different land cover (LC) classes such as agriculture, forest, grassland, water, urban, and natural-seminatural. To investigate the importance of RS data for these LC classes, the present study fuses the NDVIs of two high spatial resolution datasets (high pair: Landsat (30 m, 16 days; L) and Sentinel-2 (10 m, 5-6 days; S)) with four low spatial resolution datasets (low pair: MOD13Q1 (250 m, 16 days), MCD43A4 (500 m, one day), MOD09GQ (250 m, one day), and MOD09Q1 (250 m, eight days)) using the spatial and temporal adaptive reflectance fusion model (STARFM), which fills a region's cloud or shadow gaps without losing spatial information. These eight synthetic NDVI STARFM products (2 high-pair × 4 low-pair combinations) offer a spatial resolution of 10 or 30 m and a temporal resolution of 1, 8, or 16 days for the entire state of Bavaria (Germany) in 2019. Due to their higher revisit frequency and larger number of cloud- and shadow-free scenes (S = 13, L = 9), the Sentinel-2 synthetic NDVI products (overall R\(^2\) = 0.71, RMSE = 0.11) provide more accurate results than the Landsat products (overall R\(^2\) = 0.61, RMSE = 0.13). Likewise, for the agriculture class, the synthetic products obtained using Sentinel-2 resulted in higher accuracy than Landsat, except for L-MOD13Q1 (R\(^2\) = 0.62, RMSE = 0.11), which achieved accuracy similar to S-MOD13Q1 (R\(^2\) = 0.68, RMSE = 0.13). Similarly, comparing L-MOD13Q1 (R\(^2\) = 0.60, RMSE = 0.05) and S-MOD13Q1 (R\(^2\) = 0.52, RMSE = 0.09) for the forest class, the former resulted in higher accuracy and precision than the latter. Conclusively, both L-MOD13Q1 and S-MOD13Q1 are suitable for agricultural and forest monitoring; however, its 30 m spatial resolution and lower storage requirements make L-MOD13Q1 more practical and faster to process than S-MOD13Q1 with its 10 m spatial resolution.},
  language = {en}
}
@article{EhrenfeldHerbortButz2013,
  author   = {Ehrenfeld, Stephan and Herbort, Oliver and Butz, Martin V.},
  title    = {Modular neuron-based body estimation: maintaining consistency over different limbs, modalities, and frames of reference},
  journal  = {Frontiers in Computational Neuroscience},
  volume   = {7},
  number   = {148},
  doi      = {10.3389/fncom.2013.00148},
  url      = {http://nbn-resolving.de/urn:nbn:de:bvb:20-opus-122253},
  year     = {2013},
  abstract = {This paper addresses, from a modeling perspective, the question of how the brain maintains a probabilistic body state estimate over time. The neural Modular Modality Frame (nMMF) model simulates such a body state estimation process by continuously integrating redundant, multimodal body state information sources. The body state estimate itself is distributed over separate, but bidirectionally interacting, modules. nMMF compares the incoming sensory and present body state information across the interacting modules and fuses the information sources accordingly. At the same time, nMMF enforces body state estimation consistency across the modules. nMMF is able to detect conflicting sensory information and to consequently decrease the influence of implausible sensor sources on the fly. In contrast to the previously published Modular Modality Frame (MMF) model, nMMF offers a biologically plausible neural implementation based on distributed, probabilistic population codes. Besides its neural plausibility, the neural encoding has the advantage of enabling (a) additional probabilistic information flow across the separate body state estimation modules and (b) the representation of arbitrary probability distributions of a body state. The results show that the neural estimates can detect and decrease the impact of false sensory information, can propagate conflicting information across modules, and can improve overall estimation accuracy due to additional module interactions. Even bodily illusions, such as the rubber hand illusion, can be simulated with nMMF. We conclude with an outlook on the potential of modeling human data and of invoking goal-directed behavioral control.},
  language = {en}
}