# \donttest{
# Simulate 4 time series with hierarchical seasonality
# and independent AR1 dynamic processes
set.seed(111)
simdat <- sim_mvgam(n_series = 4,
                    seasonality = 'hierarchical',
                    trend_model = AR(),
                    family = gaussian())
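# Optional check (an addition to the original example): peek at the simulated
# training data to see the columns used below (y, season, series and time)
head(simdat$data_train)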
# Fit a model with shared seasonality
mod1 <- mvgam(y ~ s(season, bs = 'cc', k = 6),
              data = rbind(simdat$data_train,
                           simdat$data_test),
              family = gaussian(),
              chains = 2,
              silent = 2)
# Inspect the model and calculate LOO
conditional_effects(mod1)
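# A fuller numerical inspection is also possible via the summary method
summary(mod1)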
mc.cores.def <- getOption('mc.cores')
options(mc.cores = 1)
loo(mod1)
# Now fit a model with hierarchical seasonality
mod2 <- update(mod1,
               formula = y ~ s(season, bs = 'cc', k = 6) +
                 s(season, series, bs = 'fs',
                   xt = list(bs = 'cc'), k = 4),
               chains = 2,
               silent = 2)
conditional_effects(mod2)
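# The shared and series-level seasonal smooths can also be viewed directly
# ('smooths' is one of the types accepted by the mvgam plot method)
plot(mod2, type = 'smooths')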
loo(mod2)
# Now add AR1 dynamic errors to mod2
mod3 <- update(mod2,
               trend_model = AR(),
               chains = 2,
               silent = 2)
conditional_effects(mod3)
plot(mod3, type = 'trend')
loo(mod3)
# Compare models using LOO
loo_compare(mod1, mod2, mod3)
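# As an optional reliability check on the approximate LOO results (an addition
# to the original example), inspect the Pareto k diagnostics stored in the
# returned loo object; values above roughly 0.7 flag observations where the
# importance-sampling approximation is shaky
loo_mod3 <- loo(mod3)
sum(loo_mod3$diagnostics$pareto_k > 0.7)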
options(mc.cores = mc.cores.def)
# Compare forecast abilities using an expanding training window and
# forecasting ahead 1 timepoint from each window; the first window
# includes the first 92 timepoints (of the 100 that were simulated)
max(mod2$obs_data$time)
lfo_mod2 <- lfo_cv(mod2, min_t = 92)
lfo_mod3 <- lfo_cv(mod3, min_t = 92)
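# Summing the pointwise forecast ELPDs gives an overall out-of-sample score
# for each model (higher is better); this uses the same elpds field that is
# plotted below
sum(lfo_mod2$elpds)
sum(lfo_mod3$elpds)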
# Take the difference in forecast ELPDs; a model with higher ELPD is preferred,
# so negative values here indicate that mod3 gave better forecasts for a
# particular out-of-sample timepoint
plot(y = lfo_mod2$elpds - lfo_mod3$elpds,
     x = lfo_mod2$eval_timepoints, pch = 16,
     ylab = 'ELPD_mod2 - ELPD_mod3',
     xlab = 'Evaluation timepoint')
abline(h = 0, lty = 'dashed')
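# A single rough summary of the same comparison: the average ELPD difference
# across the evaluation timepoints (negative values favour mod3)
mean(lfo_mod2$elpds - lfo_mod3$elpds)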
# }