diff --git a/.DS_Store b/.DS_Store index 0007e9c..8fc784f 100644 Binary files a/.DS_Store and b/.DS_Store differ diff --git a/.Rbuildignore b/.Rbuildignore index 06f5110..0f02cc4 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -1,10 +1,13 @@ ^.*\.Rproj$ ^\.Rproj\.user$ -README.rmd -README_cache/ -codecov.yml -README.md -cran-comments.md man-roxygen -\.travis.yml -_config.yml \ No newline at end of file +^README\.Rmd$ +^README-.*\.png$ +README_cache +^\.travis\.yml$ +^_config\.yml$ +^codecov\.yml$ +^cran-comments\.md$ +^/\.gitattributes$ +^doc$ +^Meta$ diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..f00d674 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ +# Declare files that will always have LF line endings on checkout. +^configure\.ac$ text eol=lf +^cleanup$ text eol=lf \ No newline at end of file diff --git a/.gitignore b/.gitignore index 768eddc..7b0f9ac 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,11 @@ .Rproj.user .Rhistory .RData -.Ruserdata src/*.o src/*.so src/*.dll -autom4te.cache/ +README_cache/ src/.DS_Store +Meta .DS_Store +doc diff --git a/.travis.yml b/.travis.yml index 36a8fb4..e75c301 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,29 +1,21 @@ # R for travis: see documentation at https://docs.travis-ci.com/user/languages/r language: R -sudo: false +sudo: true cache: packages +warnings_are_errors: true os: - linux - osx r: - - oldrel - release - - devel - -r_check_args: --as-cran - -matrix: - exclude: - - os: osx - r: devel branches: only: - master r_github_packages: - - jimhester/covr + - r-lib/covr r_binary_packages: - Rcpp - RcppArmadillo @@ -32,6 +24,16 @@ r_binary_packages: - testthat - roxygen2 - devtools + - factorstochvol + - tinytex + +before_install: + - chmod +x configure + - chmod +x cleanup + +r_build_args: --no-build-vignettes --no-manual --no-resave-data +r_check_args: --ignore-vignettes --no-manual + after_success: - tar -C .. 
-xf $PKG_TARBALL diff --git a/DESCRIPTION b/DESCRIPTION index 7e0fe86..3951240 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,21 +1,38 @@ Package: mfbvar Type: Package Title: Mixed-Frequency Bayesian VAR Models -Version: 0.5.0.9000 -Date: 2019-05-09 +Version: 0.5.6 +Date: 2021-02-09 Authors@R: c( person("Sebastian", "Ankargren", email = "sebastian.ankargren@statistics.uu.se", role = c("cre", "aut"), comment = c(ORCID = "0000-0003-4415-8734")), - person("Yukai", "Yang", email = "yukai.yang@statistics.uu.se", role = c("aut"), comment=c(ORCID="0000-0002-2623-8549"))) -Description: Estimation of mixed-frequency Bayesian vector autoregressive (VAR) models with Minnesota or steady-state priors. The package implements a state space-based VAR model that handles mixed frequencies of the data. The model is estimated using Markov Chain Monte Carlo to numerically approximate the posterior distribution, where the prior can be either the Minnesota prior, as used by Schorfheide and Song (2015) , or the steady-state prior, as advocated by Ankargren, Unosson and Yang (2018) . + person("Yukai", "Yang", email = "yukai.yang@statistics.uu.se", role = c("aut"), comment=c(ORCID="0000-0002-2623-8549")), + person("Gregor", "Kastner", role = "ctb", comment = c(ORCID="0000-0002-8237-8271"))) +Description: Functions and tools for estimation of mixed-frequency Bayesian vector autoregressive (VAR) models. The package implements a state space-based VAR model that handles mixed frequencies of the data as proposed by Schorfheide and Song (2015) , and extensions thereof developed by Ankargren, Unosson and Yang (2020) , Ankargren and Joneus (2019) , and Ankargren and Joneus (2020) . The models are estimated using Markov Chain Monte Carlo to numerically approximate the posterior distribution. Prior distributions that can be used include normal-inverse Wishart and normal-diffuse priors as well as steady-state priors. 
Stochastic volatility can be handled by common or factor stochastic volatility models. License: GPL-3 LazyData: TRUE URL: https://github.com/ankargren/mfbvar BugReports: https://github.com/ankargren/mfbvar/issues Imports: - Rcpp (>= 0.12.7), ggplot2 (>= 2.2.1), methods, pbapply, utils, factorstochvol, progress, lubridate, GIGrvg + Rcpp (>= 0.12.7), + ggplot2 (>= 3.3.0), + methods, + lubridate, + GIGrvg, + stochvol (>= 2.0.3), + RcppParallel, + dplyr, + magrittr, + tibble, + zoo LinkingTo: - Rcpp, RcppArmadillo, RcppProgress -Depends: R (>= 2.10) -Suggests: testthat, covr, tidyverse -RoxygenNote: 6.1.1 + Rcpp, + RcppArmadillo, + RcppProgress, + stochvol (>= 2.0.3), + RcppParallel +Depends: R (>= 3.5.0) +Suggests: testthat, covr, knitr, ggridges, alfred, factorstochvol +RoxygenNote: 7.1.1 Encoding: UTF-8 +SystemRequirements: GNU make +VignetteBuilder: knitr diff --git a/MFBVAR.Rproj b/MFBVAR.Rproj index d9de8b9..2c9794f 100644 --- a/MFBVAR.Rproj +++ b/MFBVAR.Rproj @@ -17,6 +17,8 @@ StripTrailingWhitespace: Yes BuildType: Package PackageUseDevtools: Yes -PackageInstallArgs: --no-multiarch --with-keep.source --clean -PackageCheckArgs: --as-cran --clean +PackageInstallArgs: --no-multiarch --with-keep.source --clean --compact-vignettes=both +PackageBuildArgs: --compact-vignettes=both +PackageBuildBinaryArgs: --compact-vignettes=both +PackageCheckArgs: --as-cran --clean --compact-vignettes=both PackageRoxygenize: rd,collate diff --git a/NAMESPACE b/NAMESPACE index 9a601f5..237f04f 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,34 +1,48 @@ importFrom(Rcpp, evalCpp) useDynLib(mfbvar, .registration = TRUE) import(stats) -importFrom("utils", "setTxtProgressBar", "txtProgressBar") -importFrom("pbapply", "setTimerProgressBar", "timerProgressBar") -importFrom("progress", "progress_bar") importFrom(magrittr,"%>%") importFrom(tibble,"tibble") -importFrom(dplyr, "group_by", "summarize", "ungroup", "mutate", "transmute", "bind_rows") -importFrom(lubridate, "%m-%", "%m+%", "days", 
"ymd", "quarter", "month", "year") +importFrom(dplyr, "group_by", "summarize", "ungroup", "mutate", "transmute", "bind_rows", "pull") +importFrom(lubridate, "%m-%", "%m+%", "days", "ymd", "quarter", "month", "year", + "as_date", "day", "days_in_month", "ceiling_date", "floor_date") import(ggplot2) +importFrom(zoo, "as.Date", "zoo", "as.Date.ts", "index", "merge.zoo") +importFrom(stochvol,svsample) S3method(print, mfbvar_prior) S3method(summary, mfbvar_prior) -S3method(mdd, mfbvar_minn_iw) -S3method(mdd, mfbvar_ss_iw) -S3method(mcmc_sampler, mfbvar_minn_iw) -S3method(mcmc_sampler, mfbvar_ss_iw) S3method(print, mfbvar) S3method(summary, mfbvar) S3method(plot, mfbvar_minn) S3method(plot, mfbvar_ss) S3method(plot, mfbvar_ssng) +S3method(plot, mfbvar_dl) S3method(plot, mfbvar_prior) +S3method(mcmc_sampler, mfbvar_minn_fsv) +S3method(mcmc_sampler, mfbvar_dl_fsv) +S3method(mcmc_sampler, mfbvar_ss_fsv) +S3method(mcmc_sampler, mfbvar_ssng_fsv) +S3method(mcmc_sampler, mfbvar_minn_diffuse) +S3method(mcmc_sampler, mfbvar_dl_diffuse) +S3method(mcmc_sampler, mfbvar_ss_diffuse) +S3method(mcmc_sampler, mfbvar_ssng_diffuse) +S3method(mcmc_sampler, mfbvar_minn_csv) +S3method(mcmc_sampler, mfbvar_ss_csv) +S3method(mcmc_sampler, mfbvar_ssng_csv) +S3method(mcmc_sampler, mfbvar_minn_iw) +S3method(mcmc_sampler, mfbvar_ss_iw) +S3method(mcmc_sampler, mfbvar_ssng_iw) +S3method(mdd, mfbvar_ss_iw) +S3method(mdd, mfbvar_minn_iw) S3method(predict, mfbvar) S3method(predict, sfbvar) importFrom("methods", "hasArg") +export(mdd) export(set_prior) export(update_prior) export(estimate_mfbvar) export(interval_to_moments) -export(mdd) -export(mcmc_sampler) export(varplot) importFrom(GIGrvg,rgig) +importFrom(RcppParallel, RcppParallelLibs) + diff --git a/NEWS.md b/NEWS.md new file mode 100644 index 0000000..8707ef4 --- /dev/null +++ b/NEWS.md @@ -0,0 +1,23 @@ +# mfbvar 0.5.6 (2021-02-03) +* Removed use of internet connection in vignette +* Enabled use of weekly-monthly frequency mix + +# mfbvar 0.5.4 
(2020-05-14) +* Changes to the main interface. Data can (and should) now be given as a list of `zooreg` or `ts` objects. + +# mfbvar 0.5.3 (2020-03-18) +* Fixed a bug caused by the plotting functions + +# mfbvar 0.5.1 (2019-08-16) +* Support for more priors +* Stochastic volatility models +* Better `predict` functions +* Faster implementations +* Some support for quarterly/monthly (i.e. single-frequency) models +* Vignette added + +# TODO +* Impulse responses +* Marginal data densities for more specifications (currently `minn`-`iw` with `average` only), and in C++ +* Enable use of less lags than what the aggregations need + diff --git a/R/RcppExports.R b/R/RcppExports.R index d5d5252..1240316 100644 --- a/R/RcppExports.R +++ b/R/RcppExports.R @@ -5,6 +5,7 @@ #' @templateVar n_vars TRUE #' @templateVar n_lags TRUE #' @keywords internal +#' @noRd #' @template man_template build_U_cpp <- function(Pi, n_determ, n_vars, n_lags) { .Call(`_mfbvar_build_U_cpp`, Pi, n_determ, n_vars, n_lags) @@ -26,6 +27,10 @@ create_X_t_noint <- function(y) { .Call(`_mfbvar_create_X_t_noint`, y) } +dl_reg <- function(y, x, beta, aux, global, local, prior_Pi_Omega, n_reps, a, gig) { + invisible(.Call(`_mfbvar_dl_reg`, y, x, beta, aux, global, local, prior_Pi_Omega, n_reps, a, gig)) +} + #' @title Kalman filter and smoother #' #' @description Kalman filter and smoother (\code{kf_ragged}) and simulation smoother (\code{kf_sim_smooth}) for mixed-frequency data with ragged edges. This function is more computationally efficient than using a companion form representation. 
@@ -36,6 +41,7 @@ create_X_t_noint <- function(y) { #' @param n_q_ number of quarterly variables #' @param T_b_ final time period where all monthly variables are observed #' @keywords internal +#' @noRd #' @return For \code{kf_ragged}, a list with elements: #' \item{a}{The one-step predictions (for the compact form)} #' \item{a_tt}{The filtered estimates (for the compact form)} @@ -47,7 +53,6 @@ kf_loglike <- function(y_, Phi_, Sigma_, Lambda_, a00, P00) { } #' @title Kalman filter and smoother -#' #' @description Kalman filter and smoother (\code{kf_ragged}) and simulation smoother (\code{kf_sim_smooth}) for mixed-frequency data with ragged edges. This function is more computationally efficient than using a companion form representation. #' @param y_ matrix with the data #' @param Phi_ matrix with the autoregressive parameters, where the last column is the intercept @@ -56,6 +61,7 @@ kf_loglike <- function(y_, Phi_, Sigma_, Lambda_, a00, P00) { #' @param n_q_ number of quarterly variables #' @param T_b_ final time period where all monthly variables are observed #' @keywords internal +#' @noRd #' @return For \code{kf_ragged}, a list with elements: #' \item{a}{The one-step predictions (for the compact form)} #' \item{a_tt}{The filtered estimates (for the compact form)} @@ -80,45 +86,42 @@ kf_sim_smooth <- function(y_, Phi_, Sigma_, Lambda_, Z1_, n_q_, T_b_) { #' @templateVar A TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return The maximum eigenvalue. 
max_eig_cpp <- function(A) { .Call(`_mfbvar_max_eig_cpp`, A) } -mcmc_minn_csv <- function(y_in_p, Pi, Sigma, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose) { - invisible(.Call(`_mfbvar_mcmc_minn_csv`, y_in_p, Pi, Sigma, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose)) +mcmc_minn_csv <- function(y_in_p, Pi, Sigma, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose) { + invisible(.Call(`_mfbvar_mcmc_minn_csv`, y_in_p, Pi, Sigma, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose)) } -mcmc_ss_csv <- function(y_in_p, Pi, Sigma, psi, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, inv_prior_psi_Omega, inv_prior_psi_Omega_mean, check_roots, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose) { - invisible(.Call(`_mfbvar_mcmc_ss_csv`, y_in_p, Pi, Sigma, psi, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, inv_prior_psi_Omega, inv_prior_psi_Omega_mean, check_roots, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, 
prior_df, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose)) +mcmc_ssng_csv <- function(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng) { + invisible(.Call(`_mfbvar_mcmc_ssng_csv`, y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng)) } -mcmc_ssng_csv <- function(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose) { - invisible(.Call(`_mfbvar_mcmc_ssng_csv`, y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose)) +mcmc_minn_diffuse <- function(y_in_p, Pi, Sigma, Z, Z_fcst, aux, global, local, slice, Lambda_comp, prior_Pi_Omega, prior_Pi_mean_vec, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, a, gig) { + 
invisible(.Call(`_mfbvar_mcmc_minn_diffuse`, y_in_p, Pi, Sigma, Z, Z_fcst, aux, global, local, slice, Lambda_comp, prior_Pi_Omega, prior_Pi_mean_vec, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, a, gig)) } -mcmc_minn_diffuse <- function(y_in_p, Pi, Sigma, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose) { - invisible(.Call(`_mfbvar_mcmc_minn_diffuse`, y_in_p, Pi, Sigma, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose)) +mcmc_ssng_diffuse <- function(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng) { + invisible(.Call(`_mfbvar_mcmc_ssng_diffuse`, y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng)) } -mcmc_minn_iw <- function(y_in_p, Pi, Sigma, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, prior_nu) { - invisible(.Call(`_mfbvar_mcmc_minn_iw`, y_in_p, Pi, Sigma, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, prior_nu)) +mcmc_minn_fsv <- function(y_in_p, Pi, Z, Z_fcst, mu, phi, sigma, f, facload, h, aux, global, local, slice, Lambda_comp, prior_Pi_Omega, prior_Pi_AR1, Z_1, bmu, Bmu, a0idi, b0idi, a0fac, b0fac, Bsigma, B011inv, B022inv, priorh0, armarestr, armatau2, n_fac, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, a, gig) 
{ + invisible(.Call(`_mfbvar_mcmc_minn_fsv`, y_in_p, Pi, Z, Z_fcst, mu, phi, sigma, f, facload, h, aux, global, local, slice, Lambda_comp, prior_Pi_Omega, prior_Pi_AR1, Z_1, bmu, Bmu, a0idi, b0idi, a0fac, b0fac, Bsigma, B011inv, B022inv, priorh0, armarestr, armatau2, n_fac, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, a, gig)) } -mcmc_ss_diffuse <- function(y_in_p, Pi, Sigma, psi, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, D_mat, dt, d1, d_fcst_lags, inv_prior_psi_Omega, inv_prior_psi_Omega_mean, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose) { - invisible(.Call(`_mfbvar_mcmc_ss_diffuse`, y_in_p, Pi, Sigma, psi, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, D_mat, dt, d1, d_fcst_lags, inv_prior_psi_Omega, inv_prior_psi_Omega_mean, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose)) +mcmc_ssng_fsv <- function(y_in_p, Pi, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, mu, phi, sigma, f, facload, h, Lambda_comp, prior_Pi_Omega, prior_Pi_AR1, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, bmu, Bmu, a0idi, b0idi, a0fac, b0fac, Bsigma, B011inv, B022inv, priorh0, armarestr, armatau2, n_fac, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng) { + invisible(.Call(`_mfbvar_mcmc_ssng_fsv`, y_in_p, Pi, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, mu, phi, sigma, f, facload, h, Lambda_comp, prior_Pi_Omega, prior_Pi_AR1, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, bmu, Bmu, a0idi, b0idi, a0fac, b0fac, Bsigma, B011inv, B022inv, priorh0, armarestr, armatau2, n_fac, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng)) } -mcmc_ss_iw <- function(y_in_p, Pi, Sigma, psi, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, inv_prior_psi_Omega, inv_prior_psi_Omega_mean, 
check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose) { - invisible(.Call(`_mfbvar_mcmc_ss_iw`, y_in_p, Pi, Sigma, psi, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, inv_prior_psi_Omega, inv_prior_psi_Omega_mean, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose)) +mcmc_minn_iw <- function(y_in_p, Pi, Sigma, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, prior_nu) { + invisible(.Call(`_mfbvar_mcmc_minn_iw`, y_in_p, Pi, Sigma, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, prior_nu)) } -mcmc_ssng_diffuse <- function(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose) { - invisible(.Call(`_mfbvar_mcmc_ssng_diffuse`, y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose)) -} - -mcmc_ssng_iw <- function(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose) { - invisible(.Call(`_mfbvar_mcmc_ssng_iw`, y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, 
d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose)) +mcmc_ssng_iw <- function(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng) { + invisible(.Call(`_mfbvar_mcmc_ssng_iw`, y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng)) } variances_fsv <- function(variances, latent, facload, variables_num, n_fac, n_reps, n_T, n_vars, n_plotvars) { @@ -173,6 +176,10 @@ do_rgig1 <- function(lambda, chi, psi) { .Call(`_mfbvar_do_rgig1`, lambda, chi, psi) } +rig <- function(mu, lambda) { + .Call(`_mfbvar_rig`, mu, lambda) +} + rmvn <- function(Phi, d, alpha) { .Call(`_mfbvar_rmvn`, Phi, d, alpha) } @@ -183,18 +190,21 @@ rmvn_ccm <- function(Phi, d, alpha, c, j) { #' @rdname dnorminvwish #' @keywords internal +#' @noRd rmatn <- function(M, Q, P) { .Call(`_mfbvar_rmatn`, M, Q, P) } #' @rdname dnorminvwish #' @keywords internal +#' @noRd rinvwish <- function(v, S) { .Call(`_mfbvar_rinvwish`, v, S) } #' @rdname dmultn #' @keywords internal +#' @noRd rmultn <- function(m, Sigma) { .Call(`_mfbvar_rmultn`, m, Sigma) } @@ -228,6 +238,7 @@ rsimsm_adaptive_univariate <- function(y_, Phi, Sigma, Lambda, Z1, n_q_, T_b, f) #' @templateVar P0 TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return For \code{loglike}: #' \item{}{An \code{n_T}-long vector of the log-likelihoods. 
\code{exp(sum(loglike(...)))} is the likelihood.} loglike <- function(Y, Lambda, Pi_comp, Q_comp, n_T, n_vars, n_comp, z0, P0) { diff --git a/R/builders.R b/R/builders.R index 4134c2c..c382c00 100644 --- a/R/builders.R +++ b/R/builders.R @@ -5,6 +5,7 @@ #' @templateVar d TRUE #' @templateVar n_lags TRUE #' @keywords internal +#' @noRd #' @template man_template #' #' @return \item{DD}{A matrix of size \code{n_T * ((n_lags + 1)*n_determ)} where @@ -31,6 +32,7 @@ build_DD <- function(d, n_lags) { #' @templateVar n_vars TRUE #' @templateVar n_lags TRUE #' @keywords internal +#' @noRd #' @template man_template #' #' @return @@ -47,6 +49,7 @@ build_companion <- function(Pi, n_vars, n_lags) { #' @templateVar z TRUE #' @templateVar n_lags TRUE #' @keywords internal +#' @noRd #' @template man_template #' #' @return @@ -73,6 +76,7 @@ build_Z <- function(z, n_lags) { #' @templateVar n_determ TRUE #' @template man_template #' @keywords internal +#' @noRd #' @describeIn build_U Build the U matrix (R implementation) #' #' @return @@ -97,6 +101,7 @@ build_U <- function(Pi, n_determ) { #' @templateVar z TRUE #' @template man_template #' @keywords internal +#' @noRd #' #' @return #' \item{Y_tilde}{A matrix of size \code{n_T * n_vars}.} @@ -121,6 +126,7 @@ build_Y_tilde <- function(Pi, z) { #' @templateVar n_lags TRUE #' @templateVar n_T TRUE #' @keywords internal +#' @noRd #' @template man_template #' #' @return @@ -145,6 +151,7 @@ build_M_Lambda <- function(Y, Lambda, n_vars, n_lags, n_T) { #' @templateVar n_lags TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return #' \item{Lambda}{An \code{n_vars * (n_vars*n_pseudolags)} matrix, where \code{n_pseudolags} is \code{max(5, n_lags)} if any variable uses the triangular aggregation scheme, \code{max(3, n_lags)} if any uses the quarterly average.} #' @details The choice \code{aggregation = "identity"} means that what is observed is assumed to be exactly the underlying variable, whereas \code{aggregation = "average"} 
uses the quarterly average of the monthly underlying observations. Lastly, \code{aggregation = "triangular"} uses the triangular specification used by Mariano and Murasawa (2010). diff --git a/R/data.R b/R/data.R index a915ed4..f6a498e 100644 --- a/R/data.R +++ b/R/data.R @@ -1,4 +1,4 @@ -#' Real-time data set. +#' Real-time data set for Sweden. #' #' A dataset containing real-time data for mixed and quarterly frequencies. #' @@ -16,3 +16,15 @@ #' Statistics Sweden (2016) Revisions, expenditure approach and hours worked at each release. #' "mf_sweden" + +#' US Macroeconomic Data Set +#' +#' A dataset containing mixed-frequency data from FRED for three US macroeconomic variables. +#' +#' @format A list with components: +#' \describe{ +#' \item{CPIAUCSL}{inflation rate} +#' \item{UNRATE}{unemployment rate} +#' \item{GDPC1}{GDP growth rate} +#' } +"mf_usa" diff --git a/R/densities.R b/R/densities.R index 47c244d..10545bf 100644 --- a/R/densities.R +++ b/R/densities.R @@ -9,6 +9,7 @@ #' @templateVar S TRUE #' @templateVar v TRUE #' @keywords internal +#' @noRd #' @template man_template #' @return #' For \code{dnorminvwish}: the evaluated density.\\n @@ -32,6 +33,7 @@ dnorminvwish <- function(X, Sigma, M, P, S, v) { #' @template man_template #' @inherit dnorminvwish #' @keywords internal +#' @noRd #' @return #' For \code{dmultn}: the evaluated density.\\n #' For \code{rmultn}: \eqn{p} random numbers. @@ -49,6 +51,7 @@ dmultn <- function(x, m, Sigma) { #' @templateVar chisq_val TRUE #' @template man_template #' @keywords internal +#' @noRd #' @inherit dmultn dnorm_trunc <- function(x, m, V_inv, d, p_trunc, chisq_val) { qf <- t(x - m) %*% V_inv %*% (x - m) @@ -65,6 +68,7 @@ dnorm_trunc <- function(x, m, V_inv, d, p_trunc, chisq_val) { #' @param v degrees of freedom #' @keywords internal #' @inherit dmultn +#' @noRd #' @references Karlsson, S. (2013) Forecasting with Bayesian Vector Autoregression. #' In Elliott, G. 
and Timmermann, A., editors, \emph{Handbook of Economic Forecasting}, #' volume 2, chapter 15, pp. 791-897. Elsevier B.V. diff --git a/R/eval.R b/R/eval.R index 19827cb..c1d7b2e 100644 --- a/R/eval.R +++ b/R/eval.R @@ -15,6 +15,7 @@ #' @templateVar n_reps TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return The return is: #' \item{evals}{A vector with the evaulations.} #' @@ -28,7 +29,7 @@ eval_Pi_Sigma_RaoBlack <- function(Z_array, d, post_psi_center, post_Pi_center, for (i in 1:length(evals)) { # Demean z, create Z_array (companion form version) demeaned_z <- Z_array[,,i+1] - d %*% post_psi_center - demeaned_Z <- build_Z(z = demeaned_z, n_lags = n_lags) + demeaned_Z <- mfbvar:::build_Z(z = demeaned_z, n_lags = n_lags) XX <- demeaned_Z[-nrow(demeaned_Z), ] YY <- demeaned_Z[-1, 1:n_vars] XXt.XX <- crossprod(XX) @@ -47,7 +48,7 @@ eval_Pi_Sigma_RaoBlack <- function(Z_array, d, post_psi_center, post_Pi_center, post_s_i <- prior_S + s_sample + t(Pi_diff) %*% chol2inv(chol(prior_Pi_Omega + XXt.XX.inv)) %*% Pi_diff # Evaluate - evals[i] <- exp(dnorminvwish(X = t(post_Pi_center), Sigma = post_Sigma_center, M = post_Pi_i, P = post_Pi_Omega_i, S = post_s_i, v = post_nu)) + evals[i] <- dnorminvwish(X = t(post_Pi_center), Sigma = post_Sigma_center, M = post_Pi_i, P = post_Pi_Omega_i, S = post_s_i, v = post_nu) } return(evals) @@ -70,6 +71,7 @@ eval_Pi_Sigma_RaoBlack <- function(Z_array, d, post_psi_center, post_Pi_center, #' @templateVar n_reps TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return The return is: #' \item{evals}{A vector with the evaulations.} #' diff --git a/R/fill_na.R b/R/fill_na.R index 03740e3..e1457a5 100644 --- a/R/fill_na.R +++ b/R/fill_na.R @@ -4,6 +4,7 @@ #' @templateVar Y TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return A matrix with no \code{NA}s. 
fill_na <- function(Y) { apply(Y, 2, function(x) { diff --git a/R/interface.R b/R/interface.R index 4060bd9..6b095d8 100644 --- a/R/interface.R +++ b/R/interface.R @@ -1,16 +1,15 @@ -#' Set priors for an mfbvar model +#' Set priors for mfbvar #' -#' Create an object storing all information needed for estimation, including data as well as model and prior specifications for both a Minnesota or steady-state prior. +#' The function creates an object storing all information needed for estimating a mixed-frequency BVAR. The object includes data as well as details for the model and its priors. #' -#' @templateVar Y TRUE -#' @templateVar freq TRUE -#' @param aggregation the aggregation scheme used for relating latent monthly series to their quarterly observations. The default is \code{”average"} for averaging over the monthly observations within each quarter. The alternative is \code{"triangular"} is to use the Mariano-Murasawa triangular set of weights. See details for more information. +#' @param Y data input. For monthly-quarterly data, should be a list with components containing regularly spaced time series (that inherit from \code{ts} or \code{zooreg}). If a component contains a single time series, the component itself must be named. If a component contains multiple time series, each time series must be named. Monthly variables can only contain missing values at the end of the sample, and should precede quarterly variables in the list. Matrices in which quarterly variables are padded with \code{NA} and observations stored at the end of each quarter are also accepted, but then the frequency of each variable must be given in the argument \code{freq}. Weekly-monthly mixes can be provided using the matrix way, see examples. +#' @param aggregation the aggregation scheme used for relating latent high-frequency series to their low-frequency observations. 
The default is \code{"average"} for averaging within each low-frequency period (e.g., quarterly observations are averages of the constituent monthly observations). The alternative \code{"triangular"} can be used for monthly-quarterly mixes, and uses the Mariano-Murasawa triangular set of weights. See details for more information. #' @templateVar prior_Pi_AR1 TRUE #' @templateVar lambda1 TRUE -#' @param (Only if \code{variance %in% c("diffuse", "fsv")}) The cross-variable tightness +#' @param lambda2 (Only if \code{variance} is one of \code{c("diffuse", "fsv")}) The cross-variable tightness #' @templateVar lambda3 TRUE #' @param lambda4 (Minnesota only) Prior variance of the intercept. -#' @param block_exo (Only if \code{variance %in% c("diffuse", "fsv")}) Vector of indexes/names of variables to be treated as block exogenous +#' @param block_exo (Only if \code{variance} is one of \code{c("diffuse", "fsv")}) Vector of indexes/names of variables to be treated as block exogenous #' @templateVar n_lags TRUE #' @templateVar n_fcst TRUE #' @param n_thin Store every \code{n_thin}th draw @@ -18,28 +17,37 @@ #' @templateVar n_reps TRUE #' @param d (Steady state only) Either a matrix with same number of rows as \code{Y} and \code{n_determ} number of columns containing the deterministic terms or a string \code{"intercept"} for requesting an intercept as the only deterministic #' term.
+#' @param prior_psi_Omega (Steady state only) Matrix of size \code{(n_determ*n_vars) * (n_determ*n_vars)} with the prior covariance of the steady-state parameters. +#' @templateVar check_roots TRUE +#' @param s (Hierarchical steady state only) scalar giving the tuning parameter for the Metropolis-Hastings proposal for the kurtosis parameter. If \code{s < 0}, then adaptive Metropolis-Hastings targeting an acceptance rate of 0.44 is used, where the scaling factor is restricted to the interval \code{[-abs(s), abs(s)]} +#' @param prior_ng (Hierarchical steady state only) vector with two elements giving the parameters \code{c(c0, c1)} of the hyperprior for the global shrinkage parameter #' @param prior_phi (Only used with common stochastic volatility) Vector with two elements \code{c(mean, variance)} for the AR(1) parameter in the log-volatility regression #' @param prior_sigma2 (Only used with common stochastic volatility) Vector with two elements \code{c(mean, df)} for the innovation variance of the log-volatility regression #' @param n_fac (Only used with factor stochastic volatility) Number of factors to use for the factor stochastic volatility model -#' @param cl (Only used with factor stochastic volatility) Cluster object to use for drawing regression parameters in parallel +#' @param n_cores (Only used with factor stochastic volatility) Number of cores to use for drawing regression parameters in parallel +#' @param ... (Only used with factor stochastic volatility) Arguments to pass along to \code{\link[factorstochvol]{fsvsample}}. See details. #' @templateVar verbose TRUE -#' @templateVar check_roots TRUE #' @template man_template -#' @details The first arguments (\code{Y} through \code{n_reps}) must be set for the model to be estimated irrespective of the choice -#' of prior, but some have default values. +#' @details Some support is provided for single-frequency data sets, where \code{Y} contains variables sampled with the same frequency.
#' -#' For the Minnesota prior, \code{lambda4} must also be set, but it too has a default that it relies on if not specified. +#' The aggregation weights that can be used for \code{aggregation} are intra-quarterly averages (\code{aggregation = "average"}), where the quarterly observations \eqn{y_{q,t}} are assumed to relate to the underlying monthly series \eqn{z_{q,t}} through: +#' \deqn{y_{q,t} = \frac{1}{3}(z_{q,t} + z_{q,t-1} + z_{q,t-2})} #' -#' For the steady-state prior, the deterministic matrix needs to be supplied, or a string indicating that the intercept should be -#' the only deterministic term. If the latter, also \code{d_fcst} is set to be intercept only. Otherwise, if forecasts are requested -#' (\code{n_fcst > 0}) also \code{d_fcst} needs to be provided. Finally, the prior moments for the steady-state parameters must also be -#' provided. +#' If \code{aggregation = "triangular"}, then instead +#' \deqn{y_{q,t} = \frac{1}{9}(z_{q,t} + 2z_{q,t-1} + 3z_{q,t-2} + 2z_{q,t-3} + z_{q,t-4})} #' -#' For modeling stochastic volatility by the factor stochastic volatility model, the number of factors to use must be supplied. Further arguments can be passed along to \code{\link[factorstochvol]{fsvsample}}. If arguments are not given, the defaults used are as follows (see \code{\link[factorstochvol]{fsvsample}} for descriptions): +#' The latter is typically used when modeling growth rates, and the former when working with log-levels. +#' +#' If the steady-state prior is to be used, the deterministic matrix needs to be supplied, or a string indicating that the intercept should be the only deterministic term (\code{d = "intercept"}). If the latter, \code{d_fcst} is automatically set to be intercept only. Otherwise, if forecasts are requested +#' (\code{n_fcst > 0}) also \code{d_fcst} must be provided. Finally, the prior means of the steady-state parameters must (at the very minimum) also be +#' provided in \code{prior_psi_mean}.
The steady-state prior involves inverting the lag polynomial. For this reason, draws in which the largest eigenvalue +#' (in absolute value) of the lag polynomial is greater than 1 are discarded and new draws are made if \code{check_roots = TRUE}. The maximum number of +#' attempts is 1,000. +#' +#' For modeling stochastic volatility by the factor stochastic volatility model, the number of factors to use must be supplied. Further arguments can be passed along, but are not included as formal arguments. If the default settings are not overriden, the defaults used are as follows (see \code{\link[factorstochvol]{fsvsample}} for descriptions): #' \itemize{ #' \item{\code{priormu}}{\code{ = c(0, 10)}} #' \item{\code{priorphiidi}}{\code{ = c(10, 3)}} @@ -47,33 +55,36 @@ #' \item{\code{priorsigmaidi}}{\code{ = 1}} #' \item{\code{priorsigmafac}}{\code{ = 1}} #' \item{\code{priorfacload}}{\code{ = 1}} -#' \item{\code{priorng}}{\code{ = c(1, 1)}} -#' \item{\code{columnwise}}{\code{ = FALSE}} #' \item{\code{restrict}}{\code{ = "none"}} -#' \item{\code{heteroskedastic}}{\code{ = TRUE}} -#' \item{\code{priorhomoskedastic}}{\code{ = NA}} #' } #' -#' The steady-state prior involves inverting the lag polynomial. For this reason, draws in which the largest eigenvalue -#' (in absolute value) of the lag polynomial is greater than 1 are discarded and new draws are made. The maximum number of -#' attempts is 1,000. The components in the output named \code{roots} and \code{num_tries} contain the largest roots and the -#' number of attempts, respectively, if \code{check_roots = TRUE} (the default). +#' The function \code{update_prior} can be used to update an existing prior object. See the examples. +#' +#' @return An object of class \code{mfbvar_prior} that is used as input to \code{estimate_mfbvar}. 
#' @examples -#' prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), -#' n_lags = 4, n_burnin = 100, n_reps = 100) +#' # Standard list-based way +#' prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 100) #' prior_obj <- update_prior(prior_obj, n_fcst = 4) -#' @seealso \code{\link{interval_to_moments}}, \code{\link{print.mfbvar_prior}}, \code{\link{summary.mfbvar_prior}}, \code{\link{estimate_mfbvar}}, \code{\link[factorstochvol]{fsvsample}} -set_prior <- function(Y, freq, aggregation = "average", prior_Pi_AR1 = rep(0, ncol(Y)), lambda1 = 0.2, +#' +#' # Weekly-monthly mix of data, four weeks per month +#' Y <- matrix(rnorm(400), 100, 4) +#' Y[setdiff(1:100,seq(4, 100, by = 4)), 4] <- NA +#' prior_obj <- set_prior(Y = Y, freq = c(rep("w", 3), "m"), +#' n_lags = 4, n_reps = 10) +#' @seealso \code{\link{estimate_mfbvar}}, \code{\link{update_prior}}, \code{\link{interval_to_moments}}, \code{\link{print.mfbvar_prior}}, \code{\link{summary.mfbvar_prior}}, \code{\link[factorstochvol]{fsvsample}} +set_prior <- function(Y, aggregation = "average", prior_Pi_AR1 = 0, lambda1 = 0.2, lambda2 = 0.5, lambda3 = 1, lambda4 = 10000, block_exo = NULL, n_lags, - n_fcst = 0, n_thin = 1, n_burnin, n_reps, d = NULL, d_fcst = NULL, - prior_psi_mean = NULL, prior_psi_Omega = NULL, s = -1000, prior_phi = c(0.9, 0.1), + n_fcst = 0, n_thin = 1, n_reps, n_burnin = n_reps, freq = NULL, d = NULL, d_fcst = NULL, + prior_psi_mean = NULL, prior_psi_Omega = NULL, check_roots = FALSE, + s = -1000, prior_ng = c(0.01, 0.01), + prior_phi = c(0.9, 0.1), prior_sigma2 = c(0.01, 4), n_fac = NULL, - cl = NULL, verbose = FALSE, check_roots = FALSE, ...) { + n_cores = 1, verbose = FALSE, ...) { prior_call <- mget(names(formals())[names(formals()) != "..."], sys.frame(sys.nframe())) prior_call$supplied_args <- names(as.list(match.call()))[-1] ellipsis <- list(...) 
fsv_names <- names(ellipsis) - fsv_arguments <- c("priormu", "priorphiidi", "priorphifac", "priorsigmaidi", "priorsigmafac", "priorfacload", "priorng", "columnwise", "restrict", "heteroskedastic", "priorhomoskedastic") + fsv_arguments <- c("priormu", "priorphiidi", "priorphifac", "priorsigmaidi", "priorsigmafac", "priorfacload") if (!all(fsv_names %in% fsv_arguments)) { unused_names <- setdiff(fsv_names, fsv_arguments) warning(sprintf("The following arguments passed along to fsvsample are unused: %s", ifelse(unused_names == "", "[unnamed component]", unused_names))) @@ -84,9 +95,7 @@ set_prior <- function(Y, freq, aggregation = "average", prior_Pi_AR1 = rep(0, nc return(ret) } #' @rdname set_prior -#' #' @param prior_obj an object of class \code{mfbvar_prior} -#' @param ... named arguments for prior attributes to update update_prior <- function(prior_obj, ...) { if(!inherits(prior_obj, "mfbvar_prior")) { stop("The object to be updated is not of class mfbvar_prior.") @@ -103,13 +112,14 @@ update_prior <- function(prior_obj, ...) { return(prior_obj) } - -#' @rdname set_prior check_prior <- function(prior_obj) { if (!is.matrix(prior_obj$Y)) { - if (!is.data.frame(prior_obj$Y)) { - stop(paste0("Y is of class ", class(prior_obj$Y), ", but must be matrix or data frame.")) - } else { + if (inherits(prior_obj$Y, "list")) { + list_conv <- list_to_matrix(prior_obj$Y) + prior_obj$Y <- list_conv[[1]] + prior_obj$freq <- list_conv[[2]] + prior_obj$supplied_args <- c(prior_obj$supplied_args, "freq") + } else if (is.data.frame(prior_obj$Y)) { col_class <- sapply(prior_obj$Y, class) if (all(col_class == "numeric")) { prior_obj$Y <- as.matrix(prior_obj$Y) @@ -131,6 +141,8 @@ check_prior <- function(prior_obj) { else { stop(sprintf("The data frame contains %d non-numeric columns. 
Please include at most one non-numeric column that can be coerced to dates.", sum(!(col_class == "numeric")))) } + } else { + stop(paste0("Y is of class ", class(prior_obj$Y), ", but must be matrix, data frame/tibble or a list of ts or zooreg objects.")) } } else { if (is.null(rownames(prior_obj$Y))) { @@ -179,32 +191,80 @@ check_prior <- function(prior_obj) { if ("freq" %in% prior_obj$supplied_args) { if (!(is.atomic(prior_obj$freq) && is.character(prior_obj$freq))) { stop("freq is of class ", class(prior_obj$freq), ", but it must be a character vector.") - } else if (!all(prior_obj$freq %in% c("m", "q"))) { - stop("Elements of freq must be 'm' or 'q'.") + } else if (!all(prior_obj$freq %in% c("w", "m", "q"))) { + stop("Elements of freq must be 'w', 'm' or 'q'.") } else if (length(prior_obj$freq) != ncol(prior_obj$Y)) { stop("The length of freq is ", length(prior_obj$freq), ", but Y has ", ncol(prior_obj$Y), " columns.") - } else if (which.max(prior_obj$freq == "m") > which.max(prior_obj$freq == "q")) { - stop("Monthly variables must be placed before quarterly variables.") + } else { + + freq_pos <- c( + ifelse(any(prior_obj$freq == "q"), which.max(prior_obj$freq == "q"), NA), + ifelse(any(prior_obj$freq == "m"), which.max(prior_obj$freq == "m"), NA), + ifelse(any(prior_obj$freq == "w"), which.max(prior_obj$freq == "w"), NA) + ) + freqs <- c("q", "m", "w") + freqs <- freqs[!is.na(freq_pos)] + if (length(freqs)>2) { + stop("mfbvar can currently only handle a mix of two frequencies.") + } + if (length(freqs)>1 && freqs[1]=="q" && freqs[2] == "w") { + stop("mfbvar can currently only handle weekly-monthly or monthly-quarterly mixes.") + } + prior_obj$freqs <- freqs + if (length(freq_pos[!is.na(freq_pos)])>1 && diff(freq_pos[!is.na(freq_pos)])>0) { + stop("Variables must be placed in weekly-monthly-quarterly order.") + } } } else { stop("freq: must be supplied.") } - if ("m" %in% prior_obj$freq) { - if (min(unlist(apply(prior_obj$Y[, prior_obj$freq == "m", drop = 
FALSE], 2, function(x) Position(is.na, x, nomatch = 9999999999)))) == 1) { - stop("Y: monthly variables are NA at the beginning of the sample.") - } + if (length(freqs)>1) { + if (min(unlist(apply(prior_obj$Y[, prior_obj$freq %in% freqs[-1], drop = FALSE], 2, function(x) Position(is.na, x, nomatch = 9999999999)))) == 1) { + stop("Y: high-frequency variables are NA at the beginning of the sample.") + } + } else { + if (min(unlist(apply(prior_obj$Y, 2, function(x) Position(is.na, x, nomatch = 9999999999)))) == 1) { + stop("Y: monthly variables are NA at the beginning of the sample.") + } } + if ("aggregation" %in% prior_obj$supplied_args) { - if (is.atomic(prior_obj$aggregation)) { + if (is.atomic(prior_obj$aggregation) || is.matrix(prior_obj$aggregation)) { } else { - stop("aggregation must be a vector, but is now of class ", class(prior_obj$aggregation), ".") + stop("aggregation must be a character vector or a matrix, but is now of class ", class(prior_obj$aggregation), ".") } } else { prior_obj$aggregation <- "average" } + if (is.matrix(prior_obj$aggregation)) { + prior_obj$Lambda <- prior_obj$aggregation + prior_obj$aggregation <- "custom" + } else { + freq <- prior_obj$freq + freqs <- prior_obj$freqs + n_l <- ifelse(length(freqs)>1, sum(freq == freqs[1]), 0) + n_h <- ifelse(length(freqs)>1, sum(freq == freqs[2]), length(freq)) + if (n_l > 0 && freqs[1] == "q") { + if (prior_obj$aggregation == "average") { + prior_obj$Lambda_ <- build_Lambda(rep("average", n_l), 3) + } else { + prior_obj$Lambda_ <- build_Lambda(rep("triangular", n_l), 5)} + } else if (n_l == 0) { + prior_obj$Lambda_ <- matrix(0, 1, 3) + } else if (freqs[1] == "m") { + if (prior_obj$aggregation == "triangular") { + stop("Triangular aggregation not supported for weekly data.") + } else { + prior_obj$Lambda_ <- matrix(0.25, 1, 4) + prior_obj$Lambda_ <- kronecker(prior_obj$Lambda_, diag(n_l)) + } + } + } + + if ("prior_Pi_AR1" %in% prior_obj$supplied_args) { if (is.atomic(prior_obj$prior_Pi_AR1)) { 
if (length(prior_obj$prior_Pi_AR1) == 1) { @@ -256,13 +316,13 @@ check_prior <- function(prior_obj) { stop("block_exo must be a vector of indexes or names.") } else { if (is.character(prior_obj$block_exo)) { - if (all(prior_obj$block_exo %in% colnames(obj$Y))) { - prior_obj$block_exo <- sapply(prior_obj$block_exo, function(x) colnames(obj$Y) == x) + if (all(prior_obj$block_exo %in% colnames(prior_obj$Y))) { + prior_obj$block_exo <- which(prior_obj$block_exo %in% colnames(prior_obj$Y)) } } } } else { - prior_obj$supplied_args <- c(prior_obj$supplied_args, "lambda4") + prior_obj$supplied_args <- c(prior_obj$supplied_args, "block_exo") } @@ -271,7 +331,7 @@ check_prior <- function(prior_obj) { stop("prior_psi_mean must be a vector or matrix with one row or column.") } if (is.atomic(prior_obj$prior_psi_mean)) { - if (length(prior_obj$prior_psi_mean) != ncol(prior_obj$Y)) { + if (length(prior_obj$prior_psi_mean) %% ncol(prior_obj$Y) != 0) { stop("prior_psi_mean has ", length(prior_obj$prior_psi_mean), " elements, but there are ", ncol(prior_obj$Y), " variables in Y.") } } @@ -292,9 +352,32 @@ check_prior <- function(prior_obj) { if (dim(prior_obj$prior_psi_Omega)[1] != dim(prior_obj$prior_psi_Omega)[2]) { stop("prior_psi_Omega must be a positive-definite symmetric matrix.") } + if (dim(prior_obj$prior_psi_Omega)[1] != length(prior_obj$prior_psi_mean)) { + stop("The dimension of prior_psi_Omega must correspond to the number of elements in prior_psi_mean.") + } } } + if ("s" %in% prior_obj$supplied_args) { + if (!is.atomic(prior_obj$s) || length(prior_obj$s) > 1) { + stop("s must be a vector with a single element.") + } + } else { + prior_obj$supplied_args <- c(prior_obj$supplied_args, "s") + } + + + if ("prior_ng" %in% prior_obj$supplied_args) { + if (!is.atomic(prior_obj$prior_ng) || length(prior_obj$prior_ng) > 2) { + stop("prior_ng must be a vector with one or two elements.") + } else { + if (length(prior_obj$prior_ng) == 1) { + prior_obj$prior_ng <- 
c(prior_obj$prior_ng, prior_obj$prior_ng) + } + } + } else { + prior_obj$supplied_args <- c(prior_obj$supplied_args, "prior_ng") + } if ("n_lags" %in% prior_obj$supplied_args) { if (!is.atomic(prior_obj$n_lags) || length(prior_obj$n_lags) > 1) { @@ -304,6 +387,16 @@ check_prior <- function(prior_obj) { stop("n_lags: No lag length specified.\n") } + if (prior_obj$aggregation == "triangular") { + if (prior_obj$n_lags < 5) { + stop("The number of lags must be at least 5 when using triangular aggregation.") + } + } else if (prior_obj$aggregation == "average") { + if (prior_obj$n_lags < 3) { + stop("The number of lags must be at least 3 when using intra-quarterly averaging.") + } + } + if ("n_fcst" %in% prior_obj$supplied_args) { if (!is.atomic(prior_obj$n_fcst) || length(prior_obj$n_fcst) > 1) { stop("n_fcst must be a vector with a single element.") @@ -322,15 +415,6 @@ check_prior <- function(prior_obj) { } } - - if ("n_burnin" %in% prior_obj$supplied_args) { - if (!is.atomic(prior_obj$n_burnin) || length(prior_obj$n_burnin) > 1) { - stop("n_burnin must be a vector with a single element.") - } - } else { - stop("n_burnin: Number of burn-in draws to use not specified.\n") - } - if ("n_reps" %in% prior_obj$supplied_args) { if (!is.atomic(prior_obj$n_reps) || length(prior_obj$n_reps) > 1) { stop("n_reps must be a vector with a single element.") @@ -339,6 +423,12 @@ check_prior <- function(prior_obj) { stop("n_reps: Number of draws to use in main chain not specified.\n") } + if (!is.atomic(prior_obj$n_burnin) || length(prior_obj$n_burnin) > 1) { + stop("n_burnin must be a vector with a single element.") + } else if (!("n_burnin" %in% prior_obj$supplied_args)) { + prior_obj$supplied_args <- c(prior_obj$supplied_args, "n_burnin") + } + if (!is.logical(prior_obj$check_roots)) { stop("check_roots: must be logical.\n") } @@ -365,8 +455,8 @@ check_prior <- function(prior_obj) { stop("The number of factors is not a numeric scalar value.") } - if (!inherits(prior_obj$cl, 
"cluster") && !is.null(prior_obj$cl)) { - stop(sprintf("cl should be a cluster object, but is %s", class(prior_obj$cl))) + if (!is.atomic(prior_obj$n_cores) || length(prior_obj$n_cores) > 1) { + stop("n_cores must be a vector with a single element.") } if ("priormu" %in% prior_obj$supplied_args) { @@ -394,19 +484,29 @@ check_prior <- function(prior_obj) { } if ("priorsigmaidi" %in% prior_obj$supplied_args) { - if (!(is.numeric(prior_obj$priorsigmaidi) && is.atomic(prior_obj$priorsigmaidi) && length(prior_obj$priorsigmaidi) %in% c(1, ncol(prior_obj$Y)))) { - stop(sprintf("priorsigmaidi should be a numeric vector with 1 or n_vars elements, but is %s with %d elements", class(prior_obj$priorsigmaidi), length(prior_obj$priorsigmaidi))) + if (!(is.numeric(prior_obj$priorsigmaidi))) { + stop("priorsigmaidi should be numeric.") + } + if (length(prior_obj$priorsigmaidi) == 1) { + } else if (length(prior_obj$priorsigmaidi) == ncol(prior_obj$Y)) { + } else { + stop("priorsigmaidi should be a numeric vector of length 1 or n_vars.") } } else { - prior_obj$priorsigmaidi <- 1 + prior_obj$priorsigmaidi <- rep(1, ncol(prior_obj$Y)) } if ("priorsigmafac" %in% prior_obj$supplied_args) { - if (!(is.numeric(prior_obj$priorsigmafac) && is.atomic(prior_obj$priorsigmafac) && length(prior_obj$priorsigmafac) %in% c(1, ncol(prior_obj$n_fac)))) { - stop(sprintf("priorsigmafac should be a numeric vector with 1 or n_vars elements, but is %s with %d elements", class(prior_obj$priorsigmafac), length(prior_obj$priorsigmaidi))) + if (!(is.numeric(prior_obj$priorsigmafac))) { + stop("priorsigmafac should be numeric.") + } + if (length(prior_obj$priorsigmafac) == 1) { + } else if (length(prior_obj$priorsigmafac) == prior_obj$n_fac) { + } else { + stop("priorsigmafac should be a numeric vector of length 1 or n_fac") } } else { - prior_obj$priorsigmafac <- 1 + prior_obj$priorsigmafac <- rep(1, prior_obj$n_fac) } if ("priorfacload" %in% prior_obj$supplied_args) { @@ -417,54 +517,32 @@ check_prior <- 
function(prior_obj) { prior_obj$priorfacload <- 1 } - if ("priorng" %in% prior_obj$supplied_args) { - if (!(is.numeric(prior_obj$priorng) && length(prior_obj$priorng) == 2)) { - stop(sprintf("priorng should be a numeric vector of length 2, but is %s of length %d", class(prior_obj$priorng), length(prior_obj$priorng))) - } - } else { - prior_obj$priorng <- c(1, 1) - } - - if ("columnwise" %in% prior_obj$supplied_args) { - if (!(is.logical(prior_obj$columnwise) && length(prior_obj$priorng) == 1)) { - stop(sprintf("columnwise should be a single logical value, but is %s of length %d", class(prior_obj$columnwise), length(prior_obj$columnwise))) - } - } else { - prior_obj$columnwise <- FALSE - } - if ("restrict" %in% prior_obj$supplied_args) { if (!(is.character(prior_obj$restrict) && length(prior_obj$priorng) == 1)) { stop(sprintf("restrict should be a single string, but is %s of length %d", class(prior_obj$restrict), length(prior_obj$restrict))) + } else { + if (!(prior_obj$restrict %in% c("none", "upper"))) { + stop(sprintf("restrict should be 'none' or 'upper', but is %s", prior_obj$restrict)) + } } } else { prior_obj$restrict <- "none" } - if ("heteroskedastic" %in% prior_obj$supplied_args) { - if (!(is.logical(prior_obj$heteroskedastic) && length(prior_obj$priorng) %in% c(1, 2, ncol(prior_obj$Y)+prior_obj$n_fac))) { - stop(sprintf("heteroskedastic should be a vector of 1, 2, or n_vars + n_fac logical values, but is %s of length %d", class(prior_obj$heteroskedastic), length(prior_obj$heteroskedastic))) - } - } else { - prior_obj$heteroskedastic <- TRUE - } - if (any(!prior_obj$heteroskedastic)) { - if ("priorhomoskedastic" %in% prior_obj$supplied_args) { - if (!(is.numeric(prior_obj$priorhomoskedastic) && is.matrix(prior_obj$priorhomoskedastic) && dim(prior_obj$priorhomoskedastic) == c(ncol(prior_obj$Y)+prior_obj$n_fac, 2))) { - stop(sprintf("priorhomoskedastic should be a matrix of dimensions (n_vars + n_fac) x 2, but is %s of length %d", 
class(prior_obj$priorhomoskedastic), length(prior_obj$priorhomoskedastic))) - } - } else { - prior_obj$priorhomoskedastic <- c(1.1, 1.1) - } - } else { - prior_obj$priorhomoskedastic <- c(1.1, 1.1) - } + } else if (is.null(prior_obj$n_fac) && any(prior_obj$supplied_args %in% c("priormu", "priorphiidi", "priorphifac", "priorsigmaidi", "priorsigmafac", - "priorfacload", "priorng", "columnwise", "restrict", "heteroskedastic", "priorhomoskedastic"))) { + "priorfacload", "restrict"))) { stop("Please set the number of factors before attempting to pass additional arguments along to fsvsim.") } + if ("a" %in% prior_obj$supplied_args) { + if (!is.atomic(prior_obj$a) || length(prior_obj$a) > 1) { + stop("a must be a vector with a single element.") + } + } else { + prior_obj$supplied_args <- c(prior_obj$supplied_args, "a") + } return(prior_obj) } @@ -481,19 +559,19 @@ check_prior <- function(prior_obj) { #' checks for estimation and not forecasting (for which the steady-state prior #' requires additional information). #' @seealso \code{\link{set_prior}}, \code{\link{update_prior}}, \code{\link{estimate_mfbvar}}, \code{\link{summary.mfbvar_prior}} +#' @return No return value, called for side effects. #' @examples -#' prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), -#' n_lags = 4, n_burnin = 100, n_reps = 100) +#' prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 100) #' print(prior_obj) print.mfbvar_prior <- function(x, ...) { - cat("The following elements of the prior have not been set: \n", names(sapply(x, is.null))[sapply(x, is.null)]) + cat("The following elements of the prior object have not been set: \n", names(sapply(x, is.null))[sapply(x, is.null)]) cat("\n\n") - cat("Checking if steady-state prior can be used... ") - if (!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$prior_psi_Omega) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps)) { + cat("Checking if the steady-state prior can be used... 
") + if (!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps)) { cat("TRUE\n\n") } else { test_all <- sapply(x, is.null) - test_sub <- test_all[c("Y", "d", "prior_psi_mean", "prior_psi_Omega", "n_lags", "n_burnin", "n_reps")] + test_sub <- test_all[c("Y", "d", "prior_psi_mean", "n_lags", "n_burnin", "n_reps")] cat("FALSE\n Missing elements:", names(test_sub)[which(test_sub)], "\n") } @@ -506,6 +584,15 @@ print.mfbvar_prior <- function(x, ...) { cat("FALSE\n Missing elements:", names(test_sub)[which(test_sub)], "\n\n") } + #cat("Checking if the Dirichlet-Laplace prior can be used... ") + if (!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps) && !is.null(x$a)) { + #cat("TRUE\n\n") + } else { + test_all <- sapply(x, is.null) + test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps", "a")] + #cat("FALSE\n Missing elements:", names(test_sub)[which(test_sub)], "\n\n") + } + cat("Checking if common stochastic volatility can be used... ") if (length(x$prior_phi) == 2 && length(x$prior_sigma2) == 2) { cat("TRUE\n\n") @@ -523,6 +610,8 @@ print.mfbvar_prior <- function(x, ...) { } else { cat("FALSE\n Missing element: n_fac \n\n") } + + cat("\n") } #' Summary method for mfbvar_prior @@ -533,15 +622,25 @@ print.mfbvar_prior <- function(x, ...) { #' @param ... additional arguments (currently unused) #' @seealso \code{\link{set_prior}}, \code{\link{update_prior}}, \code{\link{estimate_mfbvar}}, \code{\link{print.mfbvar_prior}} #' @examples -#' prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), -#' n_lags = 4, n_burnin = 100, n_reps = 100) +#' prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 100) #' summary(prior_obj) summary.mfbvar_prior <- function(object, ...) 
{ cat("PRIOR SUMMARY\n") cat("----------------------------\n") - cat("Main specification:\n") + cat("General specification:\n") cat(" Y:", ncol(object$Y), "variables,", nrow(object$Y), "time points\n") - cat(sprintf(" freq: %d monthly and %d quarterly %s\n", sum(object$freq == "m"), sum(object$freq == "q"), ifelse(sum(object$freq == "q") == 1, "variable", "variables"))) + cat(" aggregation:", object$aggregation, "\n") + freq_count <- vapply(object$freqs, function(x, freq) sum(x == freq), numeric(1), freq = object$freq) + freqs <- object$freqs + freqs <- replace(freqs, freqs == "w", "weekly") + freqs <- replace(freqs, freqs == "m", "monthly") + freqs <- replace(freqs, freqs == "q", "quarterly") + if (length(freq_count) == 1) { + freq_cat <- sprintf(" freq: %d %s variables\n", freq_count, freqs) + } else { + freq_cat <- sprintf(" freq: %s variables\n", paste(sprintf("%d %s", rev(freq_count), rev(freqs)), collapse = ", ")) + } + cat(freq_cat) if (length(object$prior_Pi_AR1)<=5) { disp_prior_Pi_AR1 <- object$prior_Pi_AR1 } else { @@ -562,19 +661,28 @@ summary.mfbvar_prior <- function(object, ...) 
{ cat(" n_burnin:", object$n_burnin, "\n") cat(" n_reps:", object$n_reps, "\n") cat("----------------------------\n") - cat("Steady-state-specific elements:\n") + cat("Steady-state prior:\n") cat(" d:", ifelse(is.null(object$d), "", ifelse(object$intercept_flag, "intercept", paste0(ncol(object$d), "deterministic variables"))),"\n") cat(" d_fcst:", ifelse(is.null(object$d_fcst), "", ifelse(object$intercept_flag, "intercept", paste0(nrow(object$d_fcst), "forecasts, ", ncol(object$d), "deterministic variables"))),"\n") cat(" prior_psi_mean:", ifelse(is.null(object$prior_psi_mean), "", "prior mean vector of steady states"), "\n") cat(" prior_psi_Omega:", ifelse(is.null(object$prior_psi_Omega), "", "prior covariance matrix of steady states"), "\n") + cat(" check_roots:", object$check_roots, "\n") cat("----------------------------\n") - cat("Common stochastic volatility-specific elements:\n") + cat("Hierarchical steady-state prior:\n") + cat(" s:", ifelse(is.null(object$s), "", object$s), "\n") + cat(" c0:", ifelse(is.null(object$prior_ng), "", object$prior_ng[1]), "\n") + cat(" c1:", ifelse(is.null(object$prior_ng), "", object$prior_ng[2]), "\n") + cat("----------------------------\n") + #cat("Dirichlet-Laplace prior:\n") + #cat(" a:", ifelse(is.null(object[["a"]]), "", object[["a"]]), "\n") + #cat("----------------------------\n") + cat("Common stochastic volatility:\n") cat(sprintf(" prior_phi: mean = %g, var = %g", object$prior_phi[1], object$prior_phi[2]), "\n") cat(sprintf(" prior_sigma2: mean = %g, df = %d", object$prior_sigma2[1], object$prior_sigma2[2]), "\n") cat("----------------------------\n") - cat("Factor stochastic volatility-specific elements:\n") + cat("Factor stochastic volatility:\n") cat(" n_fac:", ifelse(is.null(object$n_fac), "", object$n_fac), "\n") - cat(" cl:", ifelse(is.null(object$cl), "", sprintf("%s with %d workers", class(object$cl)[1], length(object$cl))), "\n") + cat(" n_cores:", ifelse(is.null(object$n_cores), "", object$n_cores), "\n") 
if ("priormu" %in% object$supplied_args) { cat(" priormu:", object$priormu, "\n") } @@ -605,29 +713,12 @@ summary.mfbvar_prior <- function(object, ...) { cat(" priorfacload:", paste(dim(object$priorfacload), collapse = " x "), "matrix\n") } } - if ("priorng" %in% object$supplied_args) { - cat(" priorng:", object$priorng, "\n") - } - if ("columnwise" %in% object$supplied_args) { - cat(" columnwise:", object$columnwise, "\n") - } if ("restrict" %in% object$supplied_args) { cat(" restrict:", object$restrict, "\n") } - if ("heteroskedastic" %in% object$supplied_args) { - if (length(object$heteroskedastic) <= 2) { - cat(" heteroskedastic:", object$heteroskedastic, "\n") - } else { - cat(" heteroskedastic: vector with", ncol(object$Y)+object$n_fac, "logical values\n") - } - } - if ("priorhomoskedastic" %in% object$supplied_args) { - cat(" priorhomoskedastic:", paste(dim(object$priorhomoskedastic), collapse = " x "), "matrix\n") - } cat("----------------------------\n") cat("Other:\n") cat(" verbose:", object$verbose, "\n") - cat(" check_roots:", object$check_roots, "\n") } @@ -637,45 +728,55 @@ summary.mfbvar_prior <- function(object, ...) { #' The main function for estimating a mixed-frequency BVAR. #' #' @param mfbvar_prior a \code{mfbvar_prior} object -#' @param prior either \code{"ss"} (steady-state prior) or \code{"minn"} (Minnesota prior) +#' @param prior either \code{"ss"} (steady-state prior), \code{"ssng"} (hierarchical steady-state prior with normal-gamma shrinkage) or \code{"minn"} (Minnesota prior) #' @param variance form of the error variance-covariance matrix: \code{"iw"} for the inverse Wishart prior, \code{"diffuse"} for a diffuse prior, \code{"csv"} for common stochastic volatility or \code{"fsv"} for factor stochastic volatility #' @param ... 
additional arguments to \code{update_prior} (if \code{mfbvar_prior} is \code{NULL}, the arguments are passed on to \code{set_prior}) -#' @return An object of class \code{mfbvar}, \code{mfbvar_} and \code{mfbvar__} containing posterior quantities as well as the prior object -#' @seealso \code{\link{set_prior}}, \code{\link{update_prior}}, \code{\link{predict.mfbvar}}, \code{\link{plot.mfbvar_minn}}, -#' \code{\link{plot.mfbvar_ss}}, \code{\link{varplot}}, \code{\link{summary.mfbvar}} -#' @examples -#' prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), -#' n_lags = 4, n_burnin = 20, n_reps = 20) -#' mod_minn <- estimate_mfbvar(prior_obj, prior = "minn") -#' @return For all choices of \code{prior} and \code{variance}, the returned object contains: +#' @return +#' An object of class \code{mfbvar}, \code{mfbvar_} and \code{mfbvar__} containing posterior quantities as well as the prior object. For all choices of \code{prior} and \code{variance}, the returned object contains: #' \item{Pi}{Array of dynamic coefficient matrices; \code{Pi[,, r]} is the \code{r}th draw} #' \item{Z}{Array of monthly processes; \code{Z[,, r]} is the \code{r}th draw} #' \item{Z_fcst}{Array of monthly forecasts; \code{Z_fcst[,, r]} is the \code{r}th forecast. The first \code{n_lags} #' rows are taken from the data to offer a bridge between observations and forecasts and for computing nowcasts (i.e. 
with ragged edges).} -#' +#' \subsection{Steady-state priors}{ #' If \code{prior = "ss"}, it also includes: -#' \item{psi}{Matrix of steady-state parameter vectors; \code{psi[r,]} is the \code{r}th draw} -#' \item{roots}{The maximum eigenvalue of the lag polynomial (if \code{check_roots = TRUE})} -#' \item{num_tries}{The number of attempts for drawing a stationary \eqn{\Pi} (if \code{check_roots = TRUE})} +#' \describe{\item{\code{psi}}{Matrix of steady-state parameter vectors; \code{psi[r,]} is the \code{r}th draw} +#' \item{\code{roots}}{The maximum eigenvalue of the lag polynomial (if \code{check_roots = TRUE})}} #' +#' If \code{prior = "ssng"}, it also includes: +#' \describe{ +#' \item{\code{psi}}{Matrix of steady-state parameter vectors; \code{psi[r,]} is the \code{r}th draw} +#' \item{\code{roots}}{The maximum eigenvalue of the lag polynomial (if \code{check_roots = TRUE})} +#' \item{\code{lambda_psi}}{Vector of draws of the global hyperparameter in the normal-Gamma prior} +#' \item{\code{phi_psi}}{Vector of draws of the auxiliary hyperparameter in the normal-Gamma prior} +#' \item{\code{omega_psi}}{Matrix of draws of the prior variances of psi; \code{omega_psi[r, ]} is the \code{r}th draw, where \code{diag(omega_psi[r, ])} is used as the prior covariance matrix for psi}}} +#' \subsection{Constant error covariances}{ #' If \code{variance = "iw"} or \code{variance = "diffuse"}, it also includes: -#' \item{Sigma}{Array of error covariance matrices; \code{Sigma[,, r]} is the \code{r}th draw} -#' -#' #' If \code{variance = "csv"}, it also includes: -#' \item{Sigma}{Array of error covariance matrices; \code{Sigma[,, r]} is the \code{r}th draw} -#' \item{phi}{Vector of AR(1) parameters for the log-volatility regression; \code{phi[r]} is the \code{r}th draw} -#' \item{sigma}{Vector of error standard deviations for the log-volatility regression; \code{sigma[r]} is the \code{r}th draw}#' -#' \item{f}{Matrix of log-volatilities; \code{f[r, ]} is the \code{r}th draw} 
+#' \describe{\item{\code{Sigma}}{Array of error covariance matrices; \code{Sigma[,, r]} is the \code{r}th draw}}} +#' \subsection{Time-varying error covariances}{ +#' If \code{variance = "csv"}, it also includes: +#' \describe{\item{\code{Sigma}}{Array of error covariance matrices; \code{Sigma[,, r]} is the \code{r}th draw} +#' \item{\code{phi}}{Vector of AR(1) parameters for the log-volatility regression; \code{phi[r]} is the \code{r}th draw} +#' \item{\code{sigma}}{Vector of error standard deviations for the log-volatility regression; \code{sigma[r]} is the \code{r}th draw} +#' \item{\code{f}}{Matrix of log-volatilities; \code{f[r, ]} is the \code{r}th draw}} #' #' If \code{variance = "fsv"}, it also includes: -#' \item{facload}{Array of factor loadings; \code{facload[,, r]} is the \code{r}th draw} -#' \item{latent}{Array of latent log-volatilities; \code{latent[,, r]} is the \code{r}th draw} -#' \item{mu}{Matrix of means of the log-volatilities; \code{mu[, r]} is the \code{r}th draw} -#' \item{phi}{Matrix of AR(1) parameters for the log-volatilities; \code{phi[, r]} is the \code{r}th draw} -#' \item{sigma}{Matrix of innovation variances for the log-volatilities; \code{sigma[, r]} is the \code{r}th draw} +#' \describe{\item{\code{facload}}{Array of factor loadings; \code{facload[,, r]} is the \code{r}th draw} +#' \item{\code{latent}}{Array of latent log-volatilities; \code{latent[,, r]} is the \code{r}th draw} +#' \item{\code{mu}}{Matrix of means of the log-volatilities; \code{mu[, r]} is the \code{r}th draw} +#' \item{\code{phi}}{Matrix of AR(1) parameters for the log-volatilities; \code{phi[, r]} is the \code{r}th draw} +#' \item{\code{sigma}}{Matrix of innovation variances for the log-volatilities; \code{sigma[, r]} is the \code{r}th draw}}} +#' @seealso \code{\link{set_prior}}, \code{\link{update_prior}}, \code{\link{predict.mfbvar}}, \code{\link{plot.mfbvar_minn}}, +#' \code{\link{plot.mfbvar_ss}}, \code{\link{varplot}}, \code{\link{summary.mfbvar}} +#' 
@examples +#' prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20) +#' mod_minn <- estimate_mfbvar(prior_obj, prior = "minn") #' @references -#' Schorfheide, F., & Song, D. (2015) Real-Time Forecasting With a Mixed-Frequency VAR. \emph{Journal of Business & Economic Statistics}, 33(3), 366--380. \url{http://dx.doi.org/10.1080/07350015.2014.954707}\cr -#' Ankargren, S., Unosson, M., & Yang, Y. (2018) A Mixed-Frequency Bayesian Vector Autoregression with a Steady-State Prior. Working Paper, Department of Statistics, Uppsala University No. 2018:3. +#' Ankargren, S., Unosson, M., & Yang, Y. (2020) A Flexible Mixed-Frequency Bayesian Vector Autoregression with a Steady-State Prior. \emph{Journal of Time Series Econometrics}, 12(2), \doi{10.1515/jtse-2018-0034}.\cr +#' Ankargren, S., & Jonéus, P. (2020) Simulation Smoothing for Nowcasting with Large Mixed-Frequency VARs. \emph{Econometrics and Statistics}, \doi{10.1016/j.ecosta.2020.05.007}.\cr +#' Ankargren, S., & Jonéus, P. (2019) Estimating Large Mixed-Frequency Bayesian VAR Models. arXiv:1912.02231, \url{https://arxiv.org/abs/1912.02231}.\cr +#' Kastner, G., & Huber, F. (2020) Sparse Bayesian Vector Autoregressions in Huge Dimensions. \emph{Journal of Forecasting}, 39, 1142--1165. \doi{10.1002/for.2680}.\cr +#' Schorfheide, F., & Song, D. (2015) Real-Time Forecasting With a Mixed-Frequency VAR. \emph{Journal of Business & Economic Statistics}, 33(3), 366--380. \doi{10.1080/07350015.2014.954707}\cr + estimate_mfbvar <- function(mfbvar_prior = NULL, prior, variance = "iw", ...) { time_out <- Sys.time() args <- list(...) @@ -696,39 +797,24 @@ estimate_mfbvar <- function(mfbvar_prior = NULL, prior, variance = "iw", ...) 
{ prior <- args$prior_type } - if (!(prior %in% c("ss", "ssng", "minn"))) { - stop("prior must be 'ss', 'ssng' or 'minn'.") + if (!(prior %in% c("ss", "ssng", "minn", "dl"))) { + stop("prior must be 'ss', 'ssng', 'minn' or 'dl'.") } if (!(variance %in% c("iw", "fsv", "csv", "diffuse"))) { stop("volatility must be 'iw', 'diffuse', 'csv' or 'fsv'.") } - - - class(mfbvar_prior) <- c(class(mfbvar_prior), sprintf("mfbvar_%s_%s", prior, variance), sprintf("mfbvar_%s", prior), sprintf("mfbvar_%s", variance)) - - if (mfbvar_prior$verbose) { - cat(paste0("##############################################\nRunning the burn-in sampler with ", mfbvar_prior$n_burnin, " draws\n\n")) - start_burnin <- Sys.time() + if (prior == "dl" && !(variance %in% c("fsv", "diffuse"))) { + stop("The Dirichlet-Laplace prior (dl) can only be used with variance specifications fsv and diffuse.") } - time_out <- c(time_out, Sys.time()) - burn_in <- mcmc_sampler(update_prior(mfbvar_prior, n_fcst = 0), n_reps = mfbvar_prior$n_burnin, n_thin = mfbvar_prior$n_burnin) - - if (mfbvar_prior$verbose) { - end_burnin <- Sys.time() - time_diff <- end_burnin - start_burnin - cat(paste0("\n Time elapsed for drawing ", mfbvar_prior$n_burnin, " times for burn-in: ", signif(time_diff, digits = 1), " ", - attr(time_diff, "units"), "\n")) - cat(paste0("\nMoving on to the main chain with ", - mfbvar_prior$n_reps, " draws \n\n", ifelse(mfbvar_prior$n_fcst > 0, paste0(" Making forecasts ", mfbvar_prior$n_fcst, " steps ahead"), ""), "\n\n")) - } + class(mfbvar_prior) <- c(sprintf("mfbvar_%s_%s", prior, variance), sprintf("mfbvar_%s", prior), sprintf("mfbvar_%s", variance), class(mfbvar_prior)) time_out <- c(time_out, Sys.time()) - main_run <- mcmc_sampler(mfbvar_prior, n_reps = mfbvar_prior$n_reps, init = burn_in$init) + main_run <- mcmc_sampler(mfbvar_prior) time_out <- c(time_out, Sys.time()) if (mfbvar_prior$verbose) { - time_diff <- Sys.time() - start_burnin + time_diff <- Sys.time() - time_out[1] cat(paste0("\n Total 
time elapsed: ", signif(time_diff, digits = 1), " ", attr(time_diff, "units"), "\n")) } @@ -748,7 +834,7 @@ estimate_mfbvar <- function(mfbvar_prior = NULL, prior, variance = "iw", ...) { } if (mfbvar_prior$n_fcst > 0) { names_fcst <- paste0("fcst_", 1:mfbvar_prior$n_fcst) - rownames(main_run$Z_fcst)[1:main_run$n_lags] <- names_row[(main_run$n_T-main_run$n_lags+1):main_run$n_T] + rownames(main_run$Z_fcst)[1:main_run$n_lags] <- names_row[(length(names_row)-main_run$n_lags+1):length(names_row)] rownames(main_run$Z_fcst)[(main_run$n_lags+1):(main_run$n_fcst+main_run$n_lags)] <- names_fcst colnames(main_run$Z_fcst) <- names_col } else { @@ -763,12 +849,12 @@ estimate_mfbvar <- function(mfbvar_prior = NULL, prior, variance = "iw", ...) { dimnames(main_run$Z) <- list(time = names_row[(nrow(mfbvar_prior$Y)-nrow(main_run$Z)+1):nrow(mfbvar_prior$Y)], variable = names_col, - iteration = 1:mfbvar_prior$n_reps) + iteration = 1:(mfbvar_prior$n_reps/mfbvar_prior$n_thin)) if (variance %in% c("iw", "diffuse")) { dimnames(main_run$Sigma) <- list(names_col, names_col, - iteration = 1:mfbvar_prior$n_reps) + iteration = 1:(mfbvar_prior$n_reps/mfbvar_prior$n_thin)) } @@ -781,15 +867,15 @@ estimate_mfbvar <- function(mfbvar_prior = NULL, prior, variance = "iw", ...) 
{ rownames(mfbvar_prior$d) <- rownames(mfbvar_prior$Y) main_run$names_determ <- names_determ n_determ <- dim(mfbvar_prior$d)[2] - dimnames(main_run$psi) <- list(iteration = 1:mfbvar_prior$n_reps, + dimnames(main_run$psi) <- list(iteration = 1:(mfbvar_prior$n_reps/mfbvar_prior$n_thin), param = paste0(rep(names_col, n_determ), ".", rep(names_determ, each = n_vars))) dimnames(main_run$Pi) <- list(dep = names_col, indep = paste0(rep(names_col, mfbvar_prior$n_lags), ".l", rep(1:mfbvar_prior$n_lags, each = n_vars)), - iteration = 1:mfbvar_prior$n_reps) + iteration = 1:(mfbvar_prior$n_reps/mfbvar_prior$n_thin)) } else { dimnames(main_run$Pi) <- list(dep = names_col, indep = c("const", paste0(rep(names_col, mfbvar_prior$n_lags), ".l", rep(1:mfbvar_prior$n_lags, each = n_vars))), - iteration = 1:mfbvar_prior$n_reps) + iteration = 1:(mfbvar_prior$n_reps/mfbvar_prior$n_thin)) } if (sum(mfbvar_prior$freq == "m") == 0 || sum(mfbvar_prior$freq == "m") == ncol(mfbvar_prior$Y)) { @@ -812,70 +898,67 @@ estimate_mfbvar <- function(mfbvar_prior = NULL, prior, variance = "iw", ...) { #' @param x object of class \code{mfbvar} #' @param ... Currently not in use. #' @template man_template +#' @return No return value, called for side effects. 
#' @examples -#' prior_obj <- set_prior(Y = mf_sweden[, 4:5], d = "intercept", -#' freq = c("m", "q"), n_lags = 4, n_burnin = 20, n_reps = 20) +#' prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20) #' mod_minn <- estimate_mfbvar(prior_obj, prior = "minn") #' mod_minn print.mfbvar <- function(x, ...){ ss <- ifelse(x$prior == "ss", "steady-state ", "") + freq_type <- ifelse(sum(x$freq == "m") == 0, "Quarterly", ifelse(sum(x$freq == "q") == 0, "Monthly", "Mixed-frequency")) var_type <- switch(x$variance, iw = "Inverse Wishart", diffuse = "Diffuse", fsv = sprintf("Factor stochastic volatility (%d factors)", x$mfbvar_prior$n_fac), csv = "Common stochastic volatility") - cat(paste0(sprintf("Mixed-frequency %sBVAR with:\n", ss), ncol(x$Y), " variables", ifelse(!is.null(x$names_col), paste0(" (", paste(x$names_col, collapse = ", "), ")"), " "), "\nError covariance matrix: ", var_type, "\n", + cat(paste0(sprintf("%s BVAR with:\n", freq_type), ncol(x$Y), " variables", ifelse(!is.null(x$names_col), paste0(" (", paste(x$names_col, collapse = ", "), ")"), " "), + "\nPrior: ", x$prior, "\n", + "\nError covariance matrix: ", var_type, "\n", x$n_lags, " lags\n", - nrow(x$Y), " time periods", ifelse(!is.null(x$names_row), paste0(" (", x$names_row[1], " - ", x$names_row[length(x$names_row)], ")"), " "), "\n", ifelse(is.null(x$n_fcst), "0", x$n_fcst), " periods forecasted\n", + nrow(x$Y), " time periods", ifelse(!is.null(x$names_row), paste0(" (", x$names_row[1], " - ", x$names_row[length(x$names_row)], ")"), " "), "\n", ifelse(is.null(x$n_fcst), "0", x$n_fcst), " periods forecast\n", x$n_reps, " draws used in main chain")) + cat("\n") } #' Summary method for class mfbvar #' #' Method for summarizing \code{mfbvar} objects. #' -#' @param x object of class \code{mfbvar} +#' @param object object of class \code{mfbvar} #' @param ... Currently not in use. 
#' @template man_template #' @examples -#' prior_obj <- set_prior(Y = mf_sweden[, 4:5], d = "intercept", -#' freq = c("m", "q"), n_lags = 4, n_burnin = 20, n_reps = 20) +#' prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20) #' mod_minn <- estimate_mfbvar(prior_obj, prior = "minn") #' summary(mod_minn) -summary.mfbvar <- function(x, ...){ - ss <- ifelse(x$prior == "ss", "steady-state ", "") - var_type <- switch(x$variance, - iw = "Inverse Wishart", - diffuse = "Diffuse", - fsv = sprintf("Factor stochastic volatility (%d factors)", x$mfbvar_prior$n_fac), - csv = "Common stochastic volatility") - cat(paste0(sprintf("Mixed-frequency %sBVAR with:\n", ss), ncol(x$Y), " variables", ifelse(!is.null(x$names_col), paste0(" (", paste(x$names_col, collapse = ", "), ")"), " "), "\nError covariance matrix: ", var_type, "\n", - x$n_lags, " lags\n", - nrow(x$Y), " time periods", ifelse(!is.null(x$names_row), paste0(" (", x$names_row[1], " - ", x$names_row[length(x$names_row)], ")"), " "), "\n", ifelse(is.null(x$n_fcst), "0", x$n_fcst), " periods forecasted\n", - x$n_reps, " draws used in main chain")) +summary.mfbvar <- function(object, ...){ + print(object) } #' Plotting methods for posterior mfbvar objects #' -#' Methods for plotting posterior mfbvar objects (\code{mfbvar_minn} and \code{mfbvar_ss}). +#' Methods for plotting posterior mfbvar objects. #' @param x object of class \code{mfbvar_minn} or \code{mfbvar_ss} -#' @param fcst_start Date of the first forecast; if dates are available for the data used for obtaining \code{x}, these will be used. +#' @param aggregate_fcst Boolean indicating whether forecasts of the latent monthly series should be aggregated to the quarterly frequency. #' @param plot_start Time period (date or number) to start plotting from. Default is to use \code{5*n_fcst} time periods if \code{n_fcst} exists, otherwise the entire sample. 
+#' @param variables Vector of names or positions of variables to include in the plot of variances #' @param pred_bands Single number (between \code{0.0} and \code{1.0}) giving the coverage level of forecast intervals. #' @param ss_bands (Steady-state prior only) Single number (between \code{0.0} and \code{1.0}) giving the coverage level of posterior steady-state intervals. #' @param var_bands (\code{varplot} only) Single number (between \code{0.0} and \code{1.0}) giving the coverage level of posterior intervals for the error standard deviations. #' @param nrow_facet an integer giving the number of rows to use in the facet #' @param ... Currently not in use. +#' @return A \code{\link[ggplot2]{ggplot}}. #' @name plot-mfbvar #' @examples -#' prior_obj <- set_prior(Y = mf_sweden[, 4:5], d = "intercept", -#' freq = c("m", "q"), n_lags = 4, n_burnin = 20, n_reps = 20, +#' prior_obj <- set_prior(Y = mf_usa, d = "intercept", +#' n_lags = 4, n_reps = 20, #' n_fcst = 4, n_fac = 1) #' -#' prior_intervals <- matrix(c(-0.1, 0.1, -#' 0.4, 0.6), ncol = 2, byrow = TRUE) +#' prior_intervals <- matrix(c(1, 3, +#' 4, 8, +#' 1, 3), ncol = 2, byrow = TRUE) #' psi_moments <- interval_to_moments(prior_intervals) #' prior_psi_mean <- psi_moments$prior_psi_mean #' prior_psi_Omega <- psi_moments$prior_psi_Omega @@ -888,39 +971,33 @@ summary.mfbvar <- function(x, ...){ #' varplot(mod_ss) #' @rdname plot-mfbvar -plot.mfbvar_ss <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_start = NULL, +plot.mfbvar_ss <- function(x, aggregate_fcst = TRUE, plot_start = NULL, pred_bands = 0.8, nrow_facet = NULL, ss_bands = 0.95, ...){ - if (is.null(fcst_start)) { - row_names <- tryCatch(as.Date(rownames(x$Y)), error = function(cond) cond) - if (inherits(row_names, "error")) { - stop("To plot the forecasts, either fcst_start must be supplied or the rownames of Y be dates (YYYY-MM-DD).") - } - fcst_start <-lubridate::as_date(rownames(x$Y)[nrow(x$Y)]) %m+% months(1) - } else { - fcst_start <- 
tryCatch(as.Date(fcst_start), error = function(cond) cond) - if (inherits(fcst_start, "error")) { - stop("Unable to convert fcst_start to a date.") - } + row_names <- tryCatch(as.Date(rownames(x$Y)), error = function(cond) cond) + if (inherits(row_names, "error")) { + stop("To plot the forecasts, proper dates must be provided in the input data.") } + fcst_start <- lubridate::as_date(rownames(x$Y)[nrow(x$Y)]) %m+% months(1) - plot_range_names <- fcst_start %m+% months(-x$n_T:(-1)) + + plot_range_names <- fcst_start %m+% months(-nrow(x$Y):(-1)) lower <- upper <- value <- NULL if (is.null(plot_start)) { if (x$n_fcst > 0) { - plot_range <- max(x$n_T-x$n_fcst*5, 0):x$n_T + plot_range <- max(nrow(x$Y)-x$n_fcst*5, 0):nrow(x$Y) } else { - plot_range <- 1:x$n_T + plot_range <- 1:nrow(x$Y) } } else { - plot_start <- tryCatch(as_date(plot_start), error = function(cond) cond) + plot_start <- tryCatch(lubridate::as_date(plot_start), error = function(cond) cond) if (!inherits(plot_start, "error")) { if (!(plot_start %in% plot_range_names)) { stop(sprintf("The start date, %s, does not match rownames in the data matrix Y.", plot_start)) } - plot_range <- (which(plot_range_names == plot_start)):x$n_T + plot_range <- (which(plot_range_names == plot_start)):nrow(x$Y) } else { stop("Unable to convert plot_start to a date.") } @@ -939,7 +1016,7 @@ plot.mfbvar_ss <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_sta } names_col <- if (is.null(x$names_col)) paste0("x", 1:x$n_vars) else x$names_col - names_row <- if (is.null(x$names_row)) 1:x$n_T else x$names_row + names_row <- if (is.null(x$names_row)) 1:nrow(x$Y) else x$names_row p <- ggplot(mapping = aes(x = time)) if (x$n_fcst > 0) { @@ -966,16 +1043,17 @@ plot.mfbvar_ss <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_sta geom_line(data = na.omit(ss), aes(y = value)) } if (x$n_fcst > 0) { - preds <- predict(x, aggregate_fcst = aggregate_fcst, fcst_start = fcst_start, pred_bands = pred_bands) + preds <- 
predict(x, aggregate_fcst = aggregate_fcst, pred_bands = pred_bands) fcst <- preds last_pos <- apply(x$Y, 2, function(yy) max(which(!is.na(yy)))) for (i in seq_along(last_pos)) { - fcst <- rbind(fcst, data.frame(variable = names_col[i], + fcst <- rbind(data.frame(variable = names_col[i], time = (1:nrow(x$Y))[last_pos[i]], fcst_date = preds$fcst_date[1] %m-% months(preds$time[1] - last_pos[i]), lower = x$Y[last_pos[i], i], median = x$Y[last_pos[i], i], - upper = x$Y[last_pos[i], i])) + upper = x$Y[last_pos[i], i]), + fcst) } fcst <- mutate(fcst, variable = factor(variable, levels = names_col, labels = names_col)) fcst <- fcst[!duplicated(fcst[, 1:2]), ] @@ -1020,7 +1098,8 @@ plot.mfbvar_ss <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_sta p <- p + theme_minimal() + theme(legend.position="bottom") - breaks <- ggplot_build(p)$layout$coord$labels(ggplot_build(p)$layout$panel_params)[[1]]$x.labels + breaks <- ggplot_build(p)$layout$panel_params[[1]]$x$breaks + breaks <- na.omit(breaks) if (any(as.numeric(breaks)>plot_range[length(plot_range)])) { break_labels <- c(as.character(plot_range_names[as.numeric(breaks)[as.numeric(breaks)<=plot_range[length(plot_range)]]]), as.character(preds$fcst_date[min(which(preds$time == breaks[as.numeric(breaks)>plot_range[length(plot_range)]]))])) @@ -1033,42 +1112,39 @@ plot.mfbvar_ss <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_sta } #' @rdname plot-mfbvar -plot.mfbvar_ssng <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_start = NULL, +plot.mfbvar_ssng <- function(x, aggregate_fcst = TRUE, plot_start = NULL, pred_bands = 0.8, nrow_facet = NULL, ss_bands = 0.95, ...) { - plot.mfbvar_ss(x, fcst_start = fcst_start, aggregate_fcst = aggregate_fcst, ss_bands = ss_bands, ...) + plot.mfbvar_ss(x, aggregate_fcst = aggregate_fcst, plot_start = plot_start, + pred_bands = pred_bands, nrow_facet = nrow_facet, ss_bands = ss_bands, ...) 
} + #' @rdname plot-mfbvar -plot.mfbvar_minn <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_start = NULL, +plot.mfbvar_minn <- function(x, aggregate_fcst = TRUE, plot_start = NULL, pred_bands = 0.8, nrow_facet = NULL, ...){ - if (is.null(fcst_start)) { - row_names <- tryCatch(as.Date(rownames(x$Y)), error = function(cond) cond) - if (inherits(row_names, "error")) { - stop("To plot the forecasts, either fcst_start must be supplied or the rownames of Y be dates (YYYY-MM-DD).") - } - fcst_start <-lubridate::as_date(rownames(x$Y)[nrow(x$Y)]) %m+% months(1) - } else { - fcst_start <- tryCatch(as.Date(fcst_start), error = function(cond) cond) - if (inherits(fcst_start, "error")) { - stop("Unable to convert fcst_start to a date.") - } + + row_names <- tryCatch(as.Date(rownames(x$Y)), error = function(cond) cond) + if (inherits(row_names, "error")) { + stop("To plot the forecasts, proper dates must be provided in the input data.") } + fcst_start <-lubridate::as_date(rownames(x$Y)[nrow(x$Y)]) %m+% months(1) + - plot_range_names <- fcst_start %m+% months(-x$n_T:(-1)) + plot_range_names <- fcst_start %m+% months(-nrow(x$Y):(-1)) lower <- upper <- value <- NULL if (is.null(plot_start)) { if (x$n_fcst > 0) { - plot_range <- max(x$n_T-x$n_fcst*5, 0):x$n_T + plot_range <- max(nrow(x$Y)-x$n_fcst*5, 0):nrow(x$Y) } else { - plot_range <- 1:x$n_T + plot_range <- 1:nrow(x$Y) } } else { - plot_start <- tryCatch(as_date(plot_start), error = function(cond) cond) + plot_start <- tryCatch(lubridate::as_date(plot_start), error = function(cond) cond) if (!inherits(plot_start, "error")) { if (!(plot_start %in% plot_range_names)) { stop(sprintf("The start date, %s, does not match rownames in the data matrix Y.", plot_start)) } - plot_range <- (which(plot_range_names == plot_start)):x$n_T + plot_range <- (which(plot_range_names == plot_start)):nrow(x$Y) } else { stop("Unable to convert plot_start to a date.") } @@ -1096,7 +1172,7 @@ plot.mfbvar_minn <- function(x, fcst_start = 
NULL, aggregate_fcst = TRUE, plot_s geom_line(data = na.omit(ss), aes(y = value)) if (x$n_fcst > 0) { - preds <- predict(x, aggregate_fcst = aggregate_fcst, fcst_start = fcst_start, pred_bands = pred_bands) + preds <- predict(x, aggregate_fcst = aggregate_fcst, pred_bands = pred_bands) fcst <- preds last_pos <- apply(x$Y, 2, function(yy) max(which(!is.na(yy)))) for (i in seq_along(last_pos)) { @@ -1105,7 +1181,8 @@ plot.mfbvar_minn <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_s fcst_date = preds$fcst_date[1] %m-% months(preds$time[1] - last_pos[i]), lower = x$Y[last_pos[i], i], median = x$Y[last_pos[i], i], - upper = x$Y[last_pos[i], i])) + upper = x$Y[last_pos[i], i]), + fcst) } fcst <- mutate(fcst, variable = factor(variable, levels = names_col, labels = names_col)) fcst <- fcst[!duplicated(fcst[, 1:2]), ] @@ -1143,7 +1220,8 @@ plot.mfbvar_minn <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_s p <- p + theme_minimal() + theme(legend.position="bottom") - breaks <- ggplot_build(p)$layout$coord$labels(ggplot_build(p)$layout$panel_params)[[1]]$x.labels + breaks <- ggplot_build(p)$layout$panel_params[[1]]$x$breaks + breaks <- na.omit(breaks) if (any(as.numeric(breaks)>plot_range[length(plot_range)])) { break_labels <- c(as.character(plot_range_names[as.numeric(breaks)[as.numeric(breaks)<=plot_range[length(plot_range)]]]), as.character(preds$fcst_date[min(which(preds$time == breaks[as.numeric(breaks)>plot_range[length(plot_range)]]))])) @@ -1155,6 +1233,12 @@ plot.mfbvar_minn <- function(x, fcst_start = NULL, aggregate_fcst = TRUE, plot_s theme(axis.text.x=element_text(angle=45, hjust=1)) } +plot.mfbvar_dl <- function(x, aggregate_fcst = TRUE, plot_start = NULL, + pred_bands = 0.8, nrow_facet = NULL, ...) { + plot.mfbvar_minn(x, aggregate_fcst = aggregate_fcst, plot_start = plot_start, + pred_bands = pred_bands, nrow_facet = nrow_facet, ...) 
+} + #' @rdname plot-mfbvar varplot <- function(x, variables = colnames(x$Y), var_bands = 0.95, nrow_facet = NULL, ...) { if (!inherits(x, c("mfbvar_csv", "mfbvar_fsv"))) { @@ -1179,7 +1263,7 @@ varplot <- function(x, variables = colnames(x$Y), var_bands = 0.95, nrow_facet = } if (sv_type == "fsv") { n_fac <- x$mfbvar_prior$n_fac - variances_fsv(variances, x$latent, x$facload, variables_num, n_fac, n_reps, n_T, n_vars, n_plotvars) + variances_fsv(variances, x$h, x$facload, variables_num, n_fac, n_reps, n_T, n_vars, n_plotvars) } if (sv_type == "csv") { variances_csv(variances, x$Sigma, x$f, n_T, n_reps, variables_num) @@ -1221,46 +1305,36 @@ varplot <- function(x, variables = colnames(x$Y), var_bands = 0.95, nrow_facet = #' Method for predicting \code{mfbvar} objects. #' #' @param object object of class mfbvar -#' @param fcst_start The date (\code{YYYY-MM-DD}) of the first forecast. If not provided, dates from the original data is used if available. #' @param aggregate_fcst If forecasts of quarterly variables should be aggregated back to the quarterly frequency. #' @param pred_bands The level of the probability bands for the forecasts. #' @param ... Currently not in use. #' @details Note that this requires that forecasts were made in the original \code{mfbvar} call. 
+#' @return A \code{\link[tibble]{tibble}} with columns: +#' \describe{\item{\code{variable}}{Name of variable} +#' \item{\code{time}}{Time index} +#' \item{\code{fcst_date}}{Date of forecast}} +#' If the argument \code{pred_bands} is given as a numeric value between 0 and 1, the returned tibble also includes columns: +#' \describe{\item{\code{lower}}{The \code{(1-pred_bands)/2} lower quantiles of the predictive distributions} +#' \item{\code{median}}{The medians of the predictive distributions} +#' \item{\code{upper}}{The \code{(1+pred_bands)/2} upper quantiles of the predictive distributions}} +#' If \code{pred_bands} is \code{NULL} or \code{NA}, the returned tibble also includes the columns: +#' \describe{\item{\code{fcst}}{MCMC samples from the predictive distributions} +#' \item{\code{iter}}{Iteration indexes for the MCMC samples}} #' @examples -#' prior_obj <- set_prior(Y = mf_sweden[, 4:5], freq = c("m", "q"), -#' n_lags = 4, n_burnin = 20, n_reps = 20, n_fcst = 4) +#' prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20, n_fcst = 4) #' mod_minn <- estimate_mfbvar(prior_obj, prior = "minn") #' predict(mod_minn) -predict.mfbvar <- function(object, fcst_start = NULL, aggregate_fcst = TRUE, pred_bands = 0.8, ...) { +predict.mfbvar <- function(object, aggregate_fcst = TRUE, pred_bands = 0.8, ...) 
{ end_month <- FALSE if (object$n_fcst==0) { stop("No forecasts exist in the provided object.") } - if (!is.null(fcst_start)) { - fcst_start <- as.Date(fcst_start) - if (lubridate::days_in_month(fcst_start) == lubridate::day(fcst_start)) { - end_month <- TRUE - } - } - if (object$n_fcst > 0) { - if (!inherits(fcst_start, "Date")) { - tmp <- tryCatch(lubridate::ymd(rownames(object$Y)[nrow(object$Y)]), warning = function(cond) cond) - if (inherits(tmp, "warning")) { - stop("To summarize the forecasts, either fcst_start must be supplied or the rownames of Y be dates (YYYY-MM-DD).") - } else { - final_est <- lubridate::ymd(rownames(object$Y)[nrow(object$Y)]) - fcst_start <- final_est %m+% months(1) - if (lubridate::days_in_month(final_est) == lubridate::day(final_est)) { - end_month <- TRUE - } - } - } - } - final_m <- c(unlist(apply(object$Y, 2, function(x) Position(is.na, x, nomatch = nrow(object$Y)+1)))[object$mfbvar_prior$freq == "m"])-1 - final_q <- min(apply(object$Y[,object$mfbvar_prior$freq == "q", drop = FALSE], 2, function(x) max(which(!is.na(x))))) - final_non_na <- min(c(final_m, - final_q)) + + final_h <- c(unlist(apply(object$Y, 2, function(x) Position(is.na, x, nomatch = nrow(object$Y)+1)))[object$mfbvar_prior$freq == object$mfbvar_prior$freqs[2]])-1 + final_l <- min(apply(object$Y[,object$mfbvar_prior$freq == object$mfbvar_prior$freqs[1], drop = FALSE], 2, function(x) max(which(!is.na(x))))) + final_non_na <- min(c(final_h, + final_l)) final_fcst <- object$n_lags - (nrow(object$Y)-final_non_na)+1 if (final_fcst >= 1) { incl_fcst <- final_fcst:(object$n_lags + object$n_fcst) @@ -1268,92 +1342,61 @@ predict.mfbvar <- function(object, fcst_start = NULL, aggregate_fcst = TRUE, pre incl_fcst <- 1:(object$n_lags + object$n_fcst) } + n_h <- sum(object$mfbvar_prior$freq == object$mfbvar_prior$freqs[2]) + n_l <- sum(object$mfbvar_prior$freq == object$mfbvar_prior$freqs[1]) + n_vars <- n_h + n_l - ret_names <- fcst_start %m+% 
months((-(length(incl_fcst)-object$n_fcst)):(object$n_fcst-1)) - ret_names_q <- fcst_start %m+% months((-(object$n_lags)):(object$n_fcst-1)) + tmp <- tryCatch(lubridate::ymd(rownames(object$Y)[nrow(object$Y)]), warning = function(cond) cond) - if (end_month) { - ret_names <- lubridate::ceiling_date(ret_names, unit = "months") - lubridate::days(1) - ret_names_q <- lubridate::ceiling_date(ret_names_q, unit = "months") - lubridate::days(1) - } - - n_m <- sum(object$mfbvar_prior$freq == "m") - n_q <- sum(object$mfbvar_prior$freq == "q") - n_vars <- n_m + n_q - fcst_collapsed <- tibble(variable = rep(rep(object$names_col[1:n_m], each = length(incl_fcst)), object$n_reps), - iter = rep(1:object$n_reps, each = n_m*length(incl_fcst)), - fcst = c(object$Z_fcst[incl_fcst,1:n_m,]), - fcst_date = rep(as.Date(as.character(ret_names)), n_m*object$n_reps), - freq = rep(rep(rep("m", n_m), each = length(incl_fcst)), object$n_reps), - time = rep(nrow(object$Y)+object$n_fcst-max(incl_fcst)+incl_fcst, n_m*object$n_reps) - ) %>% - transmute(variable = variable, - iter = iter, - year = year(fcst_date), - quarter = quarter(fcst_date), - fcst_date = fcst_date, - fcst = fcst, - freq = freq, - time = time) - if (aggregate_fcst) { - n_Lambda <- ncol(object$Lambda_)/nrow(object$Lambda_) - fcst_agg_required <- final_q+3-n_Lambda+1 - fcst_included <- nrow(object$Y)-object$n_lags+1 - fcst_agg_missing <- max(c(fcst_included - fcst_agg_required, 0)) - fcst_q <- array(0, dim = c(dim(object$Z_fcst)[1]+max(c(fcst_agg_missing, 0)), n_q, object$n_reps)) - if (fcst_agg_required < fcst_included) { - ret_names_q <- c(ret_names_q[1] %m+% months((-fcst_agg_missing):(-1)), - ret_names_q) - fcst_q[1:fcst_agg_missing, ,] <- object$Z[fcst_agg_required:(fcst_included-1), object$mfbvar_prior$freq == "q", , drop = FALSE] - } else { - if (nrow(fcst_q) > length(ret_names_q)) { - ret_names_q <- c(ret_names_q[1] %m+% months((-(nrow(fcst_q)-length(ret_names_q))):(-1)), - ret_names_q) - } + if (!("w" %in% object$freq) && 
!inherits(tmp, "warning")) { + final_est <- lubridate::ymd(rownames(object$Y)[nrow(object$Y)]) + fcst_start <- final_est %m+% months(1) + if (lubridate::days_in_month(final_est) == lubridate::day(final_est)) { + end_month <- TRUE } - fcst_q[(fcst_agg_missing+1):nrow(fcst_q), , ] <- object$Z_fcst[, object$mfbvar_prior$freq == "q", , drop = FALSE] - rownames(fcst_q) <- as.character(ret_names_q) - - end_of_quarter <- which(lubridate::month(ret_names_q) %% 3 == 0) - end_of_quarter <- end_of_quarter[end_of_quarter >= n_Lambda] - agg_fun <- function(fcst_q, Lambda_, end_of_quarter) { - fcst_q_agg <- array(0, dim = c(length(end_of_quarter), dim(fcst_q)[2:3])) - for (i in 1:object$n_reps) { - Z_i <- matrix(fcst_q[,,i], nrow = nrow(fcst_q), ncol = ncol(fcst_q)) - for (j in 1:length(end_of_quarter)) { - Z_ij <- matrix(t(Z_i[(((-n_Lambda+1):0)+end_of_quarter[j]), , drop = FALSE]), ncol = 1) - fcst_q_agg[j, , i] <- Lambda_ %*% Z_ij - } + + ret_names <- fcst_start %m+% months((-(length(incl_fcst)-object$n_fcst)):(object$n_fcst-1)) + ret_names_q <- fcst_start %m+% months((-(object$n_lags)):(object$n_fcst-1)) + + if (end_month) { + ret_names <- lubridate::ceiling_date(ret_names, unit = "months") - lubridate::days(1) + ret_names_q <- lubridate::ceiling_date(ret_names_q, unit = "months") - lubridate::days(1) + } + } else { + final_est <- nrow(object$Y) + fcst_start <- final_est + 1 + + ret_names <- fcst_start + (-(length(incl_fcst)-object$n_fcst)):(object$n_fcst-1) + + + if (aggregate_fcst) { + if ("w" %in% object$freq) { + warning("Because of ambiguities, forecasts are not aggregated when weekly data are included.") + } else { + stop("Dates must be provided to aggregate latent monthly forecasts to the quarterly frequency.") } - return(fcst_q_agg) } + } - fcst_q_agg <- agg_fun(fcst_q, object$Lambda_, end_of_quarter) - fcst_quarterly <- tibble(variable = rep(rep(object$names_col[(n_m+1):n_vars], each = nrow(fcst_q_agg)), object$n_reps), - iter = rep(1:object$n_reps, each = 
n_q*nrow(fcst_q_agg)), - fcst = c(fcst_q_agg), - fcst_date = rep(ret_names_q[end_of_quarter], n_q*object$n_reps), - freq = rep(rep(rep("q", n_q), each = nrow(fcst_q_agg)), object$n_reps), - time = rep(seq(final_q+3, by = 3, length.out = nrow(fcst_q_agg)), n_q*object$n_reps) - ) %>% - transmute(variable = variable, - iter = iter, - year = year(fcst_date), - quarter = quarter(fcst_date), - fcst_date = fcst_date, - fcst = fcst, - freq = freq, - time = time) + if (!aggregate_fcst) { + fcst_collapsed <- tibble(variable = rep(rep(as.character(object$names_col), each = length(incl_fcst)), object$n_reps/object$n_thin), + iter = rep(1:(object$n_reps/object$n_thin), each = n_vars*length(incl_fcst)), + fcst = c(object$Z_fcst[incl_fcst,,]), + fcst_date = rep(ret_names, n_vars*object$n_reps/object$n_thin), + freq = rep(rep(object$freq, each = length(incl_fcst)), object$n_reps/object$n_thin), + time = rep(nrow(object$Y)+object$n_fcst-max(incl_fcst)+incl_fcst, n_vars*object$n_reps/object$n_thin) + ) } else { - fcst_quarterly <- tibble(variable = rep(rep(object$names_col[(n_m+1):n_vars], each = length(incl_fcst)), object$n_reps), - iter = rep(1:object$n_reps, each = n_q*length(incl_fcst)), - fcst = c(object$Z_fcst[incl_fcst,(n_m+1):n_vars,]), - fcst_date = rep(as.Date(as.character(ret_names)), n_q*object$n_reps), - freq = rep(rep(rep("q", n_q), each = length(incl_fcst)), object$n_reps), - time = rep(nrow(object$Y)+object$n_fcst-max(incl_fcst)+incl_fcst, n_q*object$n_reps) - ) %>% + + fcst_collapsed <- tibble(variable = rep(rep(object$names_col[1:n_h], each = length(incl_fcst)), object$n_reps/object$n_thin), + iter = rep(1:(object$n_reps/object$n_thin), each = n_h*length(incl_fcst)), + fcst = c(object$Z_fcst[incl_fcst,1:n_h,]), + fcst_date = rep(as.Date(as.character(ret_names)), n_h*object$n_reps/object$n_thin), + freq = rep(rep(rep(object$mfbvar_prior$freqs[2], n_h), each = length(incl_fcst)), object$n_reps/object$n_thin), + time = 
rep(nrow(object$Y)+object$n_fcst-max(incl_fcst)+incl_fcst, n_h*object$n_reps/object$n_thin) + ) %>% transmute(variable = variable, iter = iter, year = year(fcst_date), @@ -1362,47 +1405,113 @@ predict.mfbvar <- function(object, fcst_start = NULL, aggregate_fcst = TRUE, pre fcst = fcst, freq = freq, time = time) + if (aggregate_fcst) { + n_Lambda <- ncol(object$Lambda_)/nrow(object$Lambda_) + fcst_agg_required <- final_l+3-n_Lambda+1 + fcst_included <- nrow(object$Y)-object$n_lags+1 + fcst_agg_missing <- max(c(fcst_included - fcst_agg_required, 0)) + fcst_q <- array(0, dim = c(dim(object$Z_fcst)[1]+max(c(fcst_agg_missing, 0)), n_l, object$n_reps/object$n_thin)) + if (fcst_agg_required < fcst_included) { + ret_names_q <- c(ret_names_q[1] %m+% months((-fcst_agg_missing):(-1)), + ret_names_q) + fcst_q[1:fcst_agg_missing, ,] <- object$Z[fcst_agg_required:(fcst_included-1), object$mfbvar_prior$freq == object$mfbvar_prior$freqs[1], , drop = FALSE] + } else { + if (nrow(fcst_q) > length(ret_names_q)) { + ret_names_q <- c(ret_names_q[1] %m+% months((-(nrow(fcst_q)-length(ret_names_q))):(-1)), + ret_names_q) + } + } + fcst_q[(fcst_agg_missing+1):nrow(fcst_q), , ] <- object$Z_fcst[, object$mfbvar_prior$freq == object$mfbvar_prior$freqs[1], , drop = FALSE] + rownames(fcst_q) <- as.character(ret_names_q) + + end_of_quarter <- which(lubridate::month(ret_names_q) %% 3 == 0) + end_of_quarter <- end_of_quarter[end_of_quarter >= n_Lambda] + agg_fun <- function(fcst_q, Lambda_, end_of_quarter) { + fcst_q_agg <- array(0, dim = c(length(end_of_quarter), dim(fcst_q)[2:3])) + for (i in 1:(object$n_reps/object$n_thin)) { + Z_i <- matrix(fcst_q[,,i], nrow = nrow(fcst_q), ncol = ncol(fcst_q)) + for (j in 1:length(end_of_quarter)) { + Z_ij <- matrix(t(Z_i[(((-n_Lambda+1):0)+end_of_quarter[j]), , drop = FALSE]), ncol = 1) + fcst_q_agg[j, , i] <- Lambda_ %*% Z_ij + } + } + return(fcst_q_agg) + } + + fcst_q_agg <- agg_fun(fcst_q, object$Lambda_, end_of_quarter) + + fcst_quarterly <- 
tibble(variable = rep(rep(object$names_col[(n_h+1):n_vars], each = nrow(fcst_q_agg)), object$n_reps/object$n_thin), + iter = rep(1:(object$n_reps/object$n_thin), each = n_l*nrow(fcst_q_agg)), + fcst = c(fcst_q_agg), + fcst_date = rep(ret_names_q[end_of_quarter], n_l*object$n_reps/object$n_thin), + freq = rep(rep(rep(object$mfbvar_prior$freqs[1], n_l), each = nrow(fcst_q_agg)), object$n_reps/object$n_thin), + time = rep(seq(final_l+3, by = 3, length.out = nrow(fcst_q_agg)), n_l*object$n_reps/object$n_thin) + ) %>% + transmute(variable = variable, + iter = iter, + year = year(fcst_date), + quarter = quarter(fcst_date), + fcst_date = fcst_date, + fcst = fcst, + freq = freq, + time = time) + + } else { + fcst_quarterly <- tibble(variable = rep(rep(object$names_col[(n_h+1):n_vars], each = length(incl_fcst)), object$n_reps/object$n_thin), + iter = rep(1:(object$n_reps/object$n_thin), each = n_l*length(incl_fcst)), + fcst = c(object$Z_fcst[incl_fcst,(n_h+1):n_vars,]), + fcst_date = rep(ret_names, n_l*object$n_reps/object$n_thin), + freq = rep(rep(rep(object$mfbvar_prior$freqs[1], n_l), each = length(incl_fcst)), object$n_reps/object$n_thin), + time = rep(nrow(object$Y)+object$n_fcst-max(incl_fcst)+incl_fcst, n_l*object$n_reps/object$n_thin) + ) %>% + transmute(variable = variable, + iter = iter, + year = year(fcst_date), + quarter = quarter(fcst_date), + fcst_date = fcst_date, + fcst = fcst, + freq = freq, + time = time) + } + + fcst_collapsed <- bind_rows(fcst_collapsed, fcst_quarterly) + } - fcst_collapsed <- bind_rows(fcst_collapsed, fcst_quarterly) if (!is.null(pred_bands) && !is.na(pred_bands)) { pred_quantiles <- c(0.5-pred_bands/2, 0.5, 0.5+pred_bands/2) fcst_collapsed <- group_by(fcst_collapsed, variable, time, fcst_date) %>% - summarize(lower = quantile(fcst, prob = pred_quantiles[1]), - median = quantile(fcst, prob = pred_quantiles[2]), - upper = quantile(fcst, prob = pred_quantiles[3])) %>% + summarize(lower = quantile(fcst, prob = pred_quantiles[1], names = 
FALSE), + median = quantile(fcst, prob = pred_quantiles[2], names = FALSE), + upper = quantile(fcst, prob = pred_quantiles[3], names = FALSE), + .groups = "keep") %>% ungroup() + } else { + fcst_collapsed <- fcst_collapsed[, c("variable", "time", "fcst_date", "fcst", "iter")] } + return(fcst_collapsed) } -predict.sfbvar <- function(object, fcst_start = NULL, pred_bands = 0.8, ...) { +predict.sfbvar <- function(object, pred_bands = 0.8, ...) { end_period <- FALSE sf_type <- unique(object$mfbvar_prior$freq) if (object$n_fcst==0) { stop("No forecasts exist in the provided object.") } - if (!is.null(fcst_start)) { - fcst_start <- as.Date(fcst_start) - if (lubridate::days_in_month(fcst_start) == lubridate::day(fcst_start)) { - end_period <- TRUE - } - } if (object$n_fcst > 0) { - if (!inherits(fcst_start, "Date")) { - tmp <- tryCatch(lubridate::ymd(rownames(object$Y)[nrow(object$Y)]), warning = function(cond) cond) - if (inherits(tmp, "warning")) { - stop("To summarize the forecasts, either fcst_start must be supplied or the rownames of Y be dates (YYYY-MM-DD).") + tmp <- tryCatch(lubridate::ymd(rownames(object$Y)[nrow(object$Y)]), warning = function(cond) cond) + if (inherits(tmp, "warning")) { + stop("To summarize the forecasts, proper dates must be provided in the input data.") + } else { + final_est <- lubridate::ymd(rownames(object$Y)[nrow(object$Y)]) + if (sf_type == "m") { + fcst_start <- final_est %m+% months(1) } else { - final_est <- lubridate::ymd(rownames(object$Y)[nrow(object$Y)]) - if (sf_type == "m") { - fcst_start <- final_est %m+% months(1) - } else { - fcst_start <- final_est %m+% months(3) - } - if (lubridate::days_in_month(final_est) == lubridate::day(final_est)) { - end_period <- TRUE - } + fcst_start <- final_est %m+% months(3) + } + if (lubridate::days_in_month(final_est) == lubridate::day(final_est)) { + end_period <- TRUE } } } @@ -1424,12 +1533,12 @@ predict.sfbvar <- function(object, fcst_start = NULL, pred_bands = 0.8, ...) 
{ ret_names <- lubridate::ceiling_date(ret_names, unit = "months") - lubridate::days(1) } - fcst_collapsed <- tibble(variable = rep(rep(object$names_col, each = length(incl_fcst)), object$n_reps), - iter = rep(1:object$n_reps, each = object$n_vars*length(incl_fcst)), + fcst_collapsed <- tibble(variable = rep(rep(object$names_col, each = length(incl_fcst)), object$n_reps/object$n_thin), + iter = rep(1:(object$n_reps/object$n_thin), each = object$n_vars*length(incl_fcst)), fcst = c(object$Z_fcst[incl_fcst,,]), - fcst_date = rep(as.Date(as.character(ret_names)), object$n_vars*object$n_reps), - freq = rep(rep(object$mfbvar_prior$freq, each = length(incl_fcst)), object$n_reps), - time = rep(nrow(object$Y)+object$n_fcst-max(incl_fcst)+incl_fcst, object$n_vars*object$n_reps) + fcst_date = rep(as.Date(as.character(ret_names)), object$n_vars*object$n_reps/object$n_thin), + freq = rep(rep(object$mfbvar_prior$freq, each = length(incl_fcst)), object$n_reps/object$n_thin), + time = rep(nrow(object$Y)+object$n_fcst-max(incl_fcst)+incl_fcst, object$n_vars*object$n_reps/object$n_thin) ) %>% transmute(variable = variable, iter = iter, @@ -1444,9 +1553,9 @@ predict.sfbvar <- function(object, fcst_start = NULL, pred_bands = 0.8, ...) { if (!is.null(pred_bands) && !is.na(pred_bands)) { pred_quantiles <- c(0.5-pred_bands/2, 0.5, 0.5+pred_bands/2) fcst_collapsed <- group_by(fcst_collapsed, variable, time, fcst_date) %>% - summarize(lower = quantile(fcst, prob = pred_quantiles[1]), - median = quantile(fcst, prob = pred_quantiles[2]), - upper = quantile(fcst, prob = pred_quantiles[3])) %>% + summarize(lower = quantile(fcst, prob = pred_quantiles[1], names = FALSE), + median = quantile(fcst, prob = pred_quantiles[2], names = FALSE), + upper = quantile(fcst, prob = pred_quantiles[3], names = FALSE)) %>% ungroup() } @@ -1462,9 +1571,9 @@ predict.sfbvar <- function(object, fcst_start = NULL, pred_bands = 0.8, ...) { #' @param nrow_facet number of rows in facet #' @param ... 
Currently not in use. #' @details The function plots the data. If the prior moments for the steady-state parameters are available in \code{x}, these are included. +#' @return A \code{\link[ggplot2]{ggplot}}. #' @examples -#' prior_obj <- set_prior(Y = mf_sweden[, 4:5], freq = c("m", "q"), -#' n_lags = 4, n_burnin = 20, n_reps = 20, n_fcst = 4) +#' prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20, n_fcst = 4) #' plot(prior_obj) plot.mfbvar_prior <- function(x, nrow_facet = NULL, ...){ @@ -1487,9 +1596,9 @@ plot.mfbvar_prior <- function(x, nrow_facet = NULL, ...){ if (ss_flag) { n_determ <- ncol(x$d) - ss_lower <- x$d %*% t(matrix(qnorm(ss_level[1], x$prior_psi_mean, diag(x$prior_psi_Omega)), ncol = n_determ)) - ss_median <- x$d %*% t(matrix(qnorm(0.5, x$prior_psi_mean, diag(x$prior_psi_Omega)), ncol = n_determ)) - ss_upper <- x$d %*% t(matrix(qnorm(ss_level[2], x$prior_psi_mean, diag(x$prior_psi_Omega)), ncol = n_determ)) + ss_lower <- x$d %*% t(matrix(qnorm(ss_level[1], x$prior_psi_mean, sqrt(diag(x$prior_psi_Omega))), ncol = n_determ)) + ss_median <- x$d %*% t(matrix(qnorm(0.5, x$prior_psi_mean, sqrt(diag(x$prior_psi_Omega))), ncol = n_determ)) + ss_upper <- x$d %*% t(matrix(qnorm(ss_level[2], x$prior_psi_mean, sqrt(diag(x$prior_psi_Omega))), ncol = n_determ)) ss <- data.frame(expand.grid(time = as.Date(rownames(x$Y)), variable = names_col), lower = c(ss_lower), median = c(ss_median), upper = c(ss_upper)) diff --git a/R/interval_to_moments.R b/R/interval_to_moments.R index 1dff48c..a4a3294 100644 --- a/R/interval_to_moments.R +++ b/R/interval_to_moments.R @@ -4,7 +4,6 @@ #' @templateVar prior_psi_int TRUE #' @param alpha \code{100*(1-alpha)} is the prior probability of the interval #' @template man_template -#' @keywords internal #' @return A list with two components: #' \item{prior_psi_mean}{The prior mean of psi} #' \item{prior_psi_Omega}{The prior covariance matrix of psi} diff --git a/R/list_to_matrix.R b/R/list_to_matrix.R new file mode 100644 index 
0000000..3a4310d --- /dev/null +++ b/R/list_to_matrix.R @@ -0,0 +1,92 @@ + +list_to_matrix <- function(Y_in) { + + if (all(sapply(Y_in, function(x) inherits(x, "ts"))) || all(sapply(Y_in, function(x) inherits(x, "zoo")))) { + if (all(sapply(Y_in, function(x) inherits(x, "ts")))) { + zoofun <- function(x) { + if (frequency(x) == 4) { + if (is.null(dim(x))) { + zoo::zoo(as.numeric(x), as.Date(zoo::as.Date.ts(x) %m+% months(2))) + } else { + zoo::zoo(as.matrix(x), as.Date(zoo::as.Date.ts(x) %m+% months(2))) + } + } else if (frequency(x) == 12) { + if (is.null(dim(x))) { + zoo::zoo(as.numeric(x), as.Date(zoo::as.Date.ts(x))) + } else { + zoo::zoo(as.matrix(x), as.Date(zoo::as.Date.ts(x))) + + } + } else { + stop("Time series objects can only include monthly and/or quarterly time series.") + } + } + + } else if (all(sapply(Y_in, function(x) inherits(x, "zooreg")))) { + zoofun <- function(x) { + if (frequency(x) == 4) { + if (is.null(dim(x))) { + zoo::zoo(as.numeric(x), as.Date(zoo::as.Date(zoo::index(x)) %m+% months(2))) + } else { + zoo::zoo(as.matrix(x), as.Date(zoo::as.Date(zoo::index(x)) %m+% months(2))) + } + } else if (frequency(x) == 12) { + if (is.null(dim(x))) { + zoo::zoo(as.numeric(x), as.Date(zoo::as.Date(zoo::index(x)))) + } else { + zoo::zoo(as.matrix(x), as.Date(zoo::as.Date(zoo::index(x)))) + } + } else { + stop("Time series objects can only include monthly and/or quarterly time series.") + } + } + } + zoolist <- lapply(Y_in, zoofun) + reducedlist <- Reduce(zoo::merge.zoo, zoolist) + Y <- as.matrix(reducedlist) + rownames(Y) <- as.character(time(reducedlist)) + dim_null <- sapply(zoolist, function(x) is.null(dim(x))) + if (all(dim_null)) { + colnames(Y) <- names(zoolist) + } else if (all(!dim_null)) { + colnames(Y) <- Reduce(c, lapply(zoolist, colnames)) + } else { + name_vec <- c() + for (iter in 1:length(dim_null)) { + if (dim_null[iter]) { + name_vec <- c(name_vec, names(zoolist)[iter]) + } else { + name_vec <- c(name_vec, colnames(zoolist[[iter]])) + 
} + } + colnames(Y) <- name_vec + } + + if (all(dim_null)) { + zoolistfreq <- sapply(Y_in, frequency) + } else if (all(!dim_null)) { + zoolistfreq <- sapply(Y_in, frequency) + zoolistn <- sapply(Y_in, NCOL) + zoolistfreq <- Reduce(c, mapply(function(x, y) rep(x, each = y), zoolistfreq, zoolistn, SIMPLIFY = FALSE)) + + } else { + zoolistfreq <- c() + for (iter in 1:length(dim_null)) { + if (dim_null[iter]) { + zoolistfreq <- c(zoolistfreq, frequency(Y_in[[iter]])) + } else { + zoolistfreq <- c(zoolistfreq, rep(frequency(Y_in[[iter]]), each = ncol(Y_in[[iter]]))) + } + } + } + names(zoolistfreq) <- NULL + if (all(zoolistfreq %in% c(4, 12))) { + freq <- ifelse(zoolistfreq == 4, "q", "m") + } else { + stop("Only monthly and quarterly frequencies are allowed as time series objects.") + } + } else { + + } + return(list(Y, freq)) +} diff --git a/R/mcmc_sampler.R b/R/mcmc_sampler.R index e0d028d..7782112 100644 --- a/R/mcmc_sampler.R +++ b/R/mcmc_sampler.R @@ -5,682 +5,7 @@ #' #' @param x argument to dispatch on (of class \code{prior_obj}) #' @param ... additional named arguments passed on to the methods - +#' @noRd mcmc_sampler <- function(x, ...) { UseMethod("mcmc_sampler") } - -#' @rdname mcmc_sampler -mcmc_sampler.mfbvar_ss_iw <- function(x, ...) 
{ - - n_vars <- ncol(x$Y) - if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$prior_psi_Omega) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { - test_all <- sapply(x, is.null) - test_sub <- test_all[c("Y", "d", "prior_psi_mean", "prior_psi_Omega", "n_lags", "n_burnin", "n_reps")] - stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) - } - if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) { - stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") - } - - priors <- mfbvar:::prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, - n_lags = x$n_lags, prior_nu = n_vars + 2) - prior_Pi_mean <- priors$prior_Pi_mean - prior_Pi_Omega <- priors$prior_Pi_Omega - prior_S <- priors$prior_S - - Y <- x$Y - d <- x$d - d_fcst <- x$d_fcst - freq <- x$freq - prior_psi_mean <- x$prior_psi_mean - prior_psi_Omega <- x$prior_psi_Omega - n_fcst <- x$n_fcst - check_roots <- x$check_roots - verbose <- x$verbose - - add_args <- list(...) 
- n_reps <- add_args$n_reps - n_thin <- ifelse(is.null(add_args$n_thin),1,add_args$n_thin) - init <- add_args$init - init_Pi <- init$init_Pi - init_Sigma <- init$init_Sigma - init_psi <- init$init_psi - init_Z <- init$init_Z - - # n_vars: number of variables - # n_lags: number of lags - # n_determ: number of deterministic variables - # n_T: sample size (full sample) - # n_T_: sample size (reduced sample) - n_vars <- dim(Y)[2] - n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 - n_q <- sum(freq == "q") - n_m <- sum(freq == "m") - if (n_q == 0 || n_q == n_vars) { - complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) - Y <- Y[complete_quarters, ] - d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) - d <- d[complete_quarters, , drop = FALSE] - } - y_in_p <- Y[-(1:n_lags), ] - if (n_q < n_vars) { - T_b <- min(apply(y_in_p[,1:n_m], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) - } else { - T_b <- nrow(y_in_p) - } - if (n_q > 0) { - if (x$aggregation == "average") { - Lambda_ <- mfbvar:::build_Lambda(rep("average", n_q), 3) - } else { - Lambda_ <- mfbvar:::build_Lambda(rep("triangular", n_q), 5)} - } else { - Lambda_ <- matrix(0, 1, 3) - } - - - n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) - n_determ <- dim(d)[2] - n_T <- dim(Y)[1]# - n_lags - n_T_ <- n_T - n_pseudolags - - - - - - ################################################################ - ### Preallocation - # Pi and Sigma store their i-th draws in the third dimension, psi - # is vectorized so it has its i-th draw stored in the i-th row - # Pi: p * pk * n_reps, each [,,i] stores Pi' - # Sigma: p * p * n_reps - # psi: n_reps * p - # Z: T * p * n_reps - ### If forecasting (h is horizon): - # Z_fcst: hk * p * n_reps - # d_fcst_lags: hk * m - ### If root checking: - # roots: n_reps vector - # num_tries: n_reps vector - ### If smoothing of the state vector: - # smoothed_Z: T * p * n_reps - - Pi <- array(NA, dim = c(n_vars, n_vars * n_lags, 
n_reps/n_thin)) - Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) - psi <- array(NA, dim = c(n_reps/n_thin, n_vars * n_determ)) - Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) - Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) - if (n_fcst > 0) { - rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) - Z_fcst[,,1] <- 0 - } else { - rownames(Z_fcst) <- (n_T-n_lags+1):n_T - } - d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) - d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] - roots <- vector("numeric", n_reps/n_thin) - num_tries <- roots - - - - ################################################################ - ### MCMC sampling initialization - - # If the initial values are not provided, the missing values in - # Z are filled with the next observed value and Pi, Sigma and - # psi are then computed using maximum likelihood - - # This allows the user to run the MCMC sampler for a burn-in - # period, then use the final draw of that as initialization - # for multiple chains - - if (is.null(init_Z)) { - Z[,, 1] <- mfbvar:::fill_na(Y) - } else { - if (all(dim(Z[,, 1]) == dim(init_Z))) { - Z[,, 1] <- init_Z - } else { - stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x "))) - } - - } - - ols_results <- tryCatch(mfbvar:::ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), - error = function(cond) NULL) - if (is.null(ols_results)) { - ols_results <- list() - ols_results$Pi <- prior_Pi_mean - ols_results$S <- prior_S - ols_results$psi <- prior_psi_mean - } - - if (is.null(init_Pi)) { - Pi[,, 1] <- ols_results$Pi - } else { - if (all(dim(Pi[,, 1]) == dim(init_Pi))) { - Pi[,, 1] <- init_Pi - } else { - stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x "))) - } 
- } - - # Compute the maximum eigenvalue of the initial Pi - if (check_roots == TRUE) { - Pi_comp <- mfbvar:::build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) - roots[1] <- mfbvar:::max_eig_cpp(Pi_comp) - } - - if (is.null(init_Sigma)) { - Sigma[,, 1] <- ols_results$S - } else { - if (all(dim(Sigma[,,1]) == dim(init_Sigma))) { - Sigma[,, 1] <- init_Sigma - } else { - stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x "))) - } - } - - if (is.null(init_psi)) { - if (roots[1] < 1) { - psi[1, ] <- ols_results$psi - } else { - psi[1, ] <- prior_psi_mean - } - } else { - if (length(psi[1, ]) == length(init_psi)) { - psi[1,] <- init_psi - } else { - stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "), ", but should be ", paste(length(psi[1,]), collapse = " x "))) - } - } - - ################################################################ - ### Compute terms which do not vary in the sampler - - # Create D (does not vary in the sampler), and find roots of Pi - # if requested - D_mat <- mfbvar:::build_DD(d = d, n_lags = n_lags) - dt <- d[-(1:n_lags), , drop = FALSE] - d1 <- d[1:n_lags, , drop = FALSE] - psi_i <- psi[1, ] - Pi_i <- Pi[,, 1] - Sigma_i <- Sigma[,, 1] - Z_i <- Z[-(1:n_lags),, 1] - mu_mat <- dt %*% t(matrix(psi_i, nrow = n_vars)) - - - # For the posterior of Pi - inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega)) - Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean - - # For the posterior of psi - inv_prior_psi_Omega <- solve(prior_psi_Omega) - inv_prior_psi_Omega_mean <- inv_prior_psi_Omega %*% prior_psi_mean - Z_1 <- Z[1:n_pseudolags,, 1] - - mfbvar:::mcmc_ss_iw(Y[-(1:n_lags),],Pi,Sigma,psi,Z,Z_fcst,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, - prior_S,D_mat,dt,d1,d_fcst_lags,inv_prior_psi_Omega,inv_prior_psi_Omega_mean,check_roots,Z_1,n_reps, - 
n_q,T_b,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose) - - # mfbvar:::mcmc_ssng_iw(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,Lambda_comp,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, - # prior_S,D_mat,dt,d1,d_fcst_lags,prior_psi_mean,0.01,0.01,1,check_roots,Z_1,n_reps, - # n_q,T_b,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose) - - ################################################################ - ### Prepare the return object - return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, roots = NULL, num_tries = NULL, - Z_fcst = NULL, smoothed_Z = NULL, n_determ = n_determ, - n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, - prior_S = prior_S, prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, - prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, Lambda_ = Lambda_, - init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin])) - - if (check_roots == TRUE) { - return_obj$roots <- roots - return_obj$num_tries <- num_tries - } - if (n_fcst > 0) { - return_obj$Z_fcst <- Z_fcst - } - - return(return_obj) - -} - -#' @rdname mcmc_sampler -mcmc_sampler.mfbvar_ssng_iw <- function(x, ...) 
{ - - n_vars <- ncol(x$Y) - if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { - test_all <- sapply(x, is.null) - test_sub <- test_all[c("Y", "d", "prior_psi_mean", "n_lags", "n_burnin", "n_reps")] - stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) - } - if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) { - stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") - } - - priors <- mfbvar:::prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, - n_lags = x$n_lags, prior_nu = n_vars + 2) - prior_Pi_mean <- priors$prior_Pi_mean - prior_Pi_Omega <- priors$prior_Pi_Omega - prior_S <- priors$prior_S - - Y <- x$Y - d <- x$d - d_fcst <- x$d_fcst - freq <- x$freq - prior_psi_mean <- x$prior_psi_mean - prior_psi_Omega <- x$prior_psi_Omega - n_fcst <- x$n_fcst - check_roots <- x$check_roots - verbose <- x$verbose - - add_args <- list(...) 
- n_reps <- add_args$n_reps - n_thin <- ifelse(is.null(add_args$n_thin),1,add_args$n_thin) - init <- add_args$init - init_Pi <- init$init_Pi - init_Sigma <- init$init_Sigma - init_psi <- init$init_psi - init_Z <- init$init_Z - init_omega <- init$init_omega - init_phi_mu <- init$init_phi_mu - init_lambda_mu <- init$init_lambda_mu - - # n_vars: number of variables - # n_lags: number of lags - # n_determ: number of deterministic variables - # n_T: sample size (full sample) - # n_T_: sample size (reduced sample) - n_vars <- dim(Y)[2] - n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 - n_q <- sum(freq == "q") - n_m <- sum(freq == "m") - if (n_q == 0 || n_q == n_vars) { - complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) - Y <- Y[complete_quarters, ] - d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) - d <- d[complete_quarters, , drop = FALSE] - } - y_in_p <- Y[-(1:n_lags), ] - if (n_q < n_vars) { - T_b <- min(apply(y_in_p[,1:n_m], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) - } else { - T_b <- nrow(y_in_p) - } - if (n_q > 0) { - if (x$aggregation == "average") { - Lambda_ <- mfbvar:::build_Lambda(rep("average", n_q), 3) - } else { - Lambda_ <- mfbvar:::build_Lambda(rep("triangular", n_q), 5)} - } else { - Lambda_ <- matrix(0, 1, 3) - } - - - n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) - n_determ <- dim(d)[2] - n_T <- dim(Y)[1]# - n_lags - n_T_ <- n_T - n_pseudolags - - c0 <- ifelse(is.null(x$c0), 0.01, x$c0) - c1 <- ifelse(is.null(x$c1), 0.01, x$c1) - s <- ifelse(is.null(x[["s"]]), 1, x$s) - - ################################################################ - ### Preallocation - # Pi and Sigma store their i-th draws in the third dimension, psi - # is vectorized so it has its i-th draw stored in the i-th row - # Pi: p * pk * n_reps, each [,,i] stores Pi' - # Sigma: p * p * n_reps - # psi: n_reps * p - # Z: T * p * n_reps - ### If forecasting (h is horizon): - # Z_fcst: hk * p * n_reps - 
# d_fcst_lags: hk * m - ### If root checking: - # roots: n_reps vector - # num_tries: n_reps vector - ### If smoothing of the state vector: - # smoothed_Z: T * p * n_reps - - Pi <- array(NA, dim = c(n_vars, n_vars * n_lags, n_reps/n_thin)) - Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) - psi <- array(NA, dim = c(n_reps/n_thin, n_vars * n_determ)) - Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) - omega <- matrix(NA, nrow = n_reps/n_thin, ncol = n_vars * n_determ) - phi_mu <- rep(NA, n_reps/n_thin) - lambda_mu <- rep(NA, n_reps/n_thin) - Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) - if (n_fcst > 0) { - rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) - Z_fcst[,,1] <- 0 - } else { - rownames(Z_fcst) <- (n_T-n_lags+1):n_T - } - d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) - d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] - roots <- vector("numeric", n_reps/n_thin) - num_tries <- roots - - - - ################################################################ - ### MCMC sampling initialization - - # If the initial values are not provided, the missing values in - # Z are filled with the next observed value and Pi, Sigma and - # psi are then computed using maximum likelihood - - # This allows the user to run the MCMC sampler for a burn-in - # period, then use the final draw of that as initialization - # for multiple chains - - if (is.null(init_Z)) { - Z[,, 1] <- mfbvar:::fill_na(Y) - } else { - if (all(dim(Z[,, 1]) == dim(init_Z))) { - Z[,, 1] <- init_Z - } else { - stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x "))) - } - - } - - ols_results <- tryCatch(mfbvar:::ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), - error = function(cond) NULL) - if (is.null(ols_results)) { - ols_results <- list() - ols_results$Pi <- 
prior_Pi_mean - ols_results$S <- prior_S - ols_results$psi <- prior_psi_mean - } - - if (is.null(init_Pi)) { - Pi[,, 1] <- ols_results$Pi - } else { - if (all(dim(Pi[,, 1]) == dim(init_Pi))) { - Pi[,, 1] <- init_Pi - } else { - stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x "))) - } - } - - # Compute the maximum eigenvalue of the initial Pi - if (check_roots == TRUE) { - Pi_comp <- mfbvar:::build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) - roots[1] <- mfbvar:::max_eig_cpp(Pi_comp) - } - - if (is.null(init_Sigma)) { - Sigma[,, 1] <- ols_results$S - } else { - if (all(dim(Sigma[,,1]) == dim(init_Sigma))) { - Sigma[,, 1] <- init_Sigma - } else { - stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x "))) - } - } - - if (is.null(init_psi)) { - if (roots[1] < 1) { - psi[1, ] <- ols_results$psi - } else { - psi[1, ] <- prior_psi_mean - } - } else { - if (length(psi[1, ]) == length(init_psi)) { - psi[1,] <- init_psi - } else { - stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "), ", but should be ", paste(length(psi[1,]), collapse = " x "))) - } - } - - if (is.null(init_omega)) { - if (is.null(prior_psi_Omega)) { - omega[1, ] <- diag(prior_psi_Omega) - } else { - omega[1, ] <- rep(0.1, n_determ*n_vars) - } - } else { - omega[1, ] <- init_omega - } - if (is.null(init_phi_mu)) { - phi_mu[1] <- 1 - } else { - phi_mu[1] <- init_phi_mu - } - if (is.null(init_lambda_mu)) { - lambda_mu[1] <- 1 - } else { - lambda_mu[1] <- init_lambda_mu - } - - ################################################################ - ### Compute terms which do not vary in the sampler - - # Create D (does not vary in the sampler), and find roots of Pi - # if requested - D_mat <- mfbvar:::build_DD(d = d, n_lags = n_lags) - dt <- d[-(1:n_lags), , drop = FALSE] - d1 <- 
d[1:n_lags, , drop = FALSE] - psi_i <- psi[1, ] - Pi_i <- Pi[,, 1] - Sigma_i <- Sigma[,, 1] - Z_i <- Z[-(1:n_lags),, 1] - mu_mat <- dt %*% t(matrix(psi_i, nrow = n_vars)) - omega_i <- omega[1, ] - phi_mu_i <- phi_mu[1] - lambda_mu_i <- lambda_mu[1] - - - # For the posterior of Pi - inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega)) - Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean - - Z_1 <- Z[1:n_pseudolags,, 1] - - mfbvar:::mcmc_ssng_iw(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, - prior_S,D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1,n_reps, - n_q,T_b,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose) - - ################################################################ - ### Prepare the return object - return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, phi_mu = phi_mu, lambda_mu = lambda_mu, omega = omega, - roots = NULL, num_tries = NULL, - Z_fcst = NULL, smoothed_Z = NULL, n_determ = n_determ, - n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, - prior_S = prior_S, prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, - prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, Lambda_ = Lambda_, - init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin], init_omega = omega[n_reps/n_thin, ], init_lambda_mu = lambda_mu[n_reps/n_thin], init_phi_mu = phi_mu[n_reps/n_thin])) - - if (check_roots == TRUE) { - return_obj$roots <- roots - return_obj$num_tries <- num_tries - } - if (n_fcst > 0) { - return_obj$Z_fcst <- Z_fcst - } - - return(return_obj) - -} - -#' @rdname mcmc_sampler -mcmc_sampler.mfbvar_minn_iw <- function(x, ...){ - - n_vars <- ncol(x$Y) - if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { - 
test_all <- sapply(x, is.null) - test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")] - stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) - } - - prior_nu <- n_vars + 2 - priors <- mfbvar:::prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, - n_lags = x$n_lags, prior_nu = prior_nu) - prior_Pi_mean <- priors$prior_Pi_mean - prior_Pi_Omega <- priors$prior_Pi_Omega - prior_S <- priors$prior_S - - Y <- x$Y - freq <- x$freq - n_fcst <- x$n_fcst - verbose <- x$verbose - n_lags <- x$n_lags - lambda4 <- x$lambda4 - - # Add terms for constant - prior_Pi_Omega <- diag(c(x$lambda1^2*lambda4^2, diag(prior_Pi_Omega))) - prior_Pi_mean <- rbind(0, prior_Pi_mean) - - add_args <- list(...) - n_reps <- add_args$n_reps - n_thin <- ifelse(!is.null(add_args$n_thin), add_args$n_thin, ifelse(!is.null(x$n_thin), x$n_thin, 1)) - init <- add_args$init - init_Pi <- init$init_Pi - init_Sigma <- init$init_Sigma - init_Z <- init$init_Z - - # n_vars: number of variables - # n_lags: number of lags - # n_determ: number of deterministic variables - # n_T: sample size (full sample) - # n_T_: sample size (reduced sample) - - n_q <- sum(freq == "q") - if (n_q < n_vars) { - T_b <- max(which(!apply(apply(Y[, freq == "m", drop = FALSE], 2, is.na), 1, any))) - } else { - T_b <- nrow(Y) - } - if (n_q == 0 || n_q == n_vars) { - complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) - Y <- Y[complete_quarters, ] - } - if (n_q > 0) { - if (x$aggregation == "average") { - Lambda_ <- mfbvar:::build_Lambda(rep("average", n_q), 3) - } else { - Lambda_ <- mfbvar:::build_Lambda(rep("triangular", n_q), 5)} - } else { - Lambda_ <- matrix(0, 1, 3) - } - - n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) - n_T <- dim(Y)[1]# - n_lags - n_T_ <- n_T - n_pseudolags - d <- matrix(1, nrow = nrow(Y), ncol = 1) - post_nu <- n_T_ + prior_nu - - ################################################################ - ### 
Preallocation - # Pi and Sigma store their i-th draws in the third dimension, psi - # is vectorized so it has its i-th draw stored in the i-th row - # Pi: p * pk * n_reps, each [,,i] stores Pi' - # Sigma: p * p * n_reps - # psi: n_reps * p - # Z: T * p * n_reps - ### If forecasting (h is horizon): - # Z_fcst: hk * p * n_reps - # d_fcst_lags: hk * m - ### If root checking: - # roots: n_reps vector - # num_tries: n_reps vector - ### If smoothing of the state vector: - # smoothed_Z: T * p * n_reps - - Pi <- array(NA, dim = c(n_vars, n_vars * n_lags + 1, n_reps/n_thin)) - Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) - Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) - - Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) - if (n_fcst > 0) { - rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) - Z_fcst[,,1] <- 0 - } else { - rownames(Z_fcst) <- (n_T-n_lags+1):n_T - } - - - - - ################################################################ - ### MCMC sampling initialization - - # If the initial values are not provided, the missing values in - # Z are filled with the next observed value and Pi, Sigma and - # psi are then computed using maximum likelihood - - # This allows the user to run the MCMC sampler for a burn-in - # period, then use the final draw of that as initialization - # for multiple chains - - if (is.null(init_Z)) { - Z[,, 1] <- mfbvar:::fill_na(Y) - } else { - if (all(dim(Z[,, 1]) == dim(init_Z))) { - Z[,, 1] <- init_Z - } else { - stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x "))) - } - - } - - ols_results <- mfbvar:::ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = 1) - - if (is.null(init_Pi)) { - Pi[,, 1] <- cbind(ols_results$const, ols_results$Pi) - } else { - if (all(dim(Pi[,, 1]) == dim(init_Pi))) { - Pi[,, 1] <- init_Pi - } else { - stop(paste0("The dimension of 
init_Pi is ", paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x "))) - } - } - - # Compute the maximum eigenvalue of the initial Pi - - if (is.null(init_Sigma)) { - Sigma[,, 1] <- ols_results$S - } else { - if (all(dim(Sigma[,,1]) == dim(init_Sigma))) { - Sigma[,, 1] <- init_Sigma - } else { - stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x "))) - } - } - - ################################################################ - ### Compute terms which do not vary in the sampler - - Z_1 <- Z[1:n_pseudolags,, 1] - - # For the posterior of Pi - inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega)) - Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean - - mfbvar:::mcmc_minn_iw(Y[-(1:n_lags),],Pi,Sigma,Z,Z_fcst,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega, - Omega_Pi,prior_Pi_mean,prior_S,Z_1,n_reps,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst, - n_thin,verbose,2) - - - ################################################################ - ### Prepare the return object - return_obj <- list(Pi = Pi, Sigma = Sigma, psi = NULL, Z = Z, roots = NULL, num_tries = NULL, - Z_fcst = NULL, smoothed_Z = NULL, n_determ = 1, - n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, - prior_S = prior_S, prior_nu = prior_nu, post_nu = prior_nu + n_T_, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, - prior_psi_Omega = NULL, prior_psi_mean = NULL, n_reps = n_reps, Lambda_ = Lambda_, freq = freq, - init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_Z = Z[,, n_reps/n_thin])) - - if (n_fcst>0) { - return_obj$Z_fcst <- Z_fcst - } - - return(return_obj) - -} - - diff --git a/R/mcmc_sampler_csv.R b/R/mcmc_sampler_csv.R index 7c36654..5311008 100644 --- a/R/mcmc_sampler_csv.R +++ b/R/mcmc_sampler_csv.R @@ -7,7 +7,7 @@ mcmc_sampler.mfbvar_minn_csv <- function(x, ...){ } prior_nu <- n_vars 
+ 2 - priors <- mfbvar:::prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, n_lags = x$n_lags, prior_nu = prior_nu) prior_Pi_mean <- priors$prior_Pi_mean prior_Pi_Omega <- priors$prior_Pi_Omega @@ -30,8 +30,9 @@ mcmc_sampler.mfbvar_minn_csv <- function(x, ...){ prior_df <- x$prior_sigma2[2] add_args <- list(...) - n_reps <- add_args$n_reps - n_thin <- ifelse(!is.null(add_args$n_thin), add_args$n_thin, ifelse(!is.null(x$n_thin), x$n_thin, 1)) + n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) init <- add_args$init init_Pi <- init$init_Pi init_Sigma <- init$init_Sigma @@ -46,9 +47,11 @@ mcmc_sampler.mfbvar_minn_csv <- function(x, ...){ # n_T: sample size (full sample) # n_T_: sample size (reduced sample) - n_q <- sum(freq == "q") + freqs <- x$freqs + Lambda_ <- x$Lambda_ + n_q <- sum(freq == freqs[1]) if (n_q < n_vars) { - T_b <- max(which(!apply(apply(Y[, freq == "m", drop = FALSE], 2, is.na), 1, any))) + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) } else { T_b <- nrow(Y) } @@ -56,14 +59,6 @@ mcmc_sampler.mfbvar_minn_csv <- function(x, ...){ complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) Y <- Y[complete_quarters, ] } - if (n_q > 0) { - if (x$aggregation == "average") { - Lambda_ <- mfbvar:::build_Lambda(rep("average", n_q), 3) - } else { - Lambda_ <- mfbvar:::build_Lambda(rep("triangular", n_q), 5)} - } else { - Lambda_ <- matrix(0, 1, 3) - } n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) @@ -117,7 +112,7 @@ mcmc_sampler.mfbvar_minn_csv <- function(x, ...){ # for multiple chains if (is.null(init_Z)) { - Z[,, 1] <- mfbvar:::fill_na(Y) + Z[,, 1] <- fill_na(Y) } else { if (all(dim(Z[,, 1]) == dim(init_Z))) { Z[,, 1] <- init_Z @@ -127,7 +122,7 @@ mcmc_sampler.mfbvar_minn_csv <- function(x, ...){ } - 
ols_results <- mfbvar:::ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = 1) + ols_results <- ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = 1) if (is.null(init_Pi)) { Pi[,, 1] <- cbind(ols_results$const, ols_results$Pi) @@ -178,17 +173,18 @@ mcmc_sampler.mfbvar_minn_csv <- function(x, ...){ inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega)) Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean - set.seed(1) - mfbvar:::mcmc_minn_csv(Y[-(1:n_lags),],Pi,Sigma,Z,Z_fcst,phi,sigma,f,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega, + mcmc_minn_csv(Y[-(1:n_lags),],Pi,Sigma,Z,Z_fcst,phi,sigma,f,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega, Omega_Pi,prior_Pi_mean,prior_S,Z_1,10,phi_invvar,phi_meaninvvar,prior_sigma2,prior_df, - n_reps,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_thin,verbose) - + n_reps,n_burnin,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_thin,verbose) + if (verbose) { + cat("\n") + } return_obj <- list(Pi = Pi, Sigma = Sigma, Z = Z, phi = phi, sigma = sigma, f = f, - Z_fcst = NULL, n_lags = n_lags, n_vars = n_vars, + Z_fcst = NULL, aggregation = x$aggregation, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, prior_S = prior_S, prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, - n_T = n_T, n_T_ = n_T_, n_reps = n_reps, Lambda_ = Lambda_, + n_T = n_T, n_T_ = n_T_, n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_, init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_Z = Z[,, n_reps/n_thin], @@ -215,7 +211,7 @@ mcmc_sampler.mfbvar_ss_csv <- function(x, ...) 
{ stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") } - priors <- mfbvar:::prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, n_lags = x$n_lags, prior_nu = n_vars + 2) prior_Pi_mean <- priors$prior_Pi_mean prior_Pi_Omega <- priors$prior_Pi_Omega @@ -237,8 +233,9 @@ mcmc_sampler.mfbvar_ss_csv <- function(x, ...) { prior_df <- x$prior_sigma2[2] add_args <- list(...) - n_reps <- add_args$n_reps - n_thin <- ifelse(is.null(add_args$n_thin),1,add_args$n_thin) + n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) init <- add_args$init init_Pi <- init$init_Pi init_Sigma <- init$init_Sigma @@ -255,28 +252,22 @@ mcmc_sampler.mfbvar_ss_csv <- function(x, ...) { # n_T_: sample size (reduced sample) n_vars <- dim(Y)[2] n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 - n_q <- sum(freq == "q") - n_m <- sum(freq == "m") + freqs <- x$freqs + Lambda_ <- x$Lambda_ + + n_q <- sum(freq == freqs[1]) + n_m <- n_vars - n_q + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) + } if (n_q == 0 || n_q == n_vars) { complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) Y <- Y[complete_quarters, ] d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) d <- d[complete_quarters, , drop = FALSE] } - y_in_p <- Y[-(1:n_lags), ] - if (n_q < n_vars) { - T_b <- min(apply(y_in_p[,1:n_m], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) - } else { - T_b <- nrow(y_in_p) - } - if (n_q > 0) { - if (x$aggregation == "average") { - Lambda_ <- mfbvar:::build_Lambda(rep("average", n_q), 3) - } else { - Lambda_ <- mfbvar:::build_Lambda(rep("triangular", n_q), 5)} - } else { - Lambda_ <- matrix(0, 1, 3) - } n_pseudolags <- max(c(n_lags, 
ncol(Lambda_)/nrow(Lambda_))) @@ -334,7 +325,7 @@ mcmc_sampler.mfbvar_ss_csv <- function(x, ...) { # for multiple chains if (is.null(init_Z)) { - Z[,, 1] <- mfbvar:::fill_na(Y) + Z[,, 1] <- fill_na(Y) } else { if (all(dim(Z[,, 1]) == dim(init_Z))) { Z[,, 1] <- init_Z @@ -344,7 +335,7 @@ mcmc_sampler.mfbvar_ss_csv <- function(x, ...) { } - ols_results <- tryCatch(mfbvar:::ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), + ols_results <- tryCatch(ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), error = function(cond) NULL) if (is.null(ols_results)) { ols_results <- list() @@ -365,8 +356,8 @@ mcmc_sampler.mfbvar_ss_csv <- function(x, ...) { # Compute the maximum eigenvalue of the initial Pi if (check_roots == TRUE) { - Pi_comp <- mfbvar:::build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) - roots[1] <- mfbvar:::max_eig_cpp(Pi_comp) + Pi_comp <- build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) + roots[1] <- max_eig_cpp(Pi_comp) } if (is.null(init_Sigma)) { @@ -416,7 +407,7 @@ mcmc_sampler.mfbvar_ss_csv <- function(x, ...) { # Create D (does not vary in the sampler), and find roots of Pi # if requested - D_mat <- mfbvar:::build_DD(d = d, n_lags = n_lags) + D_mat <- build_DD(d = d, n_lags = n_lags) dt <- d[-(1:n_lags), , drop = FALSE] d1 <- d[1:n_lags, , drop = FALSE] @@ -425,19 +416,27 @@ mcmc_sampler.mfbvar_ss_csv <- function(x, ...) 
{ Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean # For the posterior of psi - inv_prior_psi_Omega <- solve(prior_psi_Omega) - inv_prior_psi_Omega_mean <- inv_prior_psi_Omega %*% prior_psi_mean Z_1 <- Z[1:n_pseudolags,, 1] - mfbvar:::mcmc_ss_csv(Y[-(1:n_lags),],Pi,Sigma,psi,Z,Z_fcst,phi,sigma,f,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, - prior_S,D_mat,dt,d1,d_fcst_lags,inv_prior_psi_Omega,inv_prior_psi_Omega_mean,check_roots,Z_1, - 10,phi_invvar,phi_meaninvvar,prior_sigma2,prior_df,n_reps,n_q,T_b,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose) + phi_mu <- matrix(0, 1, 1) + lambda_mu <- matrix(0, 1, 1) + omega <- matrix(diag(prior_psi_Omega), nrow = 1) + c0 <- 0 + c1 <- 0 + s <- 0 + mcmc_ssng_csv(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,phi,sigma,f,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, + prior_S,D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1, + 10,phi_invvar,phi_meaninvvar,prior_sigma2,prior_df,n_reps,n_burnin,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin, + verbose,FALSE) + if (verbose) { + cat("\n") + } return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, phi = phi, sigma = sigma, f = f, roots = NULL, num_tries = NULL, - Z_fcst = NULL, smoothed_Z = NULL, n_determ = n_determ, + Z_fcst = NULL, aggregation = x$aggregation, n_determ = n_determ, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, prior_S = prior_S, prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, - prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, Lambda_ = Lambda_, + prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_, init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin], init_phi = 
phi[n_reps/n_thin], init_sigma = sigma[n_reps/n_thin], init_f = f[n_reps/n_thin,])) if (check_roots == TRUE) { @@ -463,7 +462,7 @@ mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) { stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") } - priors <- mfbvar:::prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, n_lags = x$n_lags, prior_nu = n_vars + 2) prior_Pi_mean <- priors$prior_Pi_mean prior_Pi_Omega <- priors$prior_Pi_Omega @@ -485,8 +484,9 @@ mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) { prior_df <- x$prior_sigma2[2] add_args <- list(...) - n_reps <- add_args$n_reps - n_thin <- ifelse(is.null(add_args$n_thin),1,add_args$n_thin) + n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) init <- add_args$init init_Pi <- init$init_Pi init_Sigma <- init$init_Sigma @@ -506,29 +506,22 @@ mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) 
{ # n_T_: sample size (reduced sample) n_vars <- dim(Y)[2] n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 - n_q <- sum(freq == "q") - n_m <- sum(freq == "m") + freqs <- x$freqs + Lambda_ <- x$Lambda_ + + n_q <- sum(freq == freqs[1]) + n_m <- n_vars - n_q + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) + } if (n_q == 0 || n_q == n_vars) { complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) Y <- Y[complete_quarters, ] d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) d <- d[complete_quarters, , drop = FALSE] } - y_in_p <- Y[-(1:n_lags), ] - if (n_q < n_vars) { - T_b <- min(apply(y_in_p[,1:n_m], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) - } else { - T_b <- nrow(y_in_p) - } - if (n_q > 0) { - if (x$aggregation == "average") { - Lambda_ <- mfbvar:::build_Lambda(rep("average", n_q), 3) - } else { - Lambda_ <- mfbvar:::build_Lambda(rep("triangular", n_q), 5)} - } else { - Lambda_ <- matrix(0, 1, 3) - } - n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) n_determ <- dim(d)[2] @@ -537,8 +530,8 @@ mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) { - c0 <- ifelse(is.null(x$c0), 0.01, x$c0) - c1 <- ifelse(is.null(x$c1), 0.01, x$c1) + c0 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[1]) + c1 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[2]) s <- ifelse(is.null(x[["s"]]), -10, x$s) ################################################################ ### Preallocation @@ -593,7 +586,7 @@ mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) { # for multiple chains if (is.null(init_Z)) { - Z[,, 1] <- mfbvar:::fill_na(Y) + Z[,, 1] <- fill_na(Y) } else { if (all(dim(Z[,, 1]) == dim(init_Z))) { Z[,, 1] <- init_Z @@ -603,7 +596,7 @@ mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) 
{ } - ols_results <- tryCatch(mfbvar:::ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), + ols_results <- tryCatch(ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), error = function(cond) NULL) if (is.null(ols_results)) { ols_results <- list() @@ -624,8 +617,8 @@ mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) { # Compute the maximum eigenvalue of the initial Pi if (check_roots == TRUE) { - Pi_comp <- mfbvar:::build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) - roots[1] <- mfbvar:::max_eig_cpp(Pi_comp) + Pi_comp <- build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) + roots[1] <- max_eig_cpp(Pi_comp) } if (is.null(init_Sigma)) { @@ -697,7 +690,7 @@ mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) { # Create D (does not vary in the sampler), and find roots of Pi # if requested - D_mat <- mfbvar:::build_DD(d = d, n_lags = n_lags) + D_mat <- build_DD(d = d, n_lags = n_lags) dt <- d[-(1:n_lags), , drop = FALSE] d1 <- d[1:n_lags, , drop = FALSE] @@ -707,13 +700,14 @@ mcmc_sampler.mfbvar_ssng_csv <- function(x, ...) 
{ Z_1 <- Z[1:n_pseudolags,, 1] - mfbvar:::mcmc_ssng_csv(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,phi,sigma,f,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, + mcmc_ssng_csv(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,phi,sigma,f,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, prior_S,D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1, - 10,phi_invvar,phi_meaninvvar,prior_sigma2,prior_df,n_reps,n_q,T_b,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose) - + 10,phi_invvar,phi_meaninvvar,prior_sigma2,prior_df,n_reps,n_burnin,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose,TRUE) + if (verbose) { + cat("\n") + } return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, phi_mu = phi_mu, lambda_mu = lambda_mu, omega = omega, - phi = phi, sigma = sigma, f = f, roots = NULL, num_tries = NULL, - Z_fcst = NULL, smoothed_Z = NULL, n_determ = n_determ, + phi = phi, sigma = sigma, f = f, Z_fcst = NULL, aggregation = x$aggregation, n_determ = n_determ, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, prior_S = prior_S, prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, Lambda_ = Lambda_, diff --git a/R/mcmc_sampler_diffuse.R b/R/mcmc_sampler_diffuse.R new file mode 100644 index 0000000..65ef67e --- /dev/null +++ b/R/mcmc_sampler_diffuse.R @@ -0,0 +1,860 @@ +mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ + + n_vars <- ncol(x$Y) + if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + test_all <- sapply(x, is.null) + test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")] + stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) + } + + # Diffuse + prior_Pi_Omega <- create_prior_Pi_Omega(lambda1 = x$lambda1, lambda2 = 
x$lambda2, lambda3 = x$lambda3, + prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + n_lags = x$n_lags, block_exo = x$block_exo) + prior_Pi_mean <- matrix(0, n_vars, n_vars*x$n_lags + 1) + prior_Pi_mean[, 2:(n_vars+1)] <- diag(x$prior_Pi_AR1) + + Y <- x$Y + freq <- x$freq + n_fcst <- x$n_fcst + verbose <- x$verbose + n_lags <- x$n_lags + lambda4 <- x$lambda4 + + + add_args <- list(...) + n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) + init <- add_args$init + init_Pi <- init$init_Pi + init_Sigma <- init$init_Sigma + init_Z <- init$init_Z + + # n_vars: number of variables + # n_lags: number of lags + # n_determ: number of deterministic variables + # n_T: sample size (full sample) + # n_T_: sample size (reduced sample) + + freqs <- x$freqs + Lambda_ <- x$Lambda_ + n_q <- sum(freq == freqs[1]) + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) + } + if (n_q == 0 || n_q == n_vars) { + complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) + Y <- Y[complete_quarters, ] + } + + n_pseudolags <- max(c(n_lags, 3)) + n_T <- dim(Y)[1]# - n_lags + n_T_ <- n_T - n_pseudolags + d <- matrix(1, nrow = nrow(Y), ncol = 1) + + ################################################################ + ### Preallocation + # Pi and Sigma store their i-th draws in the third dimension, psi + # is vectorized so it has its i-th draw stored in the i-th row + # Pi: p * pk * n_reps, each [,,i] stores Pi' + # Sigma: p * p * n_reps + # psi: n_reps * p + # Z: T * p * n_reps + ### If forecasting (h is horizon): + # Z_fcst: hk * p * n_reps + # d_fcst_lags: hk * m + ### If root checking: + # roots: n_reps vector + # num_tries: n_reps vector + ### If smoothing of the state vector: + # smoothed_Z: T * p * n_reps + + Pi <- array(NA, dim = c(n_vars, n_vars * n_lags + 1, n_reps/n_thin)) + Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) + Z <- array(NA, dim = c(n_T, 
n_vars, n_reps/n_thin)) + + Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) + if (n_fcst > 0) { + rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) + Z_fcst[,,1] <- 0 + } else { + rownames(Z_fcst) <- (n_T-n_lags+1):n_T + } + + + + + ################################################################ + ### MCMC sampling initialization + + # If the initial values are not provided, the missing values in + # Z are filled with the next observed value and Pi, Sigma and + # psi are then computed using maximum likelihood + + # This allows the user to run the MCMC sampler for a burn-in + # period, then use the final draw of that as initialization + # for multiple chains + + if (is.null(init_Z)) { + Z[,, 1] <- fill_na(Y) + } else { + if (all(dim(Z[,, 1]) == dim(init_Z))) { + Z[,, 1] <- init_Z + } else { + stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x "))) + } + + } + + ols_results <- ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = 1) + + if (is.null(init_Pi)) { + Pi[,, 1] <- cbind(ols_results$const, ols_results$Pi) + } else { + if (all(dim(Pi[,, 1]) == dim(init_Pi))) { + Pi[,, 1] <- init_Pi + } else { + stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x "))) + } + } + + # Compute the maximum eigenvalue of the initial Pi + + if (is.null(init_Sigma)) { + Sigma[,, 1] <- ols_results$S + } else { + if (all(dim(Sigma[,,1]) == dim(init_Sigma))) { + Sigma[,, 1] <- init_Sigma + } else { + stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x "))) + } + } + + ################################################################ + ### Compute terms which do not vary in the sampler + + Z_1 <- Z[1:n_pseudolags,, 1] + + # For the posterior of Pi + 
inv_prior_Pi_Omega <- diag(1/c(prior_Pi_Omega)) + prior_Pi_mean_vec <- c(prior_Pi_mean) + + aux <- matrix(0, 1, 1) + global <- c(0) + local <- matrix(0, 1, 1) + a <- -1 + slice <- c(0) + gig <- TRUE + + mcmc_minn_diffuse(Y[-(1:n_lags),],Pi,Sigma,Z,Z_fcst,aux,global,local,slice,Lambda_,prior_Pi_Omega, + prior_Pi_mean_vec,Z_1,n_reps,n_burnin,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst, + n_thin,verbose,a,gig) + if (verbose) { + cat("\n") + } + + ################################################################ + ### Prepare the return object + return_obj <- list(Pi = Pi, Sigma = Sigma, psi = NULL, Z = Z, + Z_fcst = NULL, aggregation = x$aggregation, n_determ = 1, + n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, + d = d, Y = Y, n_T = n_T, n_T_ = n_T_, + prior_psi_Omega = NULL, prior_psi_mean = NULL, n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_, freq = freq, + init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_Z = Z[,, n_reps/n_thin])) + + if (n_fcst>0) { + return_obj$Z_fcst <- Z_fcst + } + + return(return_obj) + +} + +mcmc_sampler.mfbvar_dl_diffuse <- function(x, ...){ + + n_vars <- ncol(x$Y) + if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + test_all <- sapply(x, is.null) + test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")] + stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) + } + + # Diffuse + prior_Pi_Omega <- create_prior_Pi_Omega(lambda1 = x$lambda1, lambda2 = x$lambda2, lambda3 = x$lambda3, + prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + n_lags = x$n_lags, block_exo = x$block_exo) + prior_Pi_mean <- matrix(0, n_vars, n_vars*x$n_lags + 1) + prior_Pi_mean[, 2:(n_vars+1)] <- diag(x$prior_Pi_AR1) + + Y <- x$Y + freq <- x$freq + n_fcst <- x$n_fcst + verbose <- x$verbose + n_lags <- x$n_lags + lambda4 <- x$lambda4 + + add_args <- list(...) 
+ n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) + init <- add_args$init + init_Pi <- init$init_Pi + init_Sigma <- init$init_Sigma + init_Z <- init$init_Z + + # n_vars: number of variables + # n_lags: number of lags + # n_determ: number of deterministic variables + # n_T: sample size (full sample) + # n_T_: sample size (reduced sample) + + freqs <- x$freqs + Lambda_ <- x$Lambda_ + n_q <- sum(freq == freqs[1]) + n_m <- n_vars - n_q + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) + } + if (n_q == 0 || n_q == n_vars) { + complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) + Y <- Y[complete_quarters, ] + } + + n_pseudolags <- max(c(n_lags, 3)) + n_T <- dim(Y)[1]# - n_lags + n_T_ <- n_T - n_pseudolags + d <- matrix(1, nrow = nrow(Y), ncol = 1) + + ################################################################ + ### Preallocation + # Pi and Sigma store their i-th draws in the third dimension, psi + # is vectorized so it has its i-th draw stored in the i-th row + # Pi: p * pk * n_reps, each [,,i] stores Pi' + # Sigma: p * p * n_reps + # psi: n_reps * p + # Z: T * p * n_reps + ### If forecasting (h is horizon): + # Z_fcst: hk * p * n_reps + # d_fcst_lags: hk * m + ### If root checking: + # roots: n_reps vector + # num_tries: n_reps vector + ### If smoothing of the state vector: + # smoothed_Z: T * p * n_reps + + Pi <- array(NA, dim = c(n_vars, n_vars * n_lags + 1, n_reps/n_thin)) + Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) + Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) + + Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) + if (n_fcst > 0) { + rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) + Z_fcst[,,1] <- 0 + } else { + rownames(Z_fcst) <- (n_T-n_lags+1):n_T + } + + + + + ################################################################ + ### MCMC sampling 
initialization + + # If the initial values are not provided, the missing values in + # Z are filled with the next observed value and Pi, Sigma and + # psi are then computed using maximum likelihood + + # This allows the user to run the MCMC sampler for a burn-in + # period, then use the final draw of that as initialization + # for multiple chains + + if (is.null(init_Z)) { + Z[,, 1] <- fill_na(Y) + } else { + if (all(dim(Z[,, 1]) == dim(init_Z))) { + Z[,, 1] <- init_Z + } else { + stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x "))) + } + + } + + ols_results <- ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = 1) + + if (is.null(init_Pi)) { + Pi[,, 1] <- cbind(ols_results$const, ols_results$Pi) + } else { + if (all(dim(Pi[,, 1]) == dim(init_Pi))) { + Pi[,, 1] <- init_Pi + } else { + stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x "))) + } + } + + # Compute the maximum eigenvalue of the initial Pi + + if (is.null(init_Sigma)) { + Sigma[,, 1] <- ols_results$S + } else { + if (all(dim(Sigma[,,1]) == dim(init_Sigma))) { + Sigma[,, 1] <- init_Sigma + } else { + stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x "))) + } + } + + ## DL + if (!("a" %in% names(x))) { + a <- 1 + } else { + a <- x$a + } + + gig <- ifelse(is.null(x$gig), TRUE, FALSE) + + if (is.null(init$init_global)) { + init_global <- 0.1 + } else { + init_global <- init$init_global + } + + if (is.null(init$init_aux)) { + init_aux <- c(sqrt(prior_Pi_Omega[-1,])/init_global) + } else { + init_aux <- init$init_aux + } + + if (is.null(init$init_local)) { + init_local <- c(sqrt(prior_Pi_Omega[-1,])/init_global) + } else { + init_local <- init$init_local + } + + if (is.null(init$init_slice)) { + 
init_slice <- rep(1, n_vars^2*n_lags) + } else { + init_slice <- init$init_slice + } + + aux <- matrix(init_aux, nrow = n_reps/n_thin, ncol = n_vars*n_vars*n_lags, byrow = TRUE) + local <- matrix(init_local, nrow = n_reps/n_thin, ncol = n_vars*n_vars*n_lags, byrow = TRUE) + global <- matrix(init_global, n_reps/n_thin, ncol = 1) + slice <- matrix(init_slice, nrow = 1, ncol = n_vars*n_vars*n_lags) + + ################################################################ + ### Compute terms which do not vary in the sampler + + Z_1 <- Z[1:n_pseudolags,, 1] + + # For the posterior of Pi + inv_prior_Pi_Omega <- diag(1/c(prior_Pi_Omega)) + prior_Pi_mean_vec <- c(prior_Pi_mean) + mcmc_minn_diffuse(Y[-(1:n_lags),],Pi,Sigma,Z,Z_fcst,aux,global,local,slice,Lambda_,prior_Pi_Omega, + prior_Pi_mean_vec,Z_1,n_reps,n_burnin,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst, + n_thin,verbose,a,gig) + if (verbose) { + cat("\n") + } + + ################################################################ + ### Prepare the return object + return_obj <- list(Pi = Pi, Sigma = Sigma, psi = NULL, Z = Z, + Z_fcst = NULL, aggregation = x$aggregation, n_determ = 1, + n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, + d = d, Y = Y, n_T = n_T, n_T_ = n_T_, + prior_psi_Omega = NULL, prior_psi_mean = NULL, n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_, freq = freq, + init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_Z = Z[,, n_reps/n_thin])) + + if (n_fcst>0) { + return_obj$Z_fcst <- Z_fcst + } + + return(return_obj) + +} + +mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) 
{ + + n_vars <- ncol(x$Y) + if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$prior_psi_Omega) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + test_all <- sapply(x, is.null) + test_sub <- test_all[c("Y", "d", "prior_psi_mean", "prior_psi_Omega", "n_lags", "n_burnin", "n_reps")] + stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) + } + if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) { + stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") + } + + prior_Pi_Omega <- create_prior_Pi_Omega(lambda1 = x$lambda1, lambda2 = x$lambda2, lambda3 = x$lambda3, + prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + n_lags = x$n_lags, block_exo = x$block_exo) + prior_Pi_Omega <- prior_Pi_Omega[-1, ] + prior_Pi_mean <- matrix(0, n_vars, n_vars*x$n_lags) + prior_Pi_mean[, 1:n_vars] <- diag(x$prior_Pi_AR1) + + Y <- x$Y + d <- x$d + d_fcst <- x$d_fcst + freq <- x$freq + prior_psi_mean <- x$prior_psi_mean + prior_psi_Omega <- x$prior_psi_Omega + n_fcst <- x$n_fcst + check_roots <- x$check_roots + verbose <- x$verbose + + add_args <- list(...) 
+ n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) + init <- add_args$init + init_Pi <- init$init_Pi + init_Sigma <- init$init_Sigma + init_psi <- init$init_psi + init_Z <- init$init_Z + + # n_vars: number of variables + # n_lags: number of lags + # n_determ: number of deterministic variables + # n_T: sample size (full sample) + # n_T_: sample size (reduced sample) + n_vars <- dim(Y)[2] + n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 + freqs <- x$freqs + Lambda_ <- x$Lambda_ + + n_q <- sum(freq == freqs[1]) + n_m <- n_vars - n_q + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) + } + if (n_q == 0 || n_q == n_vars) { + complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) + Y <- Y[complete_quarters, ] + d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) + d <- d[complete_quarters, , drop = FALSE] + } + + + n_pseudolags <- max(c(n_lags, 3)) + n_determ <- dim(d)[2] + n_T <- dim(Y)[1]# - n_lags + n_T_ <- n_T - n_pseudolags + + + + + + ################################################################ + ### Preallocation + # Pi and Sigma store their i-th draws in the third dimension, psi + # is vectorized so it has its i-th draw stored in the i-th row + # Pi: p * pk * n_reps, each [,,i] stores Pi' + # Sigma: p * p * n_reps + # psi: n_reps * p + # Z: T * p * n_reps + ### If forecasting (h is horizon): + # Z_fcst: hk * p * n_reps + # d_fcst_lags: hk * m + ### If root checking: + # roots: n_reps vector + # num_tries: n_reps vector + ### If smoothing of the state vector: + # smoothed_Z: T * p * n_reps + + Pi <- array(NA, dim = c(n_vars, n_vars * n_lags, n_reps/n_thin)) + Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) + psi <- array(NA, dim = c(n_reps/n_thin, n_vars * n_determ)) + Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) + Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) + if 
(n_fcst > 0) { + rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) + Z_fcst[,,1] <- 0 + } else { + rownames(Z_fcst) <- (n_T-n_lags+1):n_T + } + d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) + d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] + roots <- vector("numeric", n_reps/n_thin) + num_tries <- roots + + + + ################################################################ + ### MCMC sampling initialization + + # If the initial values are not provided, the missing values in + # Z are filled with the next observed value and Pi, Sigma and + # psi are then computed using maximum likelihood + + # This allows the user to run the MCMC sampler for a burn-in + # period, then use the final draw of that as initialization + # for multiple chains + + if (is.null(init_Z)) { + Z[,, 1] <- fill_na(Y) + } else { + if (all(dim(Z[,, 1]) == dim(init_Z))) { + Z[,, 1] <- init_Z + } else { + stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x "))) + } + + } + + ols_results <- tryCatch(ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), + error = function(cond) NULL) + if (is.null(ols_results)) { + ols_results <- list() + ols_results$Pi <- prior_Pi_mean + ols_results$psi <- prior_psi_mean + } + + if (is.null(init_Pi)) { + Pi[,, 1] <- ols_results$Pi + } else { + if (all(dim(Pi[,, 1]) == dim(init_Pi))) { + Pi[,, 1] <- init_Pi + } else { + stop(paste0("The dimension of init_Pi is ", paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x "))) + } + } + + # Compute the maximum eigenvalue of the initial Pi + if (check_roots == TRUE) { + Pi_comp <- build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) + roots[1] <- max_eig_cpp(Pi_comp) + } + + if (is.null(init_Sigma)) { + Sigma[,, 1] <- ols_results$S + } else { + if (all(dim(Sigma[,,1]) == 
dim(init_Sigma))) { + Sigma[,, 1] <- init_Sigma + } else { + stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x "))) + } + } + + if (is.null(init_psi)) { + if (roots[1] < 1) { + psi[1, ] <- ols_results$psi + } else { + psi[1, ] <- prior_psi_mean + } + } else { + if (length(psi[1, ]) == length(init_psi)) { + psi[1,] <- init_psi + } else { + stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "), ", but should be ", paste(length(psi[1,]), collapse = " x "))) + } + } + + ################################################################ + ### Compute terms which do not vary in the sampler + + # Create D (does not vary in the sampler), and find roots of Pi + # if requested + D_mat <- build_DD(d = d, n_lags = n_lags) + dt <- d[-(1:n_lags), , drop = FALSE] + d1 <- d[1:n_lags, , drop = FALSE] + psi_i <- psi[1, ] + Pi_i <- Pi[,, 1] + Sigma_i <- Sigma[,, 1] + Z_i <- Z[-(1:n_lags),, 1] + mu_mat <- dt %*% t(matrix(psi_i, nrow = n_vars)) + + + # For the posterior of Pi + inv_prior_Pi_Omega <- diag(1/c(prior_Pi_Omega)) + Omega_Pi <- matrix(inv_prior_Pi_Omega %*% c(prior_Pi_mean), n_vars*n_lags, n_vars) + + # For the posterior of psi + phi_mu <- matrix(0, 1, 1) + lambda_mu <- matrix(0, 1, 1) + omega <- matrix(diag(prior_psi_Omega), nrow = 1) + c0 <- 0 + c1 <- 0 + s <- 0 + + Z_1 <- Z[1:n_pseudolags,, 1] + + mcmc_ssng_diffuse(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu, lambda_mu, omega, Z,Z_fcst,Lambda_,prior_Pi_Omega,Omega_Pi, + D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1,n_reps,n_burnin, + n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose,FALSE) + if (verbose) { + cat("\n") + } + ################################################################ + ### Prepare the return object + return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, + Z_fcst = NULL, aggregation = x$aggregation, n_determ = n_determ, + n_lags = n_lags, n_vars = 
n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, + d = d, Y = Y, n_T = n_T, n_T_ = n_T_, + prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_, + init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin])) + + if (check_roots == TRUE) { + return_obj$roots <- roots + return_obj$num_tries <- num_tries + } + if (n_fcst > 0) { + return_obj$Z_fcst <- Z_fcst + } + + return(return_obj) + +} + +mcmc_sampler.mfbvar_ssng_diffuse <- function(x, ...) { + + n_vars <- ncol(x$Y) + if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + test_all <- sapply(x, is.null) + test_sub <- test_all[c("Y", "d", "prior_psi_mean", "n_lags", "n_burnin", "n_reps")] + stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) + } + if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) { + stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") + } + + prior_Pi_Omega <- create_prior_Pi_Omega(lambda1 = x$lambda1, lambda2 = x$lambda2, lambda3 = x$lambda3, + prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + n_lags = x$n_lags, block_exo = x$block_exo) + prior_Pi_Omega <- prior_Pi_Omega[-1, ] + prior_Pi_mean <- matrix(0, n_vars, n_vars*x$n_lags) + prior_Pi_mean[, 1:n_vars] <- diag(x$prior_Pi_AR1) + + Y <- x$Y + d <- x$d + d_fcst <- x$d_fcst + freq <- x$freq + prior_psi_mean <- x$prior_psi_mean + prior_psi_Omega <- x$prior_psi_Omega + n_fcst <- x$n_fcst + check_roots <- x$check_roots + verbose <- x$verbose + + add_args <- list(...) 
+ n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) + init <- add_args$init + init_Pi <- init$init_Pi + init_Sigma <- init$init_Sigma + init_psi <- init$init_psi + init_Z <- init$init_Z + init_omega <- init$init_omega + init_phi_mu <- init$init_phi_mu + init_lambda_mu <- init$init_lambda_mu + + # n_vars: number of variables + # n_lags: number of lags + # n_determ: number of deterministic variables + # n_T: sample size (full sample) + # n_T_: sample size (reduced sample) + n_vars <- dim(Y)[2] + n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 + freqs <- x$freqs + Lambda_ <- x$Lambda_ + + n_q <- sum(freq == freqs[1]) + n_m <- n_vars - n_q + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) + } + if (n_q == 0 || n_q == n_vars) { + complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) + Y <- Y[complete_quarters, ] + d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) + d <- d[complete_quarters, , drop = FALSE] + } + + + n_pseudolags <- max(c(n_lags, 3)) + n_determ <- dim(d)[2] + n_T <- dim(Y)[1]# - n_lags + n_T_ <- n_T - n_pseudolags + + c0 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[1]) + c1 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[2]) + s <- ifelse(is.null(x[["s"]]), 1, x$s) + + ################################################################ + ### Preallocation + # Pi and Sigma store their i-th draws in the third dimension, psi + # is vectorized so it has its i-th draw stored in the i-th row + # Pi: p * pk * n_reps, each [,,i] stores Pi' + # Sigma: p * p * n_reps + # psi: n_reps * p + # Z: T * p * n_reps + ### If forecasting (h is horizon): + # Z_fcst: hk * p * n_reps + # d_fcst_lags: hk * m + ### If root checking: + # roots: n_reps vector + # num_tries: n_reps vector + ### If smoothing of the state vector: + # smoothed_Z: T * p * n_reps + + Pi <- array(NA, dim = c(n_vars, n_vars * n_lags, 
n_reps/n_thin)) + Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) + psi <- array(NA, dim = c(n_reps/n_thin, n_vars * n_determ)) + Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) + omega <- matrix(NA, nrow = n_reps/n_thin, ncol = n_vars * n_determ) + phi_mu <- rep(NA, n_reps/n_thin) + lambda_mu <- rep(NA, n_reps/n_thin) + Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) + if (n_fcst > 0) { + rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) + Z_fcst[,,1] <- 0 + } else { + rownames(Z_fcst) <- (n_T-n_lags+1):n_T + } + d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) + d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] + roots <- vector("numeric", n_reps/n_thin) + num_tries <- roots + + + + ################################################################ + ### MCMC sampling initialization + + # If the initial values are not provided, the missing values in + # Z are filled with the next observed value and Pi, Sigma and + # psi are then computed using maximum likelihood + + # This allows the user to run the MCMC sampler for a burn-in + # period, then use the final draw of that as initialization + # for multiple chains + + if (is.null(init_Z)) { + Z[,, 1] <- fill_na(Y) + } else { + if (all(dim(Z[,, 1]) == dim(init_Z))) { + Z[,, 1] <- init_Z + } else { + stop(paste0("The dimension of init_Z is ", paste(dim(init_Z), collapse = " x "), ", but should be ", paste(dim(Z[,, 1]), collapse = " x "))) + } + + } + + ols_results <- tryCatch(ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), + error = function(cond) NULL) + if (is.null(ols_results)) { + ols_results <- list() + ols_results$Pi <- prior_Pi_mean + ols_results$psi <- prior_psi_mean + } + + if (is.null(init_Pi)) { + Pi[,, 1] <- ols_results$Pi + } else { + if (all(dim(Pi[,, 1]) == dim(init_Pi))) { + Pi[,, 1] <- init_Pi + } else { + stop(paste0("The dimension of init_Pi is ", 
paste(dim(init_Pi), collapse = " x "), ", but should be ", paste(dim(Pi[,, 1]), collapse = " x "))) + } + } + + # Compute the maximum eigenvalue of the initial Pi + if (check_roots == TRUE) { + Pi_comp <- build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) + roots[1] <- max_eig_cpp(Pi_comp) + } + + if (is.null(init_Sigma)) { + Sigma[,, 1] <- ols_results$S + } else { + if (all(dim(Sigma[,,1]) == dim(init_Sigma))) { + Sigma[,, 1] <- init_Sigma + } else { + stop(paste0("The dimension of init_Sigma is ", paste(dim(init_Sigma), collapse = " x "), ", but should be ", paste(dim(Sigma[,,1]), collapse = " x "))) + } + } + + if (is.null(init_psi)) { + if (roots[1] < 1) { + psi[1, ] <- ols_results$psi + } else { + psi[1, ] <- prior_psi_mean + } + } else { + if (length(psi[1, ]) == length(init_psi)) { + psi[1,] <- init_psi + } else { + stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "), ", but should be ", paste(length(psi[1,]), collapse = " x "))) + } + } + + if (is.null(init_omega)) { + if (is.null(prior_psi_Omega)) { + omega[1, ] <- diag(prior_psi_Omega) + } else { + omega[1, ] <- rep(0.1, n_determ*n_vars) + } + } else { + omega[1, ] <- init_omega + } + if (is.null(init_phi_mu)) { + phi_mu[1] <- 1 + } else { + phi_mu[1] <- init_phi_mu + } + if (is.null(init_lambda_mu)) { + lambda_mu[1] <- 1 + } else { + lambda_mu[1] <- init_lambda_mu + } + + ################################################################ + ### Compute terms which do not vary in the sampler + + # Create D (does not vary in the sampler), and find roots of Pi + # if requested + D_mat <- build_DD(d = d, n_lags = n_lags) + dt <- d[-(1:n_lags), , drop = FALSE] + d1 <- d[1:n_lags, , drop = FALSE] + + + # For the posterior of Pi + inv_prior_Pi_Omega <- diag(1/c(prior_Pi_Omega)) + Omega_Pi <- matrix(inv_prior_Pi_Omega %*% c(prior_Pi_mean), n_vars*n_lags, n_vars) + + Z_1 <- Z[1:n_pseudolags,, 1] + + mcmc_ssng_diffuse(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu, lambda_mu, omega, 
Z,Z_fcst,Lambda_,prior_Pi_Omega,Omega_Pi, + D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1,n_reps,n_burnin, + n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose,TRUE) + if (verbose) { + cat("\n") + } + ################################################################ + ### Prepare the return object + return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, phi_mu = phi_mu, lambda_mu = lambda_mu, omega = omega, + Z_fcst = NULL, aggregation = x$aggregation, n_determ = n_determ, + n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, + d = d, Y = Y, n_T = n_T, n_T_ = n_T_, + prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_, + init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin], init_omega = omega[n_reps/n_thin, ], init_lambda_mu = lambda_mu[n_reps/n_thin], init_phi_mu = phi_mu[n_reps/n_thin])) + + if (check_roots == TRUE) { + return_obj$roots <- roots + return_obj$num_tries <- num_tries + } + if (n_fcst > 0) { + return_obj$Z_fcst <- Z_fcst + } + + return(return_obj) + +} diff --git a/R/mcmc_sampler_fsv.R b/R/mcmc_sampler_fsv.R index 9ac11de..7be7bde 100644 --- a/R/mcmc_sampler_fsv.R +++ b/R/mcmc_sampler_fsv.R @@ -1,37 +1,25 @@ -#' @rdname mcmc_sampler mcmc_sampler.mfbvar_minn_fsv <- function(x, ...){ - - if (is.null(x$n_fac)) { - stop("The number of factors (n_fac) must be provided.") + n_vars <- ncol(x$Y) + if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + test_all <- sapply(x, is.null) + test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")] + stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) } + + prior_Pi_Omega <- create_prior_Pi_Omega(x$lambda1, x$lambda2, x$lambda3, x$prior_Pi_AR1, x$Y, x$n_lags) + 
prior_Pi_AR1 <- x$prior_Pi_AR1 + prior_zero_mean <- all(x$prior_Pi_AR1 == 0) + Y <- x$Y + freq <- x$freq + verbose <- x$verbose + n_vars <- ncol(Y) n_lags <- x$n_lags - n_q <- sum(x$freq == "q") - n_m <- n_vars - n_q n_fac <- x$n_fac n_fcst <- x$n_fcst - mf <- TRUE - if (n_q == 0 || n_q == n_vars) { - complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) - Y <- Y[complete_quarters, ] - mf <- FALSE - } - y_in_p <- Y[-(1:n_lags), ] - if (n_q < n_vars) { - T_b <- min(apply(y_in_p[,1:n_m], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) - } else { - T_b <- nrow(y_in_p) - } - n_T_ <- nrow(Y) - n_lags - n_T <- nrow(Y) - add_args <- list(...) - n_reps <- add_args$n_reps - n_thin <- ifelse(!is.null(add_args$n_thin), add_args$n_thin, ifelse(!is.null(x$n_thin), x$n_thin, 1)) - - init <- add_args$init - error_variance <- mfbvar:::compute_error_variances(Y) + ## Priors priormu <- x$priormu priorphiidi <- x$priorphiidi @@ -39,11 +27,71 @@ mcmc_sampler.mfbvar_minn_fsv <- function(x, ...){ priorsigmaidi <- x$priorsigmaidi priorsigmafac <- x$priorsigmafac priorfacload <- x$priorfacload - priorng <- x$priorng - columnwise <- x$columnwise restrict <- x$restrict - heteroskedastic <- x$heteroskedastic - priorhomoskedastic <- x$priorhomoskedastic + + if (length(priorsigmaidi) == 1) { + priorsigmaidi <- rep(priorsigmaidi, n_vars) + } + if (length(priorsigmafac) == 1) { + priorsigmafac <- rep(priorsigmafac, n_fac) + } + + bmu <- priormu[1] + Bmu <- priormu[2]^2 + + Bsigma <- c(priorsigmaidi, priorsigmafac) + + B011inv <- 1/10^8 + B022inv <- 1/10^12 + + armatau2 <- matrix(priorfacload^2, n_vars, n_fac) # priorfacload is scalar, or matrix + + armarestr <- matrix(FALSE, nrow = n_vars, ncol = n_fac) + if (restrict == "upper") armarestr[upper.tri(armarestr)] <- TRUE + armarestr <- matrix(as.integer(!armarestr), nrow = nrow(armarestr), ncol = ncol(armarestr)) # restrinv + + a0idi <- priorphiidi[1] + b0idi <- priorphiidi[2] + a0fac <- priorphifac[1] + 
b0fac <- priorphifac[2] + + priorh0 <- rep(-1.0, n_vars + n_fac) + + ## Initials + + add_args <- list(...) + n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) + + # n_vars: number of variables + # n_lags: number of lags + # n_determ: number of deterministic variables + # n_T: sample size (full sample) + # n_T_: sample size (reduced sample) + + freqs <- x$freqs + Lambda_ <- x$Lambda_ + n_q <- sum(freq == freqs[1]) + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) + } + if (n_q == 0 || n_q == n_vars) { + complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) + Y <- Y[complete_quarters, ] + } + + + n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) + n_T <- dim(Y)[1]# - n_lags + n_T_ <- n_T - n_pseudolags + + ## Initials + init <- add_args$init + y_in_p <- Y[-(1:n_lags), ] + error_variance <- compute_error_variances(Y) ### Regression parameters if (is.null(init$init_Pi)) { @@ -54,7 +102,7 @@ mcmc_sampler.mfbvar_minn_fsv <- function(x, ...){ ### Latent high-frequency if (is.null(init$init_Z)) { - init_Z <- y_in_p + init_Z <- fill_na(Y) } else { init_Z <- init$init_Z } @@ -78,14 +126,14 @@ mcmc_sampler.mfbvar_minn_fsv <- function(x, ...){ ### Factors and loadings if (is.null(init$init_facload)) { - init_facload <- matrix(rnorm(n_vars*n_fac, sd = .5)^2, nrow=n_vars, ncol=n_fac) + init_facload <- matrix(rnorm(n_vars*n_fac, sd = 0.5)^2, nrow=n_vars, ncol=n_fac) } else { init_facload <- init$init_facload } - if (is.null(init$init_fac)) { - init_fac <- matrix(rnorm(n_fac*n_T_, sd = 0.005), n_fac, n_T_) + if (is.null(init$init_f)) { + init_f <- matrix(rnorm(n_fac*n_T_, sd = 0.5), n_fac, n_T_) } else { - init_fac <- init$init_fac + init_f <- init$init_f } ### Latent volatilities @@ -100,294 +148,205 @@ mcmc_sampler.mfbvar_minn_fsv <- function(x, ...){ init_latent0 <- init$init_latent0 } - cl <- x$cl - Z_1 <- 
mfbvar:::fill_na(Y)[(1:n_lags), ] - verbose <- x$verbose + ################################################################ + ### Preallocation + # Pi and Sigma store their i-th draws in the third dimension, psi + # is vectorized so it has its i-th draw stored in the i-th row + # Pi: p * pk * n_reps, each [,,i] stores Pi' + # Sigma: p * p * n_reps + # psi: n_reps * p + # Z: T * p * n_reps + ### If forecasting (h is horizon): + # Z_fcst: hk * p * n_reps + # d_fcst_lags: hk * m + ### If root checking: + # roots: n_reps vector + # num_tries: n_reps vector + ### If smoothing of the state vector: + # smoothed_Z: T * p * n_reps - ## Set up cluster (if used) - if (!is.null(cl)) { - parallelize <- TRUE - parallel::clusterCall(cl, fun = function() library(mfbvar)) - parallel::clusterExport(cl, varlist = c("par_fun")) + Pi <- array(init_Pi, dim = c(n_vars, n_vars*n_lags + 1, n_reps/n_thin)) + Z <- array(init_Z, dim = c(n_T, n_vars, n_reps/n_thin)) + Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) + if (n_fcst > 0) { + rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) + Z_fcst[,,1] <- 0 } else { - parallelize <- FALSE + rownames(Z_fcst) <- (n_T-n_lags+1):n_T } - prior_Pi_Omega <- mfbvar:::create_prior_Pi_Omega(x$lambda1, x$lambda2, x$lambda3, x$prior_Pi_AR1, Y, n_lags) - prior_Pi_AR1 <- x$prior_Pi_AR1 - prior_zero_mean <- all(x$prior_Pi_AR1 == 0) + mu <- matrix(init_mu, n_vars, n_reps/n_thin) + sigma <- matrix(init_sigma, n_vars+n_fac, n_reps/n_thin) + phi <- matrix(init_phi, n_vars+n_fac, n_reps/n_thin) - if (prior_zero_mean) { - if (n_vars*n_lags > 1.05 * n_T_) { - par_fun <- mfbvar:::par_fun_top(mfbvar:::rmvn_bcm) - } else { - par_fun <- mfbvar:::par_fun_top(mfbvar:::rmvn_rue) - } - } + facload <- array(matrix(init_facload, nrow = n_vars, ncol = n_fac), + dim = c(n_vars, n_fac, n_reps/n_thin)) + f <- array(matrix(init_f, n_fac, n_T_), dim = c(n_fac, n_T_, n_reps/n_thin)) - ## Obtain the aggregation matrix for the quarterly only - if 
(mf) { - if (x$aggregation == "average") { - Lambda_ <- mfbvar:::build_Lambda(rep("average", n_q), 3) - } else { - Lambda_ <- mfbvar:::build_Lambda(rep("triangular", n_q), 5) - } - } + h <- array(t(init_latent), dim = c(n_T_, n_vars+n_fac, n_reps/n_thin), + dimnames = list(rownames(init_latent), colnames(init_latent), NULL)) + + ################################################################ + ### Compute terms which do not vary in the sampler + + Z_1 <- Z[1:n_pseudolags,, 1] + + aux <- matrix(0, 1, 1) + global <- c(0) + local <- matrix(0, 1, 1) + a <- -1 + slice <- c(0) + gig <- TRUE + + mcmc_minn_fsv(Y[-(1:n_lags),],Pi,Z,Z_fcst,mu,phi,sigma,f,facload,h, + aux,global,local,slice,Lambda_,prior_Pi_Omega,prior_Pi_AR1, Z_1,bmu,Bmu, + a0idi,b0idi,a0fac,b0fac,Bsigma,B011inv,B022inv,priorh0, + armarestr,armatau2,n_fac,n_reps,n_burnin,n_q,T_b-n_lags,n_lags, + n_vars,n_T_,n_fcst,n_thin,verbose,a,gig) + if (verbose) { + cat("\n") + } + return_obj <- list(Pi = Pi, Z = Z, Z_fcst = NULL, mu = mu, phi = phi, + sigma = sigma, f = f, facload = facload, h = h, + Lambda_ = Lambda_, aggregation = x$aggregation, prior_Pi_Omega = prior_Pi_Omega, + prior_Pi_AR1 = prior_Pi_AR1, Y = Y, Z_1 = Z_1, bmu = bmu, + Bmu = Bmu, a0idi = a0idi, b0idi = b0idi, a0fac = a0fac, + b0fac = b0fac, Bsigma = Bsigma, B011inv = B011inv, + B022inv = B022inv, priorh0 = priorh0, armarestr = armarestr, + armatau2 = armatau2, n_fac = n_fac, n_reps = n_reps, n_burnin = n_burnin, + n_q = n_q, T_b_ = T_b-n_lags, n_lags = n_lags, + n_vars = n_vars, n_T_ = n_T_, n_fcst = n_fcst, + n_thin = n_thin, verbose = verbose, + init = list(init_Pi = Pi[,, n_reps/n_thin], + init_Z = Z[,, n_reps/n_thin], + init_mu = mu[, n_reps/n_thin], + init_phi = phi[, n_reps/n_thin], + init_sigma = sigma[, n_reps/n_thin], + init_facload = facload[,,n_reps/n_thin], + init_f = f[,,n_reps/n_thin], + init_h = h[,,n_reps/n_thin])) - Pi <- array(init_Pi, dim = c(n_vars, n_vars*n_lags + 1, n_reps/n_thin)) - Z <- array(init_Z, dim = c(n_T_, n_vars, 
n_reps/n_thin)) if (n_fcst > 0) { - Z_fcst <- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps), - dimnames = list(c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)), NULL, NULL)) + return_obj$Z_fcst <- Z_fcst } + return(return_obj) - mu_storage <- matrix(init_mu, n_vars, n_reps/n_thin) - sigma_storage <- matrix(init_sigma, n_vars+n_fac, n_reps/n_thin) - phi_storage <- matrix(init_phi, n_vars+n_fac, n_reps/n_thin) - - facload_storage <- array(matrix(init_facload, nrow = n_vars, ncol = n_fac), dim = c(n_vars, n_fac, n_reps/n_thin)) - fac_storage <- array(matrix(init_fac, n_fac, n_T_), dim = c(n_fac, n_T_, n_reps/n_thin)) +} - latent <- array(init_latent, dim = c(n_T_, n_vars+n_fac, n_reps/n_thin), - dimnames = list(rownames(init_latent), colnames(init_latent), NULL)) +mcmc_sampler.mfbvar_dl_fsv <- function(x, ...){ + n_vars <- ncol(x$Y) + if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + test_all <- sapply(x, is.null) + test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")] + stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) + } - Pi_i <- Pi[,,1] - Z_i <- init_Z - startpara <- list(mu = init_mu, - phi = init_phi, - sigma = init_sigma) - startlatent <- latent[,,1] - startlatent0 <- init_latent0 - startfacload <- matrix(init_facload, nrow = n_vars, ncol = n_fac) - startfac <- matrix(init_fac, n_fac, n_T_) + prior_Pi_Omega <- create_prior_Pi_Omega(x$lambda1, x$lambda2, x$lambda3, x$prior_Pi_AR1, x$Y, x$n_lags) + prior_Pi_AR1 <- x$prior_Pi_AR1 + prior_zero_mean <- all(x$prior_Pi_AR1 == 0) - if (verbose) { - pb <- progress_bar$new( - format = "[:bar] :percent eta: :eta", - clear = FALSE, total = n_reps, width = 60) - } - - if (!mf) { - X <- mfbvar:::create_X(rbind(Z_1, Z_i), n_lags) - } - - error <- NULL - for (i in 1:n_reps) { - ## Square root of idiosyncratic variances (in dense form) - Sig <- exp(0.5 * startlatent[, 1:n_vars]) - - ## Mixed-frequency block: sample latent monthly series - 
if (mf) { - Z_i <- tryCatch(mfbvar:::rsimsm_adaptive_univariate(y_in_p, Pi_i, Sig, Lambda_, Z_1, n_q, T_b, t(startfac) %*% t(startfacload)), error = function(cond) cond) - if (inherits(Z_i, "error")) { - warning("MCMC halted because of an error in the mixed-frequency step. See $error for more information.") - error <- list(error = Z_i, iter = i, block = "z") - break - } - Z_i <- rbind(Z_1, Z_i) - X <- mfbvar:::create_X(Z_i, n_lags) - Z_i <- Z_i[-(1:n_lags), ] - } + Y <- x$Y + freq <- x$freq + verbose <- x$verbose - ## Produce forecasts + n_vars <- ncol(Y) + n_lags <- x$n_lags + n_fac <- x$n_fac + n_fcst <- x$n_fcst + ## Priors - ## Storage - if (i %% n_thin == 0) { + priormu <- x$priormu + priorphiidi <- x$priorphiidi + priorphifac <- x$priorphifac + priorsigmaidi <- x$priorsigmaidi + priorsigmafac <- x$priorsigmafac + priorfacload <- x$priorfacload + restrict <- x$restrict - if (n_fcst > 0) { - mu <- c(startpara$mu, numeric(n_fac)) - phi <- startpara$phi - sigma <- startpara$sigma - volatility_pred <- startlatent[n_T_, ] + if (length(priorsigmaidi) == 1) { + priorsigmaidi <- rep(priorsigmaidi, n_vars) + } + if (length(priorsigmafac) == 1) { + priorsigmafac <- rep(priorsigmafac, n_fac) + } - Z_pred <- matrix(0, n_fcst+n_lags, n_vars) - Z_pred[1:n_lags, ] <- Z_i[(n_T_-n_lags+1):n_T_,] - for (j in 1:n_fcst) { - volatility_pred <- mu + phi * (volatility_pred - mu) + rnorm(n_vars+n_fac, sd = sigma) - error_pred <- rnorm(n_vars+n_fac, sd = exp(volatility_pred * 0.5)) - X_t <- mfbvar:::create_X_t(Z_pred[j:(n_lags+j-1), ]) - Z_pred[j+n_lags, ] <- Pi_i %*% X_t + startfacload %*% error_pred[(n_vars+1):(n_vars+n_fac)] + error_pred[1:n_vars] - } - Z_fcst[,,i/n_thin] <- Z_pred - } + bmu <- priormu[1] + Bmu <- priormu[2]^2 - Pi[,,i/n_thin] <- Pi_i - Z[,,i/n_thin] <- Z_i + Bsigma <- c(priorsigmaidi, priorsigmafac) - mu_storage[,i/n_thin] <- startpara$mu - sigma_storage[,i/n_thin] <- startpara$sigma - phi_storage[,i/n_thin] <- startpara$phi + B011inv <- 1/10^8 + B022inv <- 
1/10^12 - fac_storage[,,i/n_thin] <- startfac - facload_storage[,,i/n_thin] <- startfacload + armatau2 <- matrix(priorfacload^2, n_vars, n_fac) # priorfacload is scalar, or matrix - latent[,,i/n_thin] <- startlatent - } + armarestr <- matrix(FALSE, nrow = n_vars, ncol = n_fac) + if (restrict == "upper") armarestr[upper.tri(armarestr)] <- TRUE + armarestr <- matrix(as.integer(!armarestr), nrow = nrow(armarestr), ncol = ncol(armarestr)) # restrinv - ## Stochastic volatility block: sample latent factors, latent volatilities and factor loadings - y_hat <- Z_i - X %*% t(Pi_i) - fsample <- tryCatch(factorstochvol::fsvsample(y_hat, factors = n_fac, draws = 1, burnin = 0, priorh0idi = "stationary", - priorh0fac = "stationary", thin = 1, keeptime = "all", - runningstore = 0, runningstorethin = 10, runningstoremoments = 1, - quiet = TRUE, interweaving = 4, signswitch = TRUE, - startpara = startpara, startlatent = startlatent, - startlatent0 = startlatent0, - startfacload = startfacload, startfac = startfac, priormu = priormu, - priorphiidi = priorphiidi, priorphifac = priorphifac, priorsigmaidi = priorsigmaidi, - priorsigmafac = priorsigmafac, priorfacload = priorfacload, priorng = priorng, - columnwise = columnwise, restrict = restrict, heteroskedastic = heteroskedastic, - priorhomoskedastic = priorhomoskedastic), error = function(cond) cond) - if (inherits(fsample, "error")) { - warning("MCMC halted because of an error in the factor stochastic volatility step. 
See $error for more information.") - error <- list(error = fsample, iter = i, block = "fsample") - break - } - startpara$mu <- fsample$para[1,1:n_vars,1] - startpara$phi <- fsample$para[2,,1] - startpara$sigma <- fsample$para[3,,1] - startlatent0 <- c(fsample$h0) - startlatent <- fsample$h[,,1] - startfacload <- matrix(fsample$facload[,,1], nrow = n_vars, ncol = n_fac) - startfac <- matrix(fsample$f[,,1], nrow = n_fac) - - ## Regression parameters block: sample Pi (possibly in parallel) - latent_nofac <- Z_i - t(startfac) %*% t(startfacload) - - if (!parallelize) { - if (prior_zero_mean) { - for (j in 1:n_vars) { - Pi_i[j,] <- tryCatch(mfbvar:::rmvn(X/exp(startlatent[,j]*0.5), prior_Pi_Omega[,j], latent_nofac[,j]/exp(startlatent[,j]*0.5)), error = function(cond) cond) - } - } else { - for (j in 1:n_vars) { - Pi_i[j,] <- tryCatch(mfbvar:::rmvn_ccm(X/exp(startlatent[,j]*0.5), prior_Pi_Omega[,j], latent_nofac[,j]/exp(startlatent[,j]*0.5), prior_Pi_AR1[j], j), error = function(cond) cond) - } - } - } else { - if (prior_zero_mean) { - Pi_i <- tryCatch(t(parallel::parSapply(cl, 1:n_vars, FUN = par_fun, XX = X, startlatent = startlatent, D = prior_Pi_Omega, latent_nofac = latent_nofac)), error = function(cond) cond) - } else { - Pi_i <- tryCatch(t(parallel::parSapply(cl, 1:n_vars, FUN = par_fun_AR1, XX = X, startlatent = startlatent, D = prior_Pi_Omega, latent_nofac = latent_nofac, prior_Pi_AR1 = prior_Pi_AR1)), error = function(cond) cond) - } - } - if (inherits(Pi_i, "error")) { - warning("MCMC halted because of an error in the regression parameters step. 
See $error for more information.") - error <- list(error = Pi_i, iter = i, block = "Pi_i") - break - } + a0idi <- priorphiidi[1] + b0idi <- priorphiidi[2] + a0fac <- priorphifac[1] + b0fac <- priorphifac[2] - if (verbose) { - pb$tick() - } - } + priorh0 <- rep(-1.0, n_vars + n_fac) - ################################################################ - ### Prepare the return object - return_obj <- list(Pi = Pi, Z = Z, Z_fcst = NULL, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, - prior_Pi_Omega = prior_Pi_Omega, Y = Y, n_T = n_T, n_T_ = n_T_, n_reps = n_reps, - facload = facload_storage, latent = latent, mu = mu_storage, sigma = sigma_storage, phi = phi_storage, - init = list(init_Pi = Pi_i, init_Z = Z_i, init_mu = startpara$mu, - init_phi = startpara$phi, init_sigma = startpara$sigma, - init_facload = startfacload, - init_fac = startfac, - init_latent = startlatent, - init_latent0 = startlatent0), - error = error) - - if (n_fcst>0) { - return_obj$Z_fcst <- Z_fcst + ## DL + if (!("a" %in% names(x))) { + a <- 1 + } else { + a <- x$a } - return(return_obj) - -} - -#' @rdname mcmc_sampler -mcmc_sampler.mfbvar_ss_fsv <- function(x, ...){ + gig <- ifelse(is.null(x$gig), TRUE, FALSE) - if (is.null(x$n_fac)) { - stop("The number of factors (n_fac) must be provided.") - } + RcppParallel::setThreadOptions(numThreads = x$n_cores) + ## Initials - if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) { - stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") + add_args <- list(...) 
+ n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) + + # n_vars: number of variables + # n_lags: number of lags + # n_determ: number of deterministic variables + # n_T: sample size (full sample) + # n_T_: sample size (reduced sample) + + freqs <- x$freqs + Lambda_ <- x$Lambda_ + n_q <- sum(freq == freqs[1]) + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) } - - Y <- x$Y - d <- x$d - d_fcst <- x$d_fcst - n_vars <- ncol(Y) - n_lags <- x$n_lags - n_q <- sum(x$freq == "q") - n_m <- n_vars - n_q - n_fac <- x$n_fac - n_fcst <- x$n_fcst - n_determ <- dim(d)[2] - mf <- TRUE if (n_q == 0 || n_q == n_vars) { complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) Y <- Y[complete_quarters, ] - d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) - d <- d[complete_quarters, , drop = FALSE] - mf <- FALSE - } - y_in_p <- Y[-(1:n_lags), ] - if (n_q < n_vars) { - T_b <- min(apply(y_in_p[,1:n_m], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) - } else { - T_b <- nrow(y_in_p) - } - n_T_ <- nrow(Y) - n_lags - n_T <- nrow(Y) - - add_args <- list(...) 
- n_reps <- add_args$n_reps - n_thin <- ifelse(!is.null(add_args$n_thin), add_args$n_thin, ifelse(!is.null(x$n_thin), x$n_thin, 1)) - - prior_psi_mean <- x$prior_psi_mean - prior_psi_Omega <- x$prior_psi_Omega - check_roots <- x$check_roots - if (check_roots == TRUE) { - roots <- vector("numeric", n_reps) - num_tries <- roots - } else { - num_tries <- NULL } + n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) + n_T <- dim(Y)[1]# - n_lags + n_T_ <- n_T - n_pseudolags + ## Initials init <- add_args$init - error_variance <- mfbvar:::compute_error_variances(Y) - - priormu <- x$priormu - priorphiidi <- x$priorphiidi - priorphifac <- x$priorphifac - priorsigmaidi <- x$priorsigmaidi - priorsigmafac <- x$priorsigmafac - priorfacload <- x$priorfacload - priorng <- x$priorng - columnwise <- x$columnwise - restrict <- x$restrict - heteroskedastic <- x$heteroskedastic - priorhomoskedastic <- x$priorhomoskedastic + y_in_p <- Y[-(1:n_lags), ] + error_variance <- compute_error_variances(Y) ### Regression parameters if (is.null(init$init_Pi)) { - init_Pi <- matrix(0, nrow = n_vars, ncol = n_vars*n_lags) + init_Pi <- matrix(0, nrow = n_vars, ncol = n_vars*(n_vars*n_lags + 1)) } else { init_Pi <- init$init_Pi } - - ### Regression parameters - if (is.null(init$init_psi)) { - init_psi <- colMeans(y_in_p, na.rm = TRUE) - } else { - init_psi <- init$init_psi - } ### Latent high-frequency if (is.null(init$init_Z)) { - init_Z <- y_in_p + init_Z <- fill_na(Y) } else { init_Z <- init$init_Z } @@ -399,31 +358,31 @@ mcmc_sampler.mfbvar_ss_fsv <- function(x, ...){ init_mu <- init$init_mu } if (is.null(init$init_sigma)) { - init_sigma <- rep(0.2, n_vars + n_fac) + init_sigma <- rep(0.75, n_vars + n_fac) } else { init_sigma <- init$init_sigma } if (is.null(init$init_phi)) { - init_phi <- rep(0.75, n_vars + n_fac) + init_phi <- rep(0.2, n_vars + n_fac) } else { init_phi <- init$init_phi } ### Factors and loadings if (is.null(init$init_facload)) { - init_facload <- 
matrix(rnorm(n_vars*n_fac, sd = .5)^2, nrow=n_vars, ncol=n_fac) + init_facload <- matrix(rnorm(n_vars*n_fac, sd = 0.5)^2, nrow=n_vars, ncol=n_fac) } else { init_facload <- init$init_facload } - if (is.null(init$init_fac)) { - init_fac <- matrix(rnorm(n_fac*n_T_, sd = 0.005), n_fac, n_T_) + if (is.null(init$init_f)) { + init_f <- matrix(rnorm(n_fac*n_T_, sd = 0.5), n_fac, n_T_) } else { - init_fac <- init$init_fac + init_f <- init$init_f } ### Latent volatilities if (is.null(init$init_latent)) { - init_latent <- cbind(matrix(c(log(error_variance), rep(1, n_fac)), nrow = n_T_, ncol = n_vars+n_fac, byrow = TRUE)) + init_latent <- t(cbind(matrix(c(log(error_variance), rep(1, n_fac)), nrow = n_T_, ncol = n_vars+n_fac, byrow = TRUE))) } else { init_latent <- init$init_latent } @@ -433,375 +392,228 @@ mcmc_sampler.mfbvar_ss_fsv <- function(x, ...){ init_latent0 <- init$init_latent0 } - cl <- x$cl - Z_1 <- mfbvar:::fill_na(Y)[(1:n_lags), ] - verbose <- x$verbose - - ## Set up cluster (if used) - if (!is.null(cl)) { - parallelize <- TRUE - parallel::clusterCall(cl, fun = function() library(mfbvar)) - parallel::clusterExport(cl, varlist = c("par_fun")) + if (is.null(init$init_global)) { + init_global <- 0.1 } else { - parallelize <- FALSE + init_global <- init$init_global } - prior_Pi_Omega <- mfbvar:::create_prior_Pi_Omega(x$lambda1, x$lambda2, x$lambda3, x$prior_Pi_AR1, Y, n_lags)[-1, ] - prior_Pi_AR1 <- x$prior_Pi_AR1 - prior_zero_mean <- all(x$prior_Pi_AR1 == 0) - - if (prior_zero_mean) { - if (n_vars*n_lags > 1.05 * n_T_) { - par_fun <- mfbvar:::par_fun_top(rmvn_bcm) - } else { - par_fun <- mfbvar:::par_fun_top(rmvn_rue) - } + if (is.null(init$init_aux)) { + init_aux <- c(sqrt(prior_Pi_Omega[-1,])/init_global) + } else { + init_aux <- init$init_aux } - ## Obtain the aggregation matrix for the quarterly only - if (mf) { - if (x$aggregation == "average") { - Lambda_ <- mfbvar:::build_Lambda(rep("average", n_q), 3) - } else { - Lambda_ <- 
mfbvar:::build_Lambda(rep("triangular", n_q), 5) - } + if (is.null(init$init_local)) { + init_local <- c(sqrt(prior_Pi_Omega[-1,])/init_global) + } else { + init_local <- init$init_local } - Pi <- array(init_Pi, dim = c(n_vars, n_vars*n_lags, n_reps/n_thin)) - Z <- array(init_Z, dim = c(n_T_, n_vars, n_reps/n_thin)) - psi <- matrix(init_psi, n_reps, n_vars * n_determ, byrow = TRUE) - if (n_fcst > 0) { - Z_fcst <- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps), - dimnames = list(c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)), NULL, NULL)) + if (is.null(init$init_slice)) { + init_slice <- rep(1, n_vars^2*n_lags) + } else { + init_slice <- init$init_slice } - d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) - d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] - - mu_storage <- matrix(init_mu, n_vars, n_reps/n_thin) - sigma_storage <- matrix(init_sigma, n_vars+n_fac, n_reps/n_thin) - phi_storage <- matrix(init_phi, n_vars+n_fac, n_reps/n_thin) - - facload_storage <- array(matrix(init_facload, nrow = n_vars, ncol = n_fac), dim = c(n_vars, n_fac, n_reps/n_thin)) - fac_storage <- array(matrix(init_fac, n_fac, n_T_), dim = c(n_fac, n_T_, n_reps/n_thin)) - latent <- array(init_latent, dim = c(n_T_, n_vars+n_fac, n_reps/n_thin), - dimnames = list(rownames(init_latent), colnames(init_latent), NULL)) - - Pi_i <- init_Pi - Pi_i0 <- cbind(0, Pi_i) - Z_i <- init_Z - psi_i <- init_psi - startpara <- list(mu = init_mu, - phi = init_phi, - sigma = init_sigma) - startlatent <- latent[,,1] - startlatent0 <- init_latent0 - startfacload <- matrix(init_facload, nrow = n_vars, ncol = n_fac) - startfac <- matrix(init_fac, n_fac, n_T_) - - D_mat <- mfbvar:::build_DD(d = d, n_lags = n_lags) - dt <- d[-(1:n_lags), , drop = FALSE] - d1 <- d[1:n_lags, , drop = FALSE] - mu_mat <- dt %*% t(matrix(psi_i, nrow = n_vars)) - n_Lambda <- ncol(Lambda_)/nrow(Lambda_) - mu_long <- matrix(0, n_Lambda+n_T_, n_vars) - if (mf) { - Lambda_single <- matrix(0, 1, 
n_Lambda) - for (i in 1:n_Lambda) { - Lambda_single[i] <- Lambda_[1, (i-1)*n_q+1] - } - } - my <- matrix(0, nrow(y_in_p), ncol(y_in_p)) + ################################################################ + ### Preallocation + # Pi and Sigma store their i-th draws in the third dimension, psi + # is vectorized so it has its i-th draw stored in the i-th row + # Pi: p * pk * n_reps, each [,,i] stores Pi' + # Sigma: p * p * n_reps + # psi: n_reps * p + # Z: T * p * n_reps + ### If forecasting (h is horizon): + # Z_fcst: hk * p * n_reps + # d_fcst_lags: hk * m + ### If root checking: + # roots: n_reps vector + # num_tries: n_reps vector + ### If smoothing of the state vector: + # smoothed_Z: T * p * n_reps - if (verbose) { - pb <- progress_bar$new( - format = "[:bar] :percent eta: :eta", - clear = FALSE, total = n_reps, width = 60) + Pi <- array(init_Pi, dim = c(n_vars, n_vars*n_lags + 1, n_reps/n_thin)) + Z <- array(init_Z, dim = c(n_T, n_vars, n_reps/n_thin)) + Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) + if (n_fcst > 0) { + rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) + Z_fcst[,,1] <- 0 + } else { + rownames(Z_fcst) <- (n_T-n_lags+1):n_T } - error <- NULL + mu <- matrix(init_mu, n_vars, n_reps/n_thin) + sigma <- matrix(init_sigma, n_vars+n_fac, n_reps/n_thin) + phi <- matrix(init_phi, n_vars+n_fac, n_reps/n_thin) - inv_prior_psi_Omega <- solve(prior_psi_Omega) - inv_prior_psi_Omega_mean <- inv_prior_psi_Omega %*% prior_psi_mean - for (i in 1:n_reps) { - ## Square root of idiosyncratic variances (in dense form) - Sig <- exp(0.5 * startlatent[, 1:n_vars]) + facload <- array(matrix(init_facload, nrow = n_vars, ncol = n_fac), + dim = c(n_vars, n_fac, n_reps/n_thin)) + f <- array(matrix(init_f, n_fac, n_T_), dim = c(n_fac, n_T_, n_reps/n_thin)) + h <- array(t(init_latent), dim = c(n_T_, n_vars+n_fac, n_reps/n_thin), + dimnames = list(rownames(init_latent), colnames(init_latent), NULL)) + aux <- matrix(init_aux, nrow = 
n_reps/n_thin, ncol = n_vars*n_vars*n_lags, byrow = TRUE) + local <- matrix(init_local, nrow = n_reps/n_thin, ncol = n_vars*n_vars*n_lags, byrow = TRUE) + global <- matrix(init_global, n_reps/n_thin, ncol = 1) + slice <- matrix(init_slice, nrow = 1, ncol = n_vars*n_vars*n_lags) - ## Mixed-frequency block: sample latent monthly series - if (mf) { - mfbvar:::update_demean(my, mu_long, y_in_p, mu_mat, d1, matrix(psi_i, nrow = n_vars), Lambda_single, n_vars, - n_q, n_Lambda, n_T_) - } else { - mZ <- y_in_p - mu_mat - } + ################################################################ + ### Compute terms which do not vary in the sampler + + Z_1 <- Z[1:n_pseudolags,, 1] + mcmc_minn_fsv(Y[-(1:n_lags),],Pi,Z,Z_fcst,mu,phi,sigma,f,facload,h, + aux,global,local,slice,Lambda_,prior_Pi_Omega,prior_Pi_AR1, Z_1,bmu,Bmu, + a0idi,b0idi,a0fac,b0fac,Bsigma,B011inv,B022inv,priorh0, + armarestr,armatau2,n_fac,n_reps,n_burnin,n_q,T_b-n_lags,n_lags, + n_vars,n_T_,n_fcst,n_thin,verbose,a,gig) + if (verbose) { + cat("\n") + } + return_obj <- list(Pi = Pi, Z = Z, Z_fcst = NULL, mu = mu, phi = phi, + sigma = sigma, f = f, facload = facload, h = h, + aux = aux, local = local, global = global, + Lambda_ = Lambda_, aggregation = x$aggregation, prior_Pi_Omega = prior_Pi_Omega, + prior_Pi_AR1 = prior_Pi_AR1, Y = Y, Z_1 = Z_1, bmu = bmu, + Bmu = Bmu, a0idi = a0idi, b0idi = b0idi, a0fac = a0fac, + b0fac = b0fac, Bsigma = Bsigma, B011inv = B011inv, + B022inv = B022inv, priorh0 = priorh0, armarestr = armarestr, + armatau2 = armatau2, n_fac = n_fac, n_reps = n_reps, n_burnin = n_burnin, + n_q = n_q, T_b_ = T_b-n_lags, n_lags = n_lags, + n_vars = n_vars, n_T_ = n_T_, n_fcst = n_fcst, + n_thin = n_thin, verbose = verbose) - mZ1 <- Z_1 - d1 %*% t(matrix(psi_i, nrow = n_vars)) - Pi_i0[, -1] <- Pi_i + if (n_fcst > 0) { + return_obj$Z_fcst <- Z_fcst + } + return(return_obj) - if (mf){ - mZ <- tryCatch(mfbvar:::rsimsm_adaptive_univariate(my, Pi_i0, Sig, Lambda_, mZ1, n_q, T_b, t(startfac) %*% 
t(startfacload)), error = function(cond) cond) - if (inherits(Z_i, "error")) { - warning("MCMC halted because of an error in the mixed-frequency step. See $error for more information.") - error <- list(error = Z_i_demean, iter = i, block = "z") - break - } - } - Z_i_demean <- mZ - Z_i <- Z_i_demean + mu_mat - X <- mfbvar:::create_X_noint(rbind(Z_1, Z_i), n_lags) - X_demean <- mfbvar:::create_X_noint(rbind(mZ1, Z_i_demean), n_lags) +} +mcmc_sampler.mfbvar_ss_fsv <- function(x, ...){ + n_vars <- ncol(x$Y) + if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + test_all <- sapply(x, is.null) + test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")] + stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) + } - ## Produce forecasts + prior_Pi_Omega <- create_prior_Pi_Omega(x$lambda1, x$lambda2, x$lambda3, x$prior_Pi_AR1, x$Y, x$n_lags) + prior_Pi_Omega <- prior_Pi_Omega[-1, ] + prior_Pi_AR1 <- x$prior_Pi_AR1 + prior_zero_mean <- all(x$prior_Pi_AR1 == 0) + Y <- x$Y + freq <- x$freq + verbose <- x$verbose - ## Storage - if (i %% n_thin == 0) { + prior_psi_mean <- x$prior_psi_mean + prior_psi_Omega <- x$prior_psi_Omega + d <- x$d + d_fcst <- x$d_fcst + check_roots <- x$check_roots + n_determ <- dim(d)[2] - if (n_fcst > 0) { - mu <- c(startpara$mu, numeric(n_fac)) - phi <- startpara$phi - sigma <- startpara$sigma - volatility_pred <- startlatent[n_T_, ] + n_vars <- ncol(Y) + n_lags <- x$n_lags + n_fac <- x$n_fac + n_fcst <- x$n_fcst - Z_pred <- matrix(0, n_fcst+n_lags, n_vars) - Z_pred[1:n_lags, ] <- Z_i_demean[(n_T_-n_lags+1):n_T_,] - for (j in 1:n_fcst) { - volatility_pred <- mu + phi * (volatility_pred - mu) + rnorm(n_vars+n_fac, sd = sigma) - error_pred <- rnorm(n_vars+n_fac, sd = exp(volatility_pred * 0.5)) - X_t <- mfbvar:::create_X_t_noint(Z_pred[j:(n_lags+j-1), ]) - Z_pred[j+n_lags, ] <- Pi_i %*% X_t + startfacload %*% error_pred[(n_vars+1):(n_vars+n_fac)] + error_pred[1:n_vars] - } - 
Z_fcst[,,i/n_thin] <- Z_pred + d_fcst_lags %*% t(matrix(psi_i, nrow = n_vars)) - } + ## Priors - Pi[,,i/n_thin] <- Pi_i - Z[,,i/n_thin] <- Z_i - psi[i/n_thin, ] <- psi_i + priormu <- x$priormu + priorphiidi <- x$priorphiidi + priorphifac <- x$priorphifac + priorsigmaidi <- x$priorsigmaidi + priorsigmafac <- x$priorsigmafac + priorfacload <- x$priorfacload + restrict <- x$restrict - mu_storage[,i/n_thin] <- startpara$mu - sigma_storage[,i/n_thin] <- startpara$sigma - phi_storage[,i/n_thin] <- startpara$phi + if (length(priorsigmaidi) == 1) { + priorsigmaidi <- rep(priorsigmaidi, n_vars) + } + if (length(priorsigmafac) == 1) { + priorsigmafac <- rep(priorsigmafac, n_fac) + } - fac_storage[,,i/n_thin] <- startfac - facload_storage[,,i/n_thin] <- startfacload + bmu <- priormu[1] + Bmu <- priormu[2]^2 - latent[,,i/n_thin] <- startlatent - } + Bsigma <- c(priorsigmaidi, priorsigmafac) - ## Stochastic volatility block: sample latent factors, latent volatilities and factor loadings - y_hat <- Z_i_demean - X_demean %*% t(Pi_i) - fsample <- tryCatch(factorstochvol::fsvsample(y_hat, factors = n_fac, draws = 1, burnin = 0, priorh0idi = "stationary", - priorh0fac = "stationary", thin = 1, keeptime = "all", - runningstore = 0, runningstorethin = 10, runningstoremoments = 1, - quiet = TRUE, interweaving = 4, signswitch = TRUE, - startpara = startpara, startlatent = startlatent, - startlatent0 = startlatent0, - startfacload = startfacload, startfac = startfac, priormu = priormu, - priorphiidi = priorphiidi, priorphifac = priorphifac, priorsigmaidi = priorsigmaidi, - priorsigmafac = priorsigmafac, priorfacload = priorfacload, priorng = priorng, - columnwise = columnwise, restrict = restrict, heteroskedastic = heteroskedastic, - priorhomoskedastic = priorhomoskedastic), error = function(cond) cond) - if (inherits(fsample, "error")) { - warning("MCMC halted because of an error in the factor stochastic volatility step. 
See $error for more information.") - error <- list(error = fsample, iter = i, block = "fsample") - break - } - startpara$mu <- fsample$para[1,1:n_vars,1] - startpara$phi <- fsample$para[2,,1] - startpara$sigma <- fsample$para[3,,1] - startlatent0 <- c(fsample$h0) - startlatent <- fsample$h[,,1] - startfacload <- matrix(fsample$facload[,,1], nrow = n_vars, ncol = n_fac) - startfac <- matrix(fsample$f[,,1], nrow = n_fac) - - ## Regression parameters block: sample Pi (possibly in parallel) - latent_nofac <- Z_i_demean - t(startfac) %*% t(startfacload) - - stationarity_check <- FALSE - iter <- 0 - while(stationarity_check == FALSE) { - iter <- iter + 1 - - if (!parallelize) { - if (prior_zero_mean) { - for (j in 1:n_vars) { - Pi_i[j,] <- tryCatch(mfbvar:::rmvn(X_demean/exp(startlatent[,j]*0.5), prior_Pi_Omega[,j], latent_nofac[,j]/exp(startlatent[,j]*0.5)), error = function(cond) cond) - } - } else { - for (j in 1:n_vars) { - Pi_i[j,] <- tryCatch(mfbvar:::rmvn_ccm(X_demean/exp(startlatent[,j]*0.5), prior_Pi_Omega[,j], latent_nofac[,j]/exp(startlatent[,j]*0.5), prior_Pi_AR1[j], j), error = function(cond) cond) - } - } - } else { - if (prior_zero_mean) { - Pi_i <- tryCatch(t(parallel::parSapply(cl, 1:n_vars, FUN = par_fun, XX = X_demean, startlatent = startlatent, D = prior_Pi_Omega, latent_nofac = latent_nofac)), error = function(cond) cond) - } else { - Pi_i <- tryCatch(t(parallel::parSapply(cl, 1:n_vars, FUN = par_fun_AR1, XX = X_demean, startlatent = startlatent, D = prior_Pi_Omega, latent_nofac = latent_nofac, prior_Pi_AR1 = prior_Pi_AR1)), error = function(cond) cond) - } - } - - if (inherits(Pi_i, "error")) { - warning("MCMC halted because of an error in the regression parameters step. 
See $error for more information.") - error <- list(error = Pi_i, iter = i, block = "Pi_i") - break - } - - Pi_comp <- mfbvar:::build_companion(Pi_i, n_vars = n_vars, n_lags = n_lags) - if (check_roots == TRUE) { - root <- mfbvar:::max_eig_cpp(Pi_comp) - } else { - root <- 0 - } - if (root < 1) { - stationarity_check <- TRUE - if (check_roots == TRUE) { - num_tries[i] <- iter - } - } - if (iter == 1000) { - warning("Attempted to draw stationary Pi 1,000 times.") - error <- list(error = Pi_i, iter = i, block = "Pi_i") - if (check_roots == TRUE) { - num_tries[i] <- iter - } - break - } + B011inv <- 1/10^8 + B022inv <- 1/10^12 - } + armatau2 <- matrix(priorfacload^2, n_vars, n_fac) # priorfacload is scalar, or matrix - Pi_i0[, -1] <- Pi_i - idivar <- exp(startlatent[, 1:n_vars]) - mfbvar:::posterior_psi_fsv(psi_i, mu_mat, Pi_i, D_mat, idivar, inv_prior_psi_Omega, - Z_i, X, startfacload, startfac, inv_prior_psi_Omega_mean, dt, - n_determ, n_vars, n_lags) + armarestr <- matrix(FALSE, nrow = n_vars, ncol = n_fac) + if (restrict == "upper") armarestr[upper.tri(armarestr)] <- TRUE + armarestr <- matrix(as.integer(!armarestr), nrow = nrow(armarestr), ncol = ncol(armarestr)) # restrinv - if (verbose) { - pb$tick() - } - } + a0idi <- priorphiidi[1] + b0idi <- priorphiidi[2] + a0fac <- priorphifac[1] + b0fac <- priorphifac[2] - ################################################################ - ### Prepare the return object - return_obj <- list(Pi = Pi, Z = Z, psi = psi, Z_fcst = NULL, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, - prior_Pi_Omega = prior_Pi_Omega, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, n_reps = n_reps, - n_determ = n_determ, facload = facload_storage, latent = latent, mu = mu_storage, sigma = sigma_storage, phi = phi_storage, - init = list(init_Pi = Pi_i, init_Z = Z_i, init_psi = psi_i, init_mu = startpara$mu, - init_phi = startpara$phi, init_sigma = startpara$sigma, - init_facload = startfacload, - init_fac = startfac, - init_latent = startlatent, - 
init_latent0 = startlatent0), - num_tries = num_tries, - error = error) - - if (n_fcst>0) { - return_obj$Z_fcst <- Z_fcst - } + priorh0 <- rep(-1.0, n_vars + n_fac) - return(return_obj) + ## Initials -} + add_args <- list(...) + n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) -mcmc_sampler.mfbvar_ssng_fsv <- function(x, ...){ + # n_vars: number of variables + # n_lags: number of lags + # n_determ: number of deterministic variables + # n_T: sample size (full sample) + # n_T_: sample size (reduced sample) - if (is.null(x$n_fac)) { - stop("The number of factors (n_fac) must be provided.") - } + freqs <- x$freqs + Lambda_ <- x$Lambda_ - if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) { - stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") + n_q <- sum(freq == freqs[1]) + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) } - - Y <- x$Y - d <- x$d - d_fcst <- x$d_fcst - n_vars <- ncol(Y) - n_lags <- x$n_lags - n_q <- sum(x$freq == "q") - n_m <- n_vars - n_q - n_fac <- x$n_fac - n_fcst <- x$n_fcst - n_determ <- dim(d)[2] - mf <- TRUE if (n_q == 0 || n_q == n_vars) { complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) Y <- Y[complete_quarters, ] d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) d <- d[complete_quarters, , drop = FALSE] - mf <- FALSE - } - y_in_p <- Y[-(1:n_lags), ] - if (n_q < n_vars) { - T_b <- min(apply(y_in_p[,1:n_m], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) - } else { - T_b <- nrow(y_in_p) } - n_T_ <- nrow(Y) - n_lags - n_T <- nrow(Y) - - add_args <- list(...) 
- n_reps <- add_args$n_reps - n_thin <- ifelse(!is.null(add_args$n_thin), add_args$n_thin, ifelse(!is.null(x$n_thin), x$n_thin, 1)) - prior_psi_mean <- x$prior_psi_mean - prior_psi_Omega <- x$prior_psi_Omega - check_roots <- x$check_roots - if (check_roots == TRUE) { - roots <- vector("numeric", n_reps) - num_tries <- roots - } else { - num_tries <- NULL - } - c0 <- ifelse(is.null(x$c0), 0.01, x$c0) - c1 <- ifelse(is.null(x$c1), 0.01, x$c1) - s <- ifelse(is.null(x[["s"]]), -10, x$s) - batch <- 0 - accept_vec <- numeric(n_reps) - accept <- 0 - adaptive_mh <- FALSE - if (s < 0) { - M <- abs(s) - s <- 1.0 - adaptive_mh <- TRUE - } - min_vec <- c(0.01, 0) + n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) + n_T <- dim(Y)[1]# - n_lags + n_T_ <- n_T - n_pseudolags + ## Initials init <- add_args$init - error_variance <- mfbvar:::compute_error_variances(Y) - - priormu <- x$priormu - priorphiidi <- x$priorphiidi - priorphifac <- x$priorphifac - priorsigmaidi <- x$priorsigmaidi - priorsigmafac <- x$priorsigmafac - priorfacload <- x$priorfacload - priorng <- x$priorng - columnwise <- x$columnwise - restrict <- x$restrict - heteroskedastic <- x$heteroskedastic - priorhomoskedastic <- x$priorhomoskedastic + y_in_p <- Y[-(1:n_lags), ] + error_variance <- compute_error_variances(Y) ### Regression parameters if (is.null(init$init_Pi)) { - init_Pi <- matrix(0, nrow = n_vars, ncol = n_vars*n_lags) + init_Pi <- matrix(0, nrow = n_vars, ncol = n_vars*(n_vars*n_lags)) } else { init_Pi <- init$init_Pi } - - ### Regression parameters - if (is.null(init$init_psi)) { - init_psi <- colMeans(y_in_p, na.rm = TRUE) + ### Steady-states + if (is.null(init$init_Z)) { + init_Z <- fill_na(Y) } else { - init_psi <- init$init_psi + init_Z <- init$init_Z } + ### Latent high-frequency - if (is.null(init$init_Z)) { - init_Z <- y_in_p + if (is.null(init$init_psi)) { + init_psi <- prior_psi_mean } else { - init_Z <- init$init_Z + init_psi <- init$init_psi } ### SV regressions @@ -811,31 +623,31 
@@ mcmc_sampler.mfbvar_ssng_fsv <- function(x, ...){ init_mu <- init$init_mu } if (is.null(init$init_sigma)) { - init_sigma <- rep(0.2, n_vars + n_fac) + init_sigma <- rep(0.75, n_vars + n_fac) } else { init_sigma <- init$init_sigma } if (is.null(init$init_phi)) { - init_phi <- rep(0.75, n_vars + n_fac) + init_phi <- rep(0.2, n_vars + n_fac) } else { init_phi <- init$init_phi } ### Factors and loadings if (is.null(init$init_facload)) { - init_facload <- matrix(rnorm(n_vars*n_fac, sd = .5)^2, nrow=n_vars, ncol=n_fac) + init_facload <- matrix(rnorm(n_vars*n_fac, sd = 0.5)^2, nrow=n_vars, ncol=n_fac) } else { init_facload <- init$init_facload } - if (is.null(init$init_fac)) { - init_fac <- matrix(rnorm(n_fac*n_T_, sd = 0.005), n_fac, n_T_) + if (is.null(init$init_f)) { + init_f <- matrix(rnorm(n_fac*n_T_, sd = 0.5), n_fac, n_T_) } else { - init_fac <- init$init_fac + init_f <- init$init_f } ### Latent volatilities if (is.null(init$init_latent)) { - init_latent <- cbind(matrix(c(log(error_variance), rep(1, n_fac)), nrow = n_T_, ncol = n_vars+n_fac, byrow = TRUE)) + init_latent <- t(cbind(matrix(c(log(error_variance), rep(1, n_fac)), nrow = n_T_, ncol = n_vars+n_fac, byrow = TRUE))) } else { init_latent <- init$init_latent } @@ -845,9 +657,236 @@ mcmc_sampler.mfbvar_ssng_fsv <- function(x, ...){ init_latent0 <- init$init_latent0 } + ################################################################ + ### Preallocation + # Pi and Sigma store their i-th draws in the third dimension, psi + # is vectorized so it has its i-th draw stored in the i-th row + # Pi: p * pk * n_reps, each [,,i] stores Pi' + # Sigma: p * p * n_reps + # psi: n_reps * p + # Z: T * p * n_reps + ### If forecasting (h is horizon): + # Z_fcst: hk * p * n_reps + # d_fcst_lags: hk * m + ### If root checking: + # roots: n_reps vector + # num_tries: n_reps vector + ### If smoothing of the state vector: + # smoothed_Z: T * p * n_reps + + Pi <- array(init_Pi, dim = c(n_vars, n_vars*n_lags, n_reps/n_thin)) + psi 
<- array(init_psi, dim = c(n_reps/n_thin, n_vars * n_determ)) + Z <- array(init_Z, dim = c(n_T, n_vars, n_reps/n_thin)) + Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) + if (n_fcst > 0) { + rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) + Z_fcst[,,1] <- 0 + } else { + rownames(Z_fcst) <- (n_T-n_lags+1):n_T + } + d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) + d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] + roots <- vector("numeric", n_reps/n_thin) + num_tries <- roots + + mu <- matrix(init_mu, n_vars, n_reps/n_thin) + sigma <- matrix(init_sigma, n_vars+n_fac, n_reps/n_thin) + phi <- matrix(init_phi, n_vars+n_fac, n_reps/n_thin) + + facload <- array(matrix(init_facload, nrow = n_vars, ncol = n_fac), + dim = c(n_vars, n_fac, n_reps/n_thin)) + f <- array(matrix(init_f, n_fac, n_T_), dim = c(n_fac, n_T_, n_reps/n_thin)) + + h <- array(t(init_latent), dim = c(n_T_, n_vars+n_fac, n_reps/n_thin), + dimnames = list(rownames(init_latent), colnames(init_latent), NULL)) + + ################################################################ + ### Compute terms which do not vary in the sampler + + Z_1 <- Z[1:n_pseudolags,, 1] + D_mat <- build_DD(d = d, n_lags = n_lags) + dt <- d[-(1:n_lags), , drop = FALSE] + d1 <- d[1:n_lags, , drop = FALSE] + + phi_mu <- matrix(0, 1, 1) + lambda_mu <- matrix(0, 1, 1) + omega <- matrix(diag(prior_psi_Omega), nrow = 1) + c0 <- 0 + c1 <- 0 + s <- 0 + + mcmc_ssng_fsv(Y[-(1:n_lags),],Pi,psi,phi_mu,lambda_mu,omega,Z,Z_fcst, + mu,phi,sigma,f,facload,h, + Lambda_,prior_Pi_Omega,prior_Pi_AR1,D_mat,dt,d1, + d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1,bmu,Bmu, + a0idi,b0idi,a0fac,b0fac,Bsigma,B011inv,B022inv,priorh0, + armarestr,armatau2,n_fac,n_reps,n_burnin,n_q,T_b-n_lags,n_lags, + n_vars,n_T_,n_fcst,n_determ,n_thin,verbose,FALSE) + if (verbose) { + cat("\n") + } + return_obj <- list(Pi = Pi, psi = psi, Z = Z, Z_fcst = NULL, mu = mu, phi = phi, + sigma = 
sigma, f = f, facload = facload, h = h, + Lambda_ = Lambda_, aggregation = x$aggregation, prior_Pi_Omega = prior_Pi_Omega, + prior_Pi_AR1 = prior_Pi_AR1, prior_psi_mean = prior_psi_mean, + prior_psi_Omega = diag(omega[1, ]), d = d, Y = Y, Z_1 = Z_1, bmu = bmu, + Bmu = Bmu, a0idi = a0idi, b0idi = b0idi, a0fac = a0fac, + b0fac = b0fac, Bsigma = Bsigma, B011inv = B011inv, + B022inv = B022inv, priorh0 = priorh0, armarestr = armarestr, + armatau2 = armatau2, n_fac = n_fac, n_reps = n_reps, + n_q = n_q, T_b_ = T_b-n_lags, n_lags = n_lags, + n_vars = n_vars, n_T_ = n_T_, n_fcst = n_fcst, n_determ = n_determ, + n_thin = n_thin, verbose = verbose, + init = list(init_Pi = Pi[,, n_reps/n_thin], + init_psi = psi[n_reps/n_thin, ], + init_Z = Z[,, n_reps/n_thin], + init_mu = mu[, n_reps/n_thin], + init_phi = phi[, n_reps/n_thin], + init_sigma = sigma[, n_reps/n_thin], + init_facload = facload[,,n_reps/n_thin], + init_f = f[,,n_reps/n_thin], + init_h = h[,,n_reps/n_thin])) + + if (n_fcst > 0) { + return_obj$Z_fcst <- Z_fcst + } + return(return_obj) + +} + +mcmc_sampler.mfbvar_ssng_fsv <- function(x, ...){ + n_vars <- ncol(x$Y) + if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + test_all <- sapply(x, is.null) + test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")] + stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) + } + + prior_Pi_Omega <- create_prior_Pi_Omega(x$lambda1, x$lambda2, x$lambda3, x$prior_Pi_AR1, x$Y, x$n_lags) + prior_Pi_Omega <- prior_Pi_Omega[-1, ] + prior_Pi_AR1 <- x$prior_Pi_AR1 + prior_zero_mean <- all(x$prior_Pi_AR1 == 0) + + Y <- x$Y + freq <- x$freq + verbose <- x$verbose + + prior_psi_mean <- x$prior_psi_mean + d <- x$d + d_fcst <- x$d_fcst + check_roots <- x$check_roots + n_determ <- dim(d)[2] + + n_vars <- ncol(Y) + n_lags <- x$n_lags + n_fac <- x$n_fac + n_fcst <- x$n_fcst + + ## Priors + + priormu <- x$priormu + priorphiidi <- x$priorphiidi + priorphifac <- 
x$priorphifac + priorsigmaidi <- x$priorsigmaidi + priorsigmafac <- x$priorsigmafac + priorfacload <- x$priorfacload + restrict <- x$restrict + + if (length(priorsigmaidi) == 1) { + priorsigmaidi <- rep(priorsigmaidi, n_vars) + } + if (length(priorsigmafac) == 1) { + priorsigmafac <- rep(priorsigmafac, n_fac) + } + + bmu <- priormu[1] + Bmu <- priormu[2]^2 + + Bsigma <- c(priorsigmaidi, priorsigmafac) + + B011inv <- 1/10^8 + B022inv <- 1/10^12 + + armatau2 <- matrix(priorfacload^2, n_vars, n_fac) # priorfacload is scalar, or matrix + + armarestr <- matrix(FALSE, nrow = n_vars, ncol = n_fac) + if (restrict == "upper") armarestr[upper.tri(armarestr)] <- TRUE + armarestr <- matrix(as.integer(!armarestr), nrow = nrow(armarestr), ncol = ncol(armarestr)) # restrinv + + a0idi <- priorphiidi[1] + b0idi <- priorphiidi[2] + a0fac <- priorphifac[1] + b0fac <- priorphifac[2] + + priorh0 <- rep(-1.0, n_vars + n_fac) + + c0 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[1]) + c1 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[2]) + s <- ifelse(is.null(x[["s"]]), 1, x$s) + + ## Initials + + add_args <- list(...) 
+ n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) + n_thin <- ifelse(!is.null(add_args$n_thin), add_args$n_thin, ifelse(!is.null(x$n_thin), x$n_thin, 1)) + + # n_vars: number of variables + # n_lags: number of lags + # n_determ: number of deterministic variables + # n_T: sample size (full sample) + # n_T_: sample size (reduced sample) + + freqs <- x$freqs + Lambda_ <- x$Lambda_ + + n_q <- sum(freq == freqs[1]) + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) + } + if (n_q == 0 || n_q == n_vars) { + complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) + Y <- Y[complete_quarters, ] + d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) + d <- d[complete_quarters, , drop = FALSE] + } + + n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) + n_T <- dim(Y)[1]# - n_lags + n_T_ <- n_T - n_pseudolags + + ## Initials + init <- add_args$init + y_in_p <- Y[-(1:n_lags), ] + error_variance <- compute_error_variances(Y) + + ### Regression parameters + if (is.null(init$init_Pi)) { + init_Pi <- matrix(0, nrow = n_vars, ncol = n_vars*(n_vars*n_lags)) + } else { + init_Pi <- init$init_Pi + } + + ### Latent high-frequency + if (is.null(init$init_Z)) { + init_Z <- fill_na(Y) + } else { + init_Z <- init$init_Z + } + + ### Steady-states + if (is.null(init$init_psi)) { + init_psi <- prior_psi_mean + } else { + init_psi <- init$init_psi + } + if (is.null(init$init_omega)) { - if (is.null(prior_psi_Omega)) { - init_omega <- diag(prior_psi_Omega) + if (!is.null(x$prior_psi_Omega)) { + init_omega <- diag(x$prior_psi_Omega) } else { init_omega <- rep(0.1, n_determ*n_vars) } @@ -867,314 +906,143 @@ mcmc_sampler.mfbvar_ssng_fsv <- function(x, ...){ init_lambda_mu <- init$init_lambda_mu } - cl <- x$cl - Z_1 <- mfbvar:::fill_na(Y)[(1:n_lags), ] - verbose <- x$verbose - - ## Set up cluster (if used) - if (!is.null(cl)) { - parallelize 
<- TRUE - parallel::clusterCall(cl, fun = function() library(mfbvar)) - parallel::clusterExport(cl, varlist = c("par_fun")) + ### SV regressions + if (is.null(init$init_mu)) { + init_mu <- log(error_variance) + } else { + init_mu <- init$init_mu + } + if (is.null(init$init_sigma)) { + init_sigma <- rep(0.75, n_vars + n_fac) } else { - parallelize <- FALSE + init_sigma <- init$init_sigma + } + if (is.null(init$init_phi)) { + init_phi <- rep(0.2, n_vars + n_fac) + } else { + init_phi <- init$init_phi } - prior_Pi_Omega <- mfbvar:::create_prior_Pi_Omega(x$lambda1, x$lambda2, x$lambda3, x$prior_Pi_AR1, Y, n_lags)[-1, ] - prior_Pi_AR1 <- x$prior_Pi_AR1 - prior_zero_mean <- all(x$prior_Pi_AR1 == 0) - - if (prior_zero_mean) { - if (n_vars*n_lags > 1.05 * n_T_) { - par_fun <- mfbvar:::par_fun_top(rmvn_bcm) - } else { - par_fun <- mfbvar:::par_fun_top(rmvn_rue) - } + ### Factors and loadings + if (is.null(init$init_facload)) { + init_facload <- matrix(rnorm(n_vars*n_fac, sd = 0.5)^2, nrow=n_vars, ncol=n_fac) + } else { + init_facload <- init$init_facload + } + if (is.null(init$init_f)) { + init_f <- matrix(rnorm(n_fac*n_T_, sd = 0.5), n_fac, n_T_) + } else { + init_f <- init$init_f } - ## Obtain the aggregation matrix for the quarterly only - if (mf) { - if (x$aggregation == "average") { - Lambda_ <- mfbvar:::build_Lambda(rep("average", n_q), 3) - } else { - Lambda_ <- mfbvar:::build_Lambda(rep("triangular", n_q), 5) - } + ### Latent volatilities + if (is.null(init$init_latent)) { + init_latent <- t(cbind(matrix(c(log(error_variance), rep(1, n_fac)), nrow = n_T_, ncol = n_vars+n_fac, byrow = TRUE))) + } else { + init_latent <- init$init_latent } + if (is.null(init$init_latent0)) { + init_latent0 <- numeric(n_vars + n_fac) + } else { + init_latent0 <- init$init_latent0 + } + + ################################################################ + ### Preallocation + # Pi and Sigma store their i-th draws in the third dimension, psi + # is vectorized so it has its i-th draw stored 
in the i-th row + # Pi: p * pk * n_reps, each [,,i] stores Pi' + # Sigma: p * p * n_reps + # psi: n_reps * p + # Z: T * p * n_reps + ### If forecasting (h is horizon): + # Z_fcst: hk * p * n_reps + # d_fcst_lags: hk * m + ### If root checking: + # roots: n_reps vector + # num_tries: n_reps vector + ### If smoothing of the state vector: + # smoothed_Z: T * p * n_reps Pi <- array(init_Pi, dim = c(n_vars, n_vars*n_lags, n_reps/n_thin)) - Z <- array(init_Z, dim = c(n_T_, n_vars, n_reps/n_thin)) - psi <- matrix(init_psi, n_reps, n_vars * n_determ, byrow = TRUE) + psi <- array(init_psi, dim = c(n_reps/n_thin, n_vars * n_determ)) + Z <- array(init_Z, dim = c(n_T, n_vars, n_reps/n_thin)) + Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) if (n_fcst > 0) { - Z_fcst <- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps), - dimnames = list(c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)), NULL, NULL)) + rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) + Z_fcst[,,1] <- 0 + } else { + rownames(Z_fcst) <- (n_T-n_lags+1):n_T } d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] + roots <- vector("numeric", n_reps/n_thin) + num_tries <- roots - omega <- matrix(init_omega, nrow = n_reps/n_thin, ncol = n_vars * n_determ) - phi_mu <- rep(init_phi_mu, n_reps/n_thin) - lambda_mu <- rep(init_lambda_mu, n_reps/n_thin) + mu <- matrix(init_mu, n_vars, n_reps/n_thin) + sigma <- matrix(init_sigma, n_vars+n_fac, n_reps/n_thin) + phi <- matrix(init_phi, n_vars+n_fac, n_reps/n_thin) - mu_storage <- matrix(init_mu, n_vars, n_reps/n_thin) - sigma_storage <- matrix(init_sigma, n_vars+n_fac, n_reps/n_thin) - phi_storage <- matrix(init_phi, n_vars+n_fac, n_reps/n_thin) + facload <- array(matrix(init_facload, nrow = n_vars, ncol = n_fac), + dim = c(n_vars, n_fac, n_reps/n_thin)) + f <- array(matrix(init_f, n_fac, n_T_), dim = c(n_fac, n_T_, n_reps/n_thin)) - facload_storage <- 
array(matrix(init_facload, nrow = n_vars, ncol = n_fac), dim = c(n_vars, n_fac, n_reps/n_thin)) - fac_storage <- array(matrix(init_fac, n_fac, n_T_), dim = c(n_fac, n_T_, n_reps/n_thin)) + h <- array(t(init_latent), dim = c(n_T_, n_vars+n_fac, n_reps/n_thin), + dimnames = list(rownames(init_latent), colnames(init_latent), NULL)) - latent <- array(init_latent, dim = c(n_T_, n_vars+n_fac, n_reps/n_thin), - dimnames = list(rownames(init_latent), colnames(init_latent), NULL)) + omega <- matrix(init_omega, nrow = n_reps/n_thin, ncol = n_vars * n_determ, byrow = TRUE) + phi_mu <- rep(init_phi_mu, n_reps/n_thin) + lambda_mu <- rep(init_lambda_mu, n_reps/n_thin) - Pi_i <- init_Pi - Pi_i0 <- cbind(0, Pi_i) - Z_i <- init_Z - psi_i <- init_psi - omega_i <- init_omega - phi_mu_i <- init_phi_mu - lambda_mu_i <- init_lambda_mu - startpara <- list(mu = init_mu, - phi = init_phi, - sigma = init_sigma) - startlatent <- latent[,,1] - startlatent0 <- init_latent0 - startfacload <- matrix(init_facload, nrow = n_vars, ncol = n_fac) - startfac <- matrix(init_fac, n_fac, n_T_) - - D_mat <- mfbvar:::build_DD(d = d, n_lags = n_lags) + ################################################################ + ### Compute terms which do not vary in the sampler + + Z_1 <- Z[1:n_pseudolags,, 1] + D_mat <- build_DD(d = d, n_lags = n_lags) dt <- d[-(1:n_lags), , drop = FALSE] d1 <- d[1:n_lags, , drop = FALSE] - mu_mat <- dt %*% t(matrix(psi_i, nrow = n_vars)) - n_Lambda <- ncol(Lambda_)/nrow(Lambda_) - mu_long <- matrix(0, n_Lambda+n_T_, n_vars) - if (mf) { - Lambda_single <- matrix(0, 1, n_Lambda) - for (i in 1:n_Lambda) { - Lambda_single[i] <- Lambda_[1, (i-1)*n_q+1] - } - } - my <- matrix(0, nrow(y_in_p), ncol(y_in_p)) - - if (verbose) { - pb <- progress_bar$new( - format = "[:bar] :percent eta: :eta", - clear = FALSE, total = n_reps, width = 60) - } - - error <- NULL - inv_prior_psi_Omega <- solve(prior_psi_Omega) - inv_prior_psi_Omega_mean <- inv_prior_psi_Omega %*% prior_psi_mean - for (i in 
1:n_reps) { - ## Square root of idiosyncratic variances (in dense form) - Sig <- exp(0.5 * startlatent[, 1:n_vars]) - - ## Mixed-frequency block: sample latent monthly series - if (mf) { - mfbvar:::update_demean(my, mu_long, y_in_p, mu_mat, d1, matrix(psi_i, nrow = n_vars), Lambda_single, n_vars, - n_q, n_Lambda, n_T_) - } else { - mZ <- y_in_p - mu_mat - } - mZ1 <- Z_1 - d1 %*% t(matrix(psi_i, nrow = n_vars)) - Pi_i0[, -1] <- Pi_i - - if (mf){ - mZ <- tryCatch(mfbvar:::rsimsm_adaptive_univariate(my, Pi_i0, Sig, Lambda_, mZ1, n_q, T_b, t(startfac) %*% t(startfacload)), error = function(cond) cond) - if (inherits(Z_i, "error")) { - warning("MCMC halted because of an error in the mixed-frequency step. See $error for more information.") - error <- list(error = Z_i_demean, iter = i, block = "z") - break - } - } - Z_i_demean <- mZ - Z_i <- Z_i_demean + mu_mat - X <- mfbvar:::create_X_noint(rbind(Z_1, Z_i), n_lags) - X_demean <- mfbvar:::create_X_noint(rbind(mZ1, Z_i_demean), n_lags) - - ## Produce forecasts - - - ## Storage - if (i %% n_thin == 0) { - - if (n_fcst > 0) { - mu <- c(startpara$mu, numeric(n_fac)) - phi <- startpara$phi - sigma <- startpara$sigma - volatility_pred <- startlatent[n_T_, ] - - Z_pred <- matrix(0, n_fcst+n_lags, n_vars) - Z_pred[1:n_lags, ] <- Z_i_demean[(n_T_-n_lags+1):n_T_,] - for (j in 1:n_fcst) { - volatility_pred <- mu + phi * (volatility_pred - mu) + rnorm(n_vars+n_fac, sd = sigma) - error_pred <- rnorm(n_vars+n_fac, sd = exp(volatility_pred * 0.5)) - X_t <- mfbvar:::create_X_t_noint(Z_pred[j:(n_lags+j-1), ]) - Z_pred[j+n_lags, ] <- Pi_i %*% X_t + startfacload %*% error_pred[(n_vars+1):(n_vars+n_fac)] + error_pred[1:n_vars] - } - Z_fcst[,,i/n_thin] <- Z_pred + d_fcst_lags %*% t(matrix(psi_i, nrow = n_vars)) - } - - Pi[,,i/n_thin] <- Pi_i - Z[,,i/n_thin] <- Z_i - psi[i/n_thin, ] <- psi_i - - mu_storage[,i/n_thin] <- startpara$mu - sigma_storage[,i/n_thin] <- startpara$sigma - phi_storage[,i/n_thin] <- startpara$phi - - 
fac_storage[,,i/n_thin] <- startfac - facload_storage[,,i/n_thin] <- startfacload - - latent[,,i/n_thin] <- startlatent - } - - ## Stochastic volatility block: sample latent factors, latent volatilities and factor loadings - y_hat <- Z_i_demean - X_demean %*% t(Pi_i) - fsample <- tryCatch(factorstochvol::fsvsample(y_hat, factors = n_fac, draws = 1, burnin = 0, priorh0idi = "stationary", - priorh0fac = "stationary", thin = 1, keeptime = "all", - runningstore = 0, runningstorethin = 10, runningstoremoments = 1, - quiet = TRUE, interweaving = 4, signswitch = TRUE, - startpara = startpara, startlatent = startlatent, - startlatent0 = startlatent0, - startfacload = startfacload, startfac = startfac, priormu = priormu, - priorphiidi = priorphiidi, priorphifac = priorphifac, priorsigmaidi = priorsigmaidi, - priorsigmafac = priorsigmafac, priorfacload = priorfacload, priorng = priorng, - columnwise = columnwise, restrict = restrict, heteroskedastic = heteroskedastic, - priorhomoskedastic = priorhomoskedastic), error = function(cond) cond) - if (inherits(fsample, "error")) { - warning("MCMC halted because of an error in the factor stochastic volatility step. 
See $error for more information.") - error <- list(error = fsample, iter = i, block = "fsample") - break - } - startpara$mu <- fsample$para[1,1:n_vars,1] - startpara$phi <- fsample$para[2,,1] - startpara$sigma <- fsample$para[3,,1] - startlatent0 <- c(fsample$h0) - startlatent <- fsample$h[,,1] - startfacload <- matrix(fsample$facload[,,1], nrow = n_vars, ncol = n_fac) - startfac <- matrix(fsample$f[,,1], nrow = n_fac) - - ## Regression parameters block: sample Pi (possibly in parallel) - latent_nofac <- Z_i_demean - t(startfac) %*% t(startfacload) - - stationarity_check <- FALSE - iter <- 0 - while(stationarity_check == FALSE) { - iter <- iter + 1 - - if (!parallelize) { - if (prior_zero_mean) { - for (j in 1:n_vars) { - Pi_i[j,] <- tryCatch(mfbvar:::rmvn(X_demean/exp(startlatent[,j]*0.5), prior_Pi_Omega[,j], latent_nofac[,j]/exp(startlatent[,j]*0.5)), error = function(cond) cond) - } - } else { - for (j in 1:n_vars) { - Pi_i[j,] <- tryCatch(mfbvar:::rmvn_ccm(X_demean/exp(startlatent[,j]*0.5), prior_Pi_Omega[,j], latent_nofac[,j]/exp(startlatent[,j]*0.5), prior_Pi_AR1[j], j), error = function(cond) cond) - } - } - } else { - if (prior_zero_mean) { - Pi_i <- tryCatch(t(parallel::parSapply(cl, 1:n_vars, FUN = par_fun, XX = X_demean, startlatent = startlatent, D = prior_Pi_Omega, latent_nofac = latent_nofac)), error = function(cond) cond) - } else { - Pi_i <- tryCatch(t(parallel::parSapply(cl, 1:n_vars, FUN = par_fun_AR1, XX = X_demean, startlatent = startlatent, D = prior_Pi_Omega, latent_nofac = latent_nofac, prior_Pi_AR1 = prior_Pi_AR1)), error = function(cond) cond) - } - } - - if (inherits(Pi_i, "error")) { - warning("MCMC halted because of an error in the regression parameters step. 
See $error for more information.") - error <- list(error = Pi_i, iter = i, block = "Pi_i") - break - } - - Pi_comp <- mfbvar:::build_companion(Pi_i, n_vars = n_vars, n_lags = n_lags) - if (check_roots == TRUE) { - root <- mfbvar:::max_eig_cpp(Pi_comp) - } else { - root <- 0 - } - if (root < 1) { - stationarity_check <- TRUE - if (check_roots == TRUE) { - num_tries[i] <- iter - } - } - if (iter == 1000) { - warning("Attempted to draw stationary Pi 1,000 times.") - error <- list(error = Pi_i, iter = i, block = "Pi_i") - if (check_roots == TRUE) { - num_tries[i] <- iter - } - break - } - - } - Pi_i0[, -1] <- Pi_i - idivar <- exp(startlatent[, 1:n_vars]) - gig_lambda <- phi_mu_i-0.5 - gig_chi <- lambda_mu_i * phi_mu_i - gig_psi <- (psi_i-prior_psi_mean)^2 - for (j in 1:(n_vars*n_determ)) { - omega_i[j] = mfbvar:::do_rgig1(gig_lambda, gig_chi, gig_psi[j]) - } - lambda_mu_i <- rgamma(1, n_vars*n_determ * phi_mu_i + c0, (0.5 * phi_mu_i * sum(omega_i) + c1)) - phi_mu_proposal <- phi_mu_i * exp(rnorm(1, sd = s)) - prob <- exp(mfbvar:::posterior_phi_mu(lambda_mu_i, phi_mu_proposal, omega_i, n_vars*n_determ)-mfbvar:::posterior_phi_mu(lambda_mu_i, phi_mu_i, omega_i, n_vars*n_determ)) * phi_mu_proposal/phi_mu_i - u <- runif(1) - if (u < prob) { - phi_mu <- phi_mu_proposal - accept <- 1 - } else { - accept <- 0 - } - if (adaptive_mh) { - accept_vec[i] <- accept - if (i %% 100 == 0) { - batch <- batch + 1 - min_vec[2] <- batch^(-0.5) - if (mean(accept_vec[(i-99):i]) > 0.44) { - s_prop <- log(s) + min(min_vec) - if (s_prop < M) { - s <- exp(s_prop) - } - } else { - s_prop <- log(s) - min(min_vec) - if (s_prop > -M) { - s <- exp(s_prop) - } - } - } - } - - - mfbvar:::posterior_psi_fsv(psi_i, mu_mat, Pi_i, D_mat, idivar, inv_prior_psi_Omega, - Z_i, X, startfacload, startfac, inv_prior_psi_Omega_mean, dt, - n_determ, n_vars, n_lags) - - if (verbose) { - pb$tick() - } - } + mcmc_ssng_fsv(Y[-(1:n_lags),],Pi,psi,phi_mu,lambda_mu,omega,Z,Z_fcst, + mu,phi,sigma,f,facload,h, + 
Lambda_,prior_Pi_Omega,prior_Pi_AR1,D_mat,dt,d1, + d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1,bmu,Bmu, + a0idi,b0idi,a0fac,b0fac,Bsigma,B011inv,B022inv,priorh0, + armarestr,armatau2,n_fac,n_reps,n_burnin,n_q,T_b-n_lags,n_lags, + n_vars,n_T_,n_fcst,n_determ,n_thin,verbose,TRUE) + if (verbose) { + cat("\n") + } + return_obj <- list(Pi = Pi, psi = psi, omega = omega, lambda_mu = lambda_mu, + phi_mu = phi_mu, Z = Z, Z_fcst = NULL, mu = mu, phi = phi, + sigma = sigma, f = f, facload = facload, h = h, + Lambda_ = Lambda_, aggregation = x$aggregation, prior_Pi_Omega = prior_Pi_Omega, + prior_Pi_AR1 = prior_Pi_AR1, prior_psi_mean = prior_psi_mean, + prior_psi_Omega = diag(omega[1, ]), d = d, Y = Y, Z_1 = Z_1, bmu = bmu, + Bmu = Bmu, a0idi = a0idi, b0idi = b0idi, a0fac = a0fac, + b0fac = b0fac, Bsigma = Bsigma, B011inv = B011inv, + B022inv = B022inv, priorh0 = priorh0, armarestr = armarestr, + armatau2 = armatau2, n_fac = n_fac, n_reps = n_reps, n_burnin = n_burnin, + n_q = n_q, T_b_ = T_b-n_lags, n_lags = n_lags, + n_vars = n_vars, n_T_ = n_T_, n_fcst = n_fcst, n_determ = n_determ, + n_thin = n_thin, verbose = verbose, + init = list(init_Pi = Pi[,, n_reps/n_thin], + init_psi = psi[n_reps/n_thin, ], + init_omega = omega[n_reps/n_thin, ], + init_lambda_mu = lambda_mu[n_reps/n_thin], + init_phi_mu = phi_mu[n_reps/n_thin], + init_Z = Z[,, n_reps/n_thin], + init_mu = mu[, n_reps/n_thin], + init_phi = phi[, n_reps/n_thin], + init_sigma = sigma[, n_reps/n_thin], + init_facload = facload[,,n_reps/n_thin], + init_f = f[,,n_reps/n_thin], + init_h = h[,,n_reps/n_thin])) - ################################################################ - ### Prepare the return object - return_obj <- list(Pi = Pi, Z = Z, psi = psi, Z_fcst = NULL, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, - prior_Pi_Omega = prior_Pi_Omega, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, n_reps = n_reps, - n_determ = n_determ, facload = facload_storage, latent = latent, mu = mu_storage, sigma = sigma_storage, phi 
= phi_storage, - init = list(init_Pi = Pi_i, init_Z = Z_i, init_psi = psi_i, init_mu = startpara$mu, - init_phi = startpara$phi, init_sigma = startpara$sigma, - init_facload = startfacload, - init_fac = startfac, - init_latent = startlatent, - init_latent0 = startlatent0), - num_tries = num_tries, - error = error) - - if (n_fcst>0) { + if (n_fcst > 0) { return_obj$Z_fcst <- Z_fcst } - return(return_obj) } diff --git a/R/mcmc_minn_diffuse.R b/R/mcmc_sampler_iw.R similarity index 74% rename from R/mcmc_minn_diffuse.R rename to R/mcmc_sampler_iw.R index 34f7fee..d5e1749 100644 --- a/R/mcmc_minn_diffuse.R +++ b/R/mcmc_sampler_iw.R @@ -1,34 +1,40 @@ -#' @rdname mcmc_sampler -mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ + +mcmc_sampler.mfbvar_ss_iw <- function(x, ...) { n_vars <- ncol(x$Y) - if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$prior_psi_Omega) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { test_all <- sapply(x, is.null) - test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")] + test_sub <- test_all[c("Y", "d", "prior_psi_mean", "prior_psi_Omega", "n_lags", "n_burnin", "n_reps")] stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) } + if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) { + stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") + } - # Diffuse - prior_Pi_Omega <- mfbvar:::create_prior_Pi_Omega(lambda1 = x$lambda1, lambda2 = x$lambda2, lambda3 = x$lambda3, - prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, - n_lags = x$n_lags, block_exo = x$block_exo) - prior_Pi_mean <- matrix(0, n_vars, n_vars*x$n_lags + 1) - prior_Pi_mean[, 2:(n_vars+1)] <- diag(x$prior_Pi_AR1) + priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + n_lags = x$n_lags, prior_nu = n_vars + 2) + prior_Pi_mean <- 
priors$prior_Pi_mean + prior_Pi_Omega <- priors$prior_Pi_Omega + prior_S <- priors$prior_S Y <- x$Y + d <- x$d + d_fcst <- x$d_fcst freq <- x$freq + prior_psi_mean <- x$prior_psi_mean + prior_psi_Omega <- x$prior_psi_Omega n_fcst <- x$n_fcst + check_roots <- x$check_roots verbose <- x$verbose - n_lags <- x$n_lags - lambda4 <- x$lambda4 - add_args <- list(...) - n_reps <- add_args$n_reps - n_thin <- ifelse(!is.null(add_args$n_thin), add_args$n_thin, ifelse(!is.null(x$n_thin), x$n_thin, 1)) + n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) init <- add_args$init init_Pi <- init$init_Pi init_Sigma <- init$init_Sigma + init_psi <- init$init_psi init_Z <- init$init_Z # n_vars: number of variables @@ -36,27 +42,32 @@ mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ # n_determ: number of deterministic variables # n_T: sample size (full sample) # n_T_: sample size (reduced sample) - - n_q <- sum(freq == "q") - if (n_q < n_vars) { - T_b <- max(which(!apply(apply(Y[, freq == "m", drop = FALSE], 2, is.na), 1, any))) - } else { - T_b <- nrow(Y) - } - if (n_q > 0) { - Lambda_ <- mfbvar:::build_Lambda(rep("q", n_q), 3) - } else { - Lambda_ <- matrix(0, 1, 3) - } + n_vars <- dim(Y)[2] + n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 + freqs <- x$freqs + Lambda_ <- x$Lambda_ + n_q <- sum(freq == freqs[1]) + n_m <- n_vars - n_q if (n_q == 0 || n_q == n_vars) { complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) Y <- Y[complete_quarters, ] + d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) + d <- d[complete_quarters, , drop = FALSE] + } + if (n_q < n_vars) { + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) + } else { + T_b <- nrow(Y) } - n_pseudolags <- max(c(n_lags, 3)) + n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) + n_determ <- dim(d)[2] n_T <- dim(Y)[1]# - n_lags n_T_ <- n_T - n_pseudolags - d <- matrix(1, nrow = nrow(Y), ncol = 1) + + + + 
################################################################ ### Preallocation @@ -75,10 +86,10 @@ mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ ### If smoothing of the state vector: # smoothed_Z: T * p * n_reps - Pi <- array(NA, dim = c(n_vars, n_vars * n_lags + 1, n_reps/n_thin)) + Pi <- array(NA, dim = c(n_vars, n_vars * n_lags, n_reps/n_thin)) Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) + psi <- array(NA, dim = c(n_reps/n_thin, n_vars * n_determ)) Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) - Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) if (n_fcst > 0) { rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) @@ -86,7 +97,10 @@ mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ } else { rownames(Z_fcst) <- (n_T-n_lags+1):n_T } - + d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) + d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] + roots <- vector("numeric", n_reps/n_thin) + num_tries <- roots @@ -102,7 +116,7 @@ mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ # for multiple chains if (is.null(init_Z)) { - Z[,, 1] <- mfbvar:::fill_na(Y) + Z[,, 1] <- fill_na(Y) } else { if (all(dim(Z[,, 1]) == dim(init_Z))) { Z[,, 1] <- init_Z @@ -112,10 +126,17 @@ mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ } - ols_results <- mfbvar:::ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = 1) + ols_results <- tryCatch(ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), + error = function(cond) NULL) + if (is.null(ols_results)) { + ols_results <- list() + ols_results$Pi <- prior_Pi_mean + ols_results$S <- prior_S + ols_results$psi <- prior_psi_mean + } if (is.null(init_Pi)) { - Pi[,, 1] <- cbind(ols_results$const, ols_results$Pi) + Pi[,, 1] <- ols_results$Pi } else { if (all(dim(Pi[,, 1]) == dim(init_Pi))) { Pi[,, 1] <- init_Pi @@ -125,6 +146,10 @@ 
mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ } # Compute the maximum eigenvalue of the initial Pi + if (check_roots == TRUE) { + Pi_comp <- build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) + roots[1] <- max_eig_cpp(Pi_comp) + } if (is.null(init_Sigma)) { Sigma[,, 1] <- ols_results$S @@ -136,30 +161,73 @@ mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ } } + if (is.null(init_psi)) { + if (roots[1] < 1) { + psi[1, ] <- ols_results$psi + } else { + psi[1, ] <- prior_psi_mean + } + } else { + if (length(psi[1, ]) == length(init_psi)) { + psi[1,] <- init_psi + } else { + stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "), ", but should be ", paste(length(psi[1,]), collapse = " x "))) + } + } + ################################################################ ### Compute terms which do not vary in the sampler - Z_1 <- Z[1:n_pseudolags,, 1] + # Create D (does not vary in the sampler), and find roots of Pi + # if requested + D_mat <- build_DD(d = d, n_lags = n_lags) + dt <- d[-(1:n_lags), , drop = FALSE] + d1 <- d[1:n_lags, , drop = FALSE] + psi_i <- psi[1, ] + Pi_i <- Pi[,, 1] + Sigma_i <- Sigma[,, 1] + Z_i <- Z[-(1:n_lags),, 1] + mu_mat <- dt %*% t(matrix(psi_i, nrow = n_vars)) + # For the posterior of Pi - inv_prior_Pi_Omega <- diag(1/c(prior_Pi_Omega)) - Omega_Pi <- matrix(inv_prior_Pi_Omega %*% c(prior_Pi_mean), n_vars*n_lags + 1, n_vars) + inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega)) + Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean + + # For the posterior of psi + phi_mu <- matrix(0, 1, 1) + lambda_mu <- matrix(0, 1, 1) + omega <- matrix(diag(prior_psi_Omega), nrow = 1) + c0 <- 0 + c1 <- 0 + s <- 0 - mfbvar:::mcmc_minn_diffuse(Y[-(1:n_lags),],Pi,Sigma,Z,Z_fcst,Lambda_,prior_Pi_Omega, - Omega_Pi,Z_1,n_reps,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst, - n_thin,verbose) + Z_1 <- Z[1:n_pseudolags,, 1] + 
mcmc_ssng_iw(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, + prior_S,D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1,n_reps,n_burnin, + n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose,FALSE) + if (verbose) { + cat("\n") + } + # mcmc_ssng_iw(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,Lambda_comp,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, + # prior_S,D_mat,dt,d1,d_fcst_lags,prior_psi_mean,0.01,0.01,1,check_roots,Z_1,n_reps, + # n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose) ################################################################ ### Prepare the return object - return_obj <- list(Pi = Pi, Sigma = Sigma, psi = NULL, Z = Z, roots = NULL, num_tries = NULL, - Z_fcst = NULL, smoothed_Z = NULL, n_determ = 1, + return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, roots = NULL, num_tries = NULL, + Z_fcst = NULL, aggregation = x$aggregation, n_determ = n_determ, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, - d = d, Y = Y, n_T = n_T, n_T_ = n_T_, - prior_psi_Omega = NULL, prior_psi_mean = NULL, n_reps = n_reps, Lambda_ = Lambda_, freq = freq, - init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_Z = Z[,, n_reps/n_thin])) + prior_S = prior_S, prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, + prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_, + init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin])) - if (n_fcst>0) { + if (check_roots == TRUE) { + return_obj$roots <- roots + return_obj$num_tries <- num_tries + } + if (n_fcst > 0) { return_obj$Z_fcst <- Z_fcst } @@ -167,25 +235,23 @@ 
mcmc_sampler.mfbvar_minn_diffuse <- function(x, ...){ } -#' @rdname mcmc_sampler -mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) { +mcmc_sampler.mfbvar_ssng_iw <- function(x, ...) { n_vars <- ncol(x$Y) - if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$prior_psi_Omega) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { test_all <- sapply(x, is.null) - test_sub <- test_all[c("Y", "d", "prior_psi_mean", "prior_psi_Omega", "n_lags", "n_burnin", "n_reps")] + test_sub <- test_all[c("Y", "d", "prior_psi_mean", "n_lags", "n_burnin", "n_reps")] stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) } if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) { stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") } - prior_Pi_Omega <- mfbvar:::create_prior_Pi_Omega(lambda1 = x$lambda1, lambda2 = x$lambda2, lambda3 = x$lambda3, - prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, - n_lags = x$n_lags, block_exo = x$block_exo) - prior_Pi_Omega <- prior_Pi_Omega[-1, ] - prior_Pi_mean <- matrix(0, n_vars, n_vars*x$n_lags) - prior_Pi_mean[, 1:n_vars] <- diag(x$prior_Pi_AR1) + priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + n_lags = x$n_lags, prior_nu = n_vars + 2) + prior_Pi_mean <- priors$prior_Pi_mean + prior_Pi_Omega <- priors$prior_Pi_Omega + prior_S <- priors$prior_S Y <- x$Y d <- x$d @@ -198,13 +264,17 @@ mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) { verbose <- x$verbose add_args <- list(...) 
- n_reps <- add_args$n_reps - n_thin <- ifelse(is.null(add_args$n_thin),1,add_args$n_thin) + n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) init <- add_args$init init_Pi <- init$init_Pi init_Sigma <- init$init_Sigma init_psi <- init$init_psi init_Z <- init$init_Z + init_omega <- init$init_omega + init_phi_mu <- init$init_phi_mu + init_lambda_mu <- init$init_lambda_mu # n_vars: number of variables # n_lags: number of lags @@ -213,35 +283,30 @@ mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) { # n_T_: sample size (reduced sample) n_vars <- dim(Y)[2] n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 - n_q <- sum(freq == "q") - n_m <- sum(freq == "m") + freqs <- x$freqs + Lambda_ <- x$Lambda_ + n_q <- sum(freq == freqs[1]) + n_m <- n_vars - n_q if (n_q == 0 || n_q == n_vars) { complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) Y <- Y[complete_quarters, ] d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) d <- d[complete_quarters, , drop = FALSE] } - y_in_p <- Y[-(1:n_lags), ] if (n_q < n_vars) { - T_b <- min(apply(y_in_p[,1:n_m], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) } else { - T_b <- nrow(y_in_p) - } - if (n_q > 0) { - Lambda_ <- mfbvar:::build_Lambda(rep("q", n_q), 3) - } else { - Lambda_ <- matrix(0, 1, 3) + T_b <- nrow(Y) } - - n_pseudolags <- max(c(n_lags, 3)) + n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) n_determ <- dim(d)[2] n_T <- dim(Y)[1]# - n_lags n_T_ <- n_T - n_pseudolags - - - + c0 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[1]) + c1 <- ifelse(is.null(x$prior_ng), 0.01, x$prior_ng[2]) + s <- ifelse(is.null(x[["s"]]), 1, x$s) ################################################################ ### Preallocation @@ -264,6 +329,9 @@ mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) 
{ Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) psi <- array(NA, dim = c(n_reps/n_thin, n_vars * n_determ)) Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) + omega <- matrix(NA, nrow = n_reps/n_thin, ncol = n_vars * n_determ) + phi_mu <- rep(NA, n_reps/n_thin) + lambda_mu <- rep(NA, n_reps/n_thin) Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) if (n_fcst > 0) { rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) @@ -290,7 +358,7 @@ mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) { # for multiple chains if (is.null(init_Z)) { - Z[,, 1] <- mfbvar:::fill_na(Y) + Z[,, 1] <- fill_na(Y) } else { if (all(dim(Z[,, 1]) == dim(init_Z))) { Z[,, 1] <- init_Z @@ -300,11 +368,12 @@ mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) { } - ols_results <- tryCatch(mfbvar:::ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), + ols_results <- tryCatch(ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), error = function(cond) NULL) if (is.null(ols_results)) { ols_results <- list() ols_results$Pi <- prior_Pi_mean + ols_results$S <- prior_S ols_results$psi <- prior_psi_mean } @@ -320,8 +389,8 @@ mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) { # Compute the maximum eigenvalue of the initial Pi if (check_roots == TRUE) { - Pi_comp <- mfbvar:::build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) - roots[1] <- mfbvar:::max_eig_cpp(Pi_comp) + Pi_comp <- build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) + roots[1] <- max_eig_cpp(Pi_comp) } if (is.null(init_Sigma)) { @@ -348,12 +417,32 @@ mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) 
{ } } + if (is.null(init_omega)) { + if (is.null(prior_psi_Omega)) { + omega[1, ] <- diag(prior_psi_Omega) + } else { + omega[1, ] <- rep(0.1, n_determ*n_vars) + } + } else { + omega[1, ] <- init_omega + } + if (is.null(init_phi_mu)) { + phi_mu[1] <- 1 + } else { + phi_mu[1] <- init_phi_mu + } + if (is.null(init_lambda_mu)) { + lambda_mu[1] <- 1 + } else { + lambda_mu[1] <- init_lambda_mu + } + ################################################################ ### Compute terms which do not vary in the sampler # Create D (does not vary in the sampler), and find roots of Pi # if requested - D_mat <- mfbvar:::build_DD(d = d, n_lags = n_lags) + D_mat <- build_DD(d = d, n_lags = n_lags) dt <- d[-(1:n_lags), , drop = FALSE] d1 <- d[1:n_lags, , drop = FALSE] psi_i <- psi[1, ] @@ -361,29 +450,31 @@ mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) { Sigma_i <- Sigma[,, 1] Z_i <- Z[-(1:n_lags),, 1] mu_mat <- dt %*% t(matrix(psi_i, nrow = n_vars)) + omega_i <- omega[1, ] + phi_mu_i <- phi_mu[1] + lambda_mu_i <- lambda_mu[1] # For the posterior of Pi - inv_prior_Pi_Omega <- diag(1/c(prior_Pi_Omega)) - Omega_Pi <- matrix(inv_prior_Pi_Omega %*% c(prior_Pi_mean), n_vars*n_lags, n_vars) + inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega)) + Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean - # For the posterior of psi - inv_prior_psi_Omega <- solve(prior_psi_Omega) - inv_prior_psi_Omega_mean <- inv_prior_psi_Omega %*% prior_psi_mean Z_1 <- Z[1:n_pseudolags,, 1] - mfbvar:::mcmc_ss_diffuse(Y[-(1:n_lags),],Pi,Sigma,psi,Z,Z_fcst,Lambda_,prior_Pi_Omega,Omega_Pi, - D_mat,dt,d1,d_fcst_lags,inv_prior_psi_Omega,inv_prior_psi_Omega_mean,check_roots,Z_1,n_reps, - n_q,T_b,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose) - + mcmc_ssng_iw(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega,Omega_Pi,prior_Pi_mean, + prior_S,D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1,n_reps,n_burnin, + 
n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose,TRUE) + if (verbose) { + cat("\n") + } ################################################################ ### Prepare the return object - return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, roots = NULL, num_tries = NULL, - Z_fcst = NULL, smoothed_Z = NULL, n_determ = n_determ, + return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, phi_mu = phi_mu, lambda_mu = lambda_mu, omega = omega, + Z_fcst = NULL, aggregation = x$aggregation, n_determ = n_determ, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, - d = d, Y = Y, n_T = n_T, n_T_ = n_T_, - prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, Lambda_ = Lambda_, - init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin])) + prior_S = prior_S, prior_nu = n_vars+2, post_nu = n_T + n_vars+2, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, + prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_, + init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin], init_omega = omega[n_reps/n_thin, ], init_lambda_mu = lambda_mu[n_reps/n_thin], init_phi_mu = phi_mu[n_reps/n_thin])) if (check_roots == TRUE) { return_obj$roots <- roots @@ -397,84 +488,66 @@ mcmc_sampler.mfbvar_ss_diffuse <- function(x, ...) { } -#' @rdname mcmc_sampler -mcmc_sampler.mfbvar_ssng_diffuse <- function(x, ...) 
{ +mcmc_sampler.mfbvar_minn_iw <- function(x, ...){ n_vars <- ncol(x$Y) - if (!(!is.null(x$Y) && !is.null(x$d) && !is.null(x$prior_psi_mean) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { + if (!(!is.null(x$Y) && !is.null(x$n_lags) && !is.null(x$n_burnin) && !is.null(x$n_reps))) { test_all <- sapply(x, is.null) - test_sub <- test_all[c("Y", "d", "prior_psi_mean", "n_lags", "n_burnin", "n_reps")] + test_sub <- test_all[c("Y", "n_lags", "n_burnin", "n_reps")] stop("Missing elements: ", paste(names(test_sub)[which(test_sub)], collapse = " ")) } - if (x$n_fcst > 0 && nrow(x$d_fcst) != x$n_fcst) { - stop("d_fcst has ", nrow(x$d_fcst), " rows, but n_fcst is ", x$n_fcst, ".") - } - prior_Pi_Omega <- mfbvar:::create_prior_Pi_Omega(lambda1 = x$lambda1, lambda2 = x$lambda2, lambda3 = x$lambda3, - prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, - n_lags = x$n_lags, block_exo = x$block_exo) - prior_Pi_Omega <- prior_Pi_Omega[-1, ] - prior_Pi_mean <- matrix(0, n_vars, n_vars*x$n_lags) - prior_Pi_mean[, 1:n_vars] <- diag(x$prior_Pi_AR1) + prior_nu <- n_vars + 2 + priors <- prior_Pi_Sigma(lambda1 = x$lambda1, lambda2 = x$lambda3, prior_Pi_AR1 = x$prior_Pi_AR1, Y = x$Y, + n_lags = x$n_lags, prior_nu = prior_nu) + prior_Pi_mean <- priors$prior_Pi_mean + prior_Pi_Omega <- priors$prior_Pi_Omega + prior_S <- priors$prior_S Y <- x$Y - d <- x$d - d_fcst <- x$d_fcst freq <- x$freq - prior_psi_mean <- x$prior_psi_mean - prior_psi_Omega <- x$prior_psi_Omega n_fcst <- x$n_fcst - check_roots <- x$check_roots verbose <- x$verbose + n_lags <- x$n_lags + lambda4 <- x$lambda4 + + # Add terms for constant + prior_Pi_Omega <- diag(c(x$lambda1^2*lambda4^2, diag(prior_Pi_Omega))) + prior_Pi_mean <- rbind(0, prior_Pi_mean) add_args <- list(...) 
- n_reps <- add_args$n_reps - n_thin <- ifelse(is.null(add_args$n_thin),1,add_args$n_thin) + n_reps <- x$n_reps + n_burnin <- x$n_burnin + n_thin <- ifelse(is.null(x$n_thin), 1, x$n_thin) init <- add_args$init init_Pi <- init$init_Pi init_Sigma <- init$init_Sigma - init_psi <- init$init_psi init_Z <- init$init_Z - init_omega <- init$init_omega - init_phi_mu <- init$init_phi_mu - init_lambda_mu <- init$init_lambda_mu # n_vars: number of variables # n_lags: number of lags # n_determ: number of deterministic variables # n_T: sample size (full sample) # n_T_: sample size (reduced sample) - n_vars <- dim(Y)[2] - n_lags <- prod(dim(as.matrix(prior_Pi_mean)))/n_vars^2 - n_q <- sum(freq == "q") - n_m <- sum(freq == "m") - if (n_q == 0 || n_q == n_vars) { - complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) - Y <- Y[complete_quarters, ] - d_fcst <- rbind(d[!complete_quarters, , drop = FALSE], d_fcst) - d <- d[complete_quarters, , drop = FALSE] - } - y_in_p <- Y[-(1:n_lags), ] + + freqs <- x$freqs + Lambda_ <- x$Lambda_ + n_q <- sum(freq == freqs[1]) if (n_q < n_vars) { - T_b <- min(apply(y_in_p[,1:n_m], 2, function(x) ifelse(any(is.na(x)), min(which(is.na(x))), Inf))-1, nrow(y_in_p)) + T_b <- max(which(!apply(apply(Y[, freq == freqs[2], drop = FALSE], 2, is.na), 1, any))) } else { - T_b <- nrow(y_in_p) + T_b <- nrow(Y) } - if (n_q > 0) { - Lambda_ <- mfbvar:::build_Lambda(rep("q", n_q), 3) - } else { - Lambda_ <- matrix(0, 1, 3) + if (n_q == 0 || n_q == n_vars) { + complete_quarters <- apply(Y, 1, function(x) !any(is.na(x))) + Y <- Y[complete_quarters, ] } - - n_pseudolags <- max(c(n_lags, 3)) - n_determ <- dim(d)[2] + n_pseudolags <- max(c(n_lags, ncol(Lambda_)/nrow(Lambda_))) n_T <- dim(Y)[1]# - n_lags n_T_ <- n_T - n_pseudolags - - c0 <- ifelse(is.null(x$c0), 0.01, x$c0) - c1 <- ifelse(is.null(x$c1), 0.01, x$c1) - s <- ifelse(is.null(x[["s"]]), 1, x$s) + d <- matrix(1, nrow = nrow(Y), ncol = 1) + post_nu <- n_T_ + prior_nu 
################################################################ ### Preallocation @@ -493,13 +566,10 @@ mcmc_sampler.mfbvar_ssng_diffuse <- function(x, ...) { ### If smoothing of the state vector: # smoothed_Z: T * p * n_reps - Pi <- array(NA, dim = c(n_vars, n_vars * n_lags, n_reps/n_thin)) + Pi <- array(NA, dim = c(n_vars, n_vars * n_lags + 1, n_reps/n_thin)) Sigma <- array(NA, dim = c(n_vars, n_vars, n_reps/n_thin)) - psi <- array(NA, dim = c(n_reps/n_thin, n_vars * n_determ)) Z <- array(NA, dim = c(n_T, n_vars, n_reps/n_thin)) - omega <- matrix(NA, nrow = n_reps/n_thin, ncol = n_vars * n_determ) - phi_mu <- rep(NA, n_reps/n_thin) - lambda_mu <- rep(NA, n_reps/n_thin) + Z_fcst<- array(NA, dim = c(n_fcst+n_lags, n_vars, n_reps/n_thin)) if (n_fcst > 0) { rownames(Z_fcst) <- c((n_T-n_lags+1):n_T, paste0("fcst_", 1:n_fcst)) @@ -507,10 +577,7 @@ mcmc_sampler.mfbvar_ssng_diffuse <- function(x, ...) { } else { rownames(Z_fcst) <- (n_T-n_lags+1):n_T } - d_fcst_lags <- as.matrix(rbind(d[(n_T-n_lags+1):n_T, , drop = FALSE], d_fcst)) - d_fcst_lags <- d_fcst_lags[1:(n_lags+n_fcst), , drop = FALSE] - roots <- vector("numeric", n_reps/n_thin) - num_tries <- roots + @@ -526,7 +593,7 @@ mcmc_sampler.mfbvar_ssng_diffuse <- function(x, ...) { # for multiple chains if (is.null(init_Z)) { - Z[,, 1] <- mfbvar:::fill_na(Y) + Z[,, 1] <- fill_na(Y) } else { if (all(dim(Z[,, 1]) == dim(init_Z))) { Z[,, 1] <- init_Z @@ -536,16 +603,10 @@ mcmc_sampler.mfbvar_ssng_diffuse <- function(x, ...) 
{ } - ols_results <- tryCatch(mfbvar:::ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = n_determ), - error = function(cond) NULL) - if (is.null(ols_results)) { - ols_results <- list() - ols_results$Pi <- prior_Pi_mean - ols_results$psi <- prior_psi_mean - } + ols_results <- ols_initialization(z = Z[,, 1], d = d, n_lags = n_lags, n_T = n_T, n_vars = n_vars, n_determ = 1) if (is.null(init_Pi)) { - Pi[,, 1] <- ols_results$Pi + Pi[,, 1] <- cbind(ols_results$const, ols_results$Pi) } else { if (all(dim(Pi[,, 1]) == dim(init_Pi))) { Pi[,, 1] <- init_Pi @@ -555,10 +616,6 @@ mcmc_sampler.mfbvar_ssng_diffuse <- function(x, ...) { } # Compute the maximum eigenvalue of the initial Pi - if (check_roots == TRUE) { - Pi_comp <- mfbvar:::build_companion(Pi = Pi[,, 1], n_vars = n_vars, n_lags = n_lags) - roots[1] <- mfbvar:::max_eig_cpp(Pi_comp) - } if (is.null(init_Sigma)) { Sigma[,, 1] <- ols_results$S @@ -570,86 +627,37 @@ mcmc_sampler.mfbvar_ssng_diffuse <- function(x, ...) 
{ } } - if (is.null(init_psi)) { - if (roots[1] < 1) { - psi[1, ] <- ols_results$psi - } else { - psi[1, ] <- prior_psi_mean - } - } else { - if (length(psi[1, ]) == length(init_psi)) { - psi[1,] <- init_psi - } else { - stop(paste0("The length of init_psi is ", paste(length(init_psi), collapse = " x "), ", but should be ", paste(length(psi[1,]), collapse = " x "))) - } - } - - if (is.null(init_omega)) { - if (is.null(prior_psi_Omega)) { - omega[1, ] <- diag(prior_psi_Omega) - } else { - omega[1, ] <- rep(0.1, n_determ*n_vars) - } - } else { - omega[1, ] <- init_omega - } - if (is.null(init_phi_mu)) { - phi_mu[1] <- 1 - } else { - phi_mu[1] <- init_phi_mu - } - if (is.null(init_lambda_mu)) { - lambda_mu[1] <- 1 - } else { - lambda_mu[1] <- init_lambda_mu - } - ################################################################ ### Compute terms which do not vary in the sampler - # Create D (does not vary in the sampler), and find roots of Pi - # if requested - D_mat <- mfbvar:::build_DD(d = d, n_lags = n_lags) - dt <- d[-(1:n_lags), , drop = FALSE] - d1 <- d[1:n_lags, , drop = FALSE] - psi_i <- psi[1, ] - Pi_i <- Pi[,, 1] - Sigma_i <- Sigma[,, 1] - Z_i <- Z[-(1:n_lags),, 1] - mu_mat <- dt %*% t(matrix(psi_i, nrow = n_vars)) - omega_i <- omega[1, ] - phi_mu_i <- phi_mu[1] - lambda_mu_i <- lambda_mu[1] - + Z_1 <- Z[1:n_pseudolags,, 1] # For the posterior of Pi - inv_prior_Pi_Omega <- diag(1/c(prior_Pi_Omega)) - Omega_Pi <- matrix(inv_prior_Pi_Omega %*% c(prior_Pi_mean), n_vars*n_lags, n_vars) - - Z_1 <- Z[1:n_pseudolags,, 1] + inv_prior_Pi_Omega <- chol2inv(chol(prior_Pi_Omega)) + Omega_Pi <- inv_prior_Pi_Omega %*% prior_Pi_mean - mfbvar:::mcmc_ssng_diffuse(Y[-(1:n_lags),],Pi,Sigma,psi,phi_mu,lambda_mu,omega,Z,Z_fcst,Lambda_,prior_Pi_Omega,Omega_Pi, - D_mat,dt,d1,d_fcst_lags,prior_psi_mean,c0,c1,s,check_roots,Z_1,n_reps, - n_q,T_b,n_lags,n_vars,n_T_,n_fcst,n_determ,n_thin,verbose) + 
mcmc_minn_iw(Y[-(1:n_lags),],Pi,Sigma,Z,Z_fcst,Lambda_,prior_Pi_Omega,inv_prior_Pi_Omega, + Omega_Pi,prior_Pi_mean,prior_S,Z_1,n_reps,n_burnin,n_q,T_b-n_lags,n_lags,n_vars,n_T_,n_fcst, + n_thin,verbose,2) + if (verbose) { + cat("\n") + } ################################################################ ### Prepare the return object - return_obj <- list(Pi = Pi, Sigma = Sigma, psi = psi, Z = Z, phi_mu = phi_mu, lambda_mu = lambda_mu, omega = omega, - roots = NULL, num_tries = NULL, - Z_fcst = NULL, smoothed_Z = NULL, n_determ = n_determ, + return_obj <- list(Pi = Pi, Sigma = Sigma, psi = NULL, Z = Z, roots = NULL, num_tries = NULL, + Z_fcst = NULL, aggregation = x$aggregation, n_determ = 1, n_lags = n_lags, n_vars = n_vars, n_fcst = n_fcst, prior_Pi_Omega = prior_Pi_Omega, prior_Pi_mean = prior_Pi_mean, - d = d, Y = Y, n_T = n_T, n_T_ = n_T_, - prior_psi_Omega = prior_psi_Omega, prior_psi_mean = prior_psi_mean, n_reps = n_reps, Lambda_ = Lambda_, - init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_psi = psi[n_reps/n_thin, ], init_Z = Z[,, n_reps/n_thin], init_omega = omega[n_reps/n_thin, ], init_lambda_mu = lambda_mu[n_reps/n_thin], init_phi_mu = phi_mu[n_reps/n_thin])) + prior_S = prior_S, prior_nu = prior_nu, post_nu = prior_nu + n_T_, d = d, Y = Y, n_T = n_T, n_T_ = n_T_, + prior_psi_Omega = NULL, prior_psi_mean = NULL, n_reps = n_reps, n_burnin = n_burnin, n_thin = n_thin, Lambda_ = Lambda_, freq = freq, + init = list(init_Pi = Pi[,, n_reps/n_thin], init_Sigma = Sigma[,, n_reps/n_thin], init_Z = Z[,, n_reps/n_thin])) - if (check_roots == TRUE) { - return_obj$roots <- roots - return_obj$num_tries <- num_tries - } - if (n_fcst > 0) { + if (n_fcst>0) { return_obj$Z_fcst <- Z_fcst } return(return_obj) } + + diff --git a/R/mdd.R b/R/mdd.R index b3b8617..c54ef35 100644 --- a/R/mdd.R +++ b/R/mdd.R @@ -6,6 +6,8 @@ #' @seealso \code{\link{mdd.mfbvar_ss_iw}}, \code{\link{mdd.mfbvar_minn_iw}} #' @param x argument to dispatch on (of class 
\code{mfbvar_ss} or \code{mfbvar_minn}) #' @param ... additional named arguments passed on to the methods +#' @return The logarithm of the marginal data density. +#' @details The marginal data density is also known as the marginal likelihood. mdd <- function(x, ...) { UseMethod("mdd") @@ -25,10 +27,13 @@ mdd.default <- function(x, ...) { #' Fuentes-Albero and Melosi (2013) and Ankargren, Unosson and Yang (2018). #' @return The logarithm of the marginal data density. #' @references Fuentes-Albero, C. and Melosi, L. (2013) Methods for Computing Marginal Data Densities from the Gibbs Output. -#' \emph{Journal of Econometrics}, 175(2), 132-141, \url{https://doi.org/10.1016/j.jeconom.2013.03.002}\cr +#' \emph{Journal of Econometrics}, 175(2), 132-141, \doi{10.1016/j.jeconom.2013.03.002}\cr #' Ankargren, S., Unosson, M., & Yang, Y. (2018) A Mixed-Frequency Bayesian Vector Autoregression with a Steady-State Prior. Working Paper, Department of Statistics, Uppsala University No. 2018:3. #' @seealso \code{\link{mdd}}, \code{\link{mdd.mfbvar_minn_iw}} mdd.mfbvar_ss_iw <- function(x, method = 1, ...) { + if (x$aggregation != "average") { + stop("The marginal data density can only be computed using intra-quarterly average aggregation.") + } if (method == 1) { mdd_est <- estimate_mdd_ss_1(x) } else if (method == 2) { @@ -48,9 +53,12 @@ mdd.mfbvar_ss_iw <- function(x, method = 1, ...) { #' @details The method used for estimating the marginal data density is the proposal made by #' Schorfheide and Song (2015). #' @references -#' Schorfheide, F., & Song, D. (2015) Real-Time Forecasting With a Mixed-Frequency VAR. \emph{Journal of Business & Economic Statistics}, 33(3), 366--380. \url{http://dx.doi.org/10.1080/07350015.2014.954707} +#' Schorfheide, F., & Song, D. (2015) Real-Time Forecasting With a Mixed-Frequency VAR. \emph{Journal of Business & Economic Statistics}, 33(3), 366--380. 
\doi{10.1080/07350015.2014.954707} #' @seealso \code{\link{mdd}}, \code{\link{mdd.mfbvar_ss_iw}} mdd.mfbvar_minn_iw <- function(x, ...) { + if (x$aggregation != "average") { + stop("The marginal data density can only be computed using intra-quarterly average aggregation.") + } quarterly_cols <- which(x$mfbvar_prior$freq == "q") estimate_mdd_minn(x, ...) } @@ -59,6 +67,7 @@ mdd.mfbvar_minn_iw <- function(x, ...) { #' #' This function provides the possibility to estimate the log marginal density using the steady-state MF-BVAR. #' @keywords internal +#' @noRd #' @return #' \code{estimate_mdd_ss_1} returns a list with components (all are currently in logarithms): #' \item{lklhd}{The likelihood.} @@ -102,10 +111,10 @@ estimate_mdd_ss_1 <- function(mfbvar_obj) { prior_psi_mean <- mfbvar_obj$prior_psi_mean freq <- mfbvar_obj$mfbvar_prior$freq - Lambda <- build_Lambda(freq, n_lags) n_q <- sum(freq == "q") T_b <- max(which(!apply(apply(Y[, freq == "m"], 2, is.na), 1, any))) - Lambda_ <- build_Lambda(rep("q", n_q), 3) + Lambda <- build_Lambda(ifelse(freq == "q", "average", freq), n_lags) + Lambda_ <- build_Lambda(rep("average", n_q), 3) ################################################################ ### Initialize @@ -172,14 +181,34 @@ estimate_mdd_ss_1 <- function(mfbvar_obj) { ################################################################ ### Final calculations - lklhd <- sum(c(loglike(Y = as.matrix(mZ), Lambda = Lambda, Pi_comp = Pi_comp, Q_comp = Q_comp, n_T = n_T_, n_vars = n_vars, n_comp = n_lags * n_vars, z0 = h0, P0 = P0)[-1])) - eval_prior_Pi_Sigma <- dnorminvwish(X = t(post_Pi_mean), Sigma = post_Sigma, M = prior_Pi_mean, P = prior_Pi_Omega, S = prior_S, v = n_vars+2) - eval_prior_psi <- dmultn(x = post_psi, m = prior_psi_mean, Sigma = prior_psi_Omega) - eval_RB_Pi_Sigma <- log(mean(eval_Pi_Sigma_RaoBlack(Z_array = Z_red, d = d, post_psi_center = post_psi, post_Pi_center = post_Pi_mean, post_Sigma_center = post_Sigma, - post_nu = post_nu, prior_Pi_mean = 
prior_Pi_mean, prior_Pi_Omega = prior_Pi_Omega, prior_S = prior_S, - n_vars = n_vars, n_lags = n_lags, n_reps = n_reps))) - eval_marg_psi <- log(mean(eval_psi_MargPost(Pi_array = Pi, Sigma_array = Sigma, Z_array = Z, post_psi_center = post_psi, prior_psi_mean = prior_psi_mean, - prior_psi_Omega = prior_psi_Omega, D_mat = D, n_determ = n_determ, n_vars = n_vars, n_lags = n_lags, n_reps = n_reps))) + lklhd <- sum(c(loglike(Y = as.matrix(mZ), Lambda = Lambda, + Pi_comp = Pi_comp, Q_comp = Q_comp, n_T = n_T_, + n_vars = n_vars, n_comp = n_lags * n_vars, + z0 = h0, P0 = P0)[-1])) + eval_prior_Pi_Sigma <- dnorminvwish(X = t(post_Pi_mean), Sigma = post_Sigma, + M = prior_Pi_mean, P = prior_Pi_Omega, + S = prior_S, v = n_vars+2) + eval_prior_psi <- dmultn(x = post_psi, m = prior_psi_mean, + Sigma = prior_psi_Omega) + eval_log_RB <- eval_Pi_Sigma_RaoBlack(Z_array = Z_red, d = d, + post_psi_center = post_psi, + post_Pi_center = post_Pi_mean, + post_Sigma_center = post_Sigma, + post_nu = post_nu, + prior_Pi_mean = prior_Pi_mean, + prior_Pi_Omega = prior_Pi_Omega, + prior_S = prior_S, n_vars = n_vars, + n_lags = n_lags, n_reps = n_reps) + const <- median(eval_log_RB) + eval_RB_Pi_Sigma <- log(mean(exp(eval_log_RB-const))) + const + eval_marg_psi <- log(mean(eval_psi_MargPost(Pi_array = Pi, Sigma_array = Sigma, + Z_array = Z, + post_psi_center = post_psi, + prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, + D_mat = D, n_determ = n_determ, + n_vars = n_vars, n_lags = n_lags, + n_reps = n_reps))) mdd_estimate <- c(lklhd + eval_prior_Pi_Sigma + eval_prior_psi - (eval_RB_Pi_Sigma + eval_marg_psi)) @@ -193,6 +222,7 @@ estimate_mdd_ss_1 <- function(mfbvar_obj) { #' @templateVar p_trunc TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return #' \code{estimate_mdd_ss_1} returns a list with components being \code{n_reps}-long vectors and a scalar (the final estimate). 
#' \item{eval_posterior_Pi_Sigma}{Posterior of Pi and Sigma.} @@ -307,6 +337,7 @@ estimate_mdd_ss_2 <- function(mfbvar_obj, p_trunc) { #' @param quarterly_cols numeric vector with positions of quarterly variables #' @templateVar p_trunc TRUE #' @keywords internal +#' @noRd #' @return The log marginal data density estimate (bar a constant) #' estimate_mdd_minn <- function(mfbvar_obj, p_trunc, ...) { diff --git a/R/mfbvar-package.R b/R/mfbvar-package.R index 2d9e86d..e138f68 100644 --- a/R/mfbvar-package.R +++ b/R/mfbvar-package.R @@ -4,7 +4,7 @@ #' simple. The prior for the regression parameters is normal with Minnesota-style prior moments. #' The package supports either an inverse Wishart prior for the error covariance matrix, yielding a #' standard normal-inverse Wishart prior, or a time-varying error covariance matrix by means of a factor -#' stochastic volatility model through the \code{\link[factorstochvol]{factorstochvol}} package. +#' stochastic volatility model through the \code{\link[factorstochvol]{factorstochvol-package}} package. #' #' @section Specifying the prior: #' The prior of the VAR model is specified using the function \code{\link{set_prior}}. The function @@ -26,4 +26,6 @@ #' #' @docType package #' @name mfbvar -NULL +## quiets concerns of R CMD check re: the .'s that appear in pipelines +if(getRversion() >= "2.15.1") utils::globalVariables(c(".", "obj", "prior_type", "lower", "upper", "value", + "variable", "iter", "fcst_date", "fcst", "freq", "prior_Pi_AR1")) diff --git a/R/ols.R b/R/ols.R index 358ed65..cfebe4b 100644 --- a/R/ols.R +++ b/R/ols.R @@ -1,21 +1,31 @@ -#' OLS functions -#' -#' Helper functions for multivariate regression and sum of squared error computations -#' +#' @title OLS functions +#' @description Helper functions for multivariate regression and sum of squared error computations #' @param X The regressor matrix. #' @param Y The dependnet variable matrix. 
#' @keywords internal +#' @noRd #' @return #' \item{pi_sample}{Estimated coefficients.} - ols_pi <- function(X, Y) { - pi_sample <- solve(crossprod(X)) %*% crossprod(X, Y) + ridge <- 1e-6 + error_count <- 0 + fail <- TRUE + while (fail) { + pi_sample <- tryCatch({solve(crossprod(X)+diag(ridge, ncol(X))) %*% crossprod(X, Y)}, + error = function(cond) {cond}) + if (!inherits(pi_sample, "error")) { + fail <- FALSE + } else { + ridge <- ridge*10 + } + } return(pi_sample) } #' @rdname ols_pi #' @param Pi The estimated coefficients. -#' @keywords internal +#' @keywords internal +#' @noRd #' @return #' \item{s_sample}{The sum of squared residuals matrix.} ols_s <- function(X, Y, Pi) { @@ -38,6 +48,7 @@ ols_s <- function(X, Y, Pi) { #' \item{S}{Estimated error covariance matrix.} #' \item{psi}{The estimated steady-state parameters.} #' @keywords internal +#' @noRd ols_initialization <- function(z, d, n_lags, n_T, n_vars, n_determ) { n_T <- nrow(z) diff --git a/R/posteriors.R b/R/posteriors.R index bb4458d..8c6fda0 100644 --- a/R/posteriors.R +++ b/R/posteriors.R @@ -16,6 +16,7 @@ #' @templateVar n_T TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return \code{posterior_Pi_Sigma} returns a list with: #' \item{Pi_r}{The draw of \code{Pi}.} #' \item{Sigma_r}{The draw of \code{Sigma}.} @@ -88,6 +89,7 @@ posterior_Pi_Sigma <- function(Z_r1, d, psi_r1, prior_Pi_mean, prior_Pi_Omega, i #' @templateVar n_determ TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return \code{posterior_psi} returns: #' \item{psi_r}{The draw of \code{psi}.} posterior_psi <- function(Pi_r, Sigma_r, Z_r1, prior_psi_mean, prior_psi_Omega, D_mat, n_vars, n_lags, n_determ) { @@ -115,6 +117,7 @@ posterior_psi <- function(Pi_r, Sigma_r, Z_r1, prior_psi_mean, prior_psi_Omega, #' @templateVar prior_psi_mean TRUE #' @template man_template #' @keywords internal +#' @noRd #' @return The return is: #' \item{psi}{The posterior mean (from \code{\link{posterior_psi_mean}})} 
posterior_psi_mean <- function(U, D_mat, Sigma, prior_psi_Omega, post_psi_Omega, Y_tilde, prior_psi_mean) { @@ -125,6 +128,7 @@ posterior_psi_mean <- function(U, D_mat, Sigma, prior_psi_Omega, post_psi_Omega, #' @rdname posterior_psi_mean #' @keywords internal +#' @noRd #' @return \item{psi_Omega}{The posterior variance (from \code{\link{posterior_psi_Omega}})} posterior_psi_Omega <- function(U, D_mat, Sigma, prior_psi_Omega) { psi_Omega <- chol2inv(chol(t(U) %*% (kronecker(crossprod(D_mat), chol2inv(chol(Sigma)))) %*% U + chol2inv(chol(prior_psi_Omega)))) diff --git a/R/prior_pi_sigma.R b/R/prior_pi_sigma.R index e926ec5..44e724f 100644 --- a/R/prior_pi_sigma.R +++ b/R/prior_pi_sigma.R @@ -12,6 +12,7 @@ #' \item{prior_Pi_Omega}{The prior covariance matrix for Pi.} #' \item{prior_s}{The prior for Sigma.} #' @keywords internal +#' @noRd prior_Pi_Sigma <- function(lambda1, lambda2, prior_Pi_AR1, Y, n_lags, prior_nu) { # lambda1: 1-long vector (overall tightness) # lambda2: 1-long vector (lag decay) @@ -65,6 +66,7 @@ prior_Pi_Sigma <- function(lambda1, lambda2, prior_Pi_AR1, Y, n_lags, prior_nu) #' @return #' \item{prior_Pi_Omega}{The prior covariance matrix for Pi.} #' @keywords internal +#' @noRd create_prior_Pi_Omega <- function(lambda1, lambda2, lambda3, prior_Pi_AR1, Y, n_lags, block_exo = NULL) { # lambda1: 1-long vector (overall tightness) @@ -75,7 +77,7 @@ create_prior_Pi_Omega <- function(lambda1, lambda2, lambda3, prior_Pi_AR1, Y, n_ n_vars <- length(prior_Pi_AR1) prior_Pi_Omega <- matrix(0, n_vars * n_lags + 1, n_vars) - error_variance <- mfbvar:::compute_error_variances(Y) + error_variance <- compute_error_variances(Y) prior_Pi_Omega[1, ] <- lambda1 * 100 * sqrt(error_variance) for (i in 1:n_vars) { diff --git a/R/utils.R b/R/utils.R index f8c9f54..6148e06 100644 --- a/R/utils.R +++ b/R/utils.R @@ -6,13 +6,12 @@ compute_error_variances <- function(Y) { for (i in 1:n_vars) { success <- NULL init_order <- 4 - while(is.null(success)) { - error_variance[i] <- 
tryCatch(arima(na.omit(Y[,i]), order = c(init_order, 0, 0), method = "ML")$sigma2, + for (ar_order in init_order:1) { + error_variance[i] <- tryCatch(arima(na.omit(Y[,i]), order = c(ar_order, 0, 0), method = "ML")$sigma2, error = function(cond) NA) if (!is.na(error_variance[i])) { - success <- 1 + break } else { - init_order <- init_order - 1 if (init_order < 1) { error_variance[i] <- var(na.omit(Y[,i])) } diff --git a/README.Rmd b/README.Rmd deleted file mode 100644 index 6b0899d..0000000 --- a/README.Rmd +++ /dev/null @@ -1,141 +0,0 @@ ---- -output: - github_document ---- -# mfbvar -[![Build Status](https://travis-ci.org/ankargren/mfbvar.svg?branch=master)](https://travis-ci.org/ankargren/mfbvar) -[![](http://www.r-pkg.org/badges/version/mfbvar)](http://www.r-pkg.org/pkg/mfbvar) -[![Coverage status](https://codecov.io/gh/ankargren/mfbvar/branch/master/graph/badge.svg)](https://codecov.io/github/ankargren/mfbvar?branch=master) - -## Overview -The `mfbvar` package implements a steady-state prior and a Minnesota prior for state space-based mixed-frequency VAR models. - -## Installation -The package can be installed with the help of `devtools`: -```{r, eval = FALSE} -devtools::install_github("ankargren/mfbvar") -``` - - -```{r, echo = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>", - fig.path = "man/figures/README-" -) -thm = knitr::knit_theme$get("edit-xcode") -knitr::knit_theme$set(thm) -set.seed(100) -``` - -## Usage - -To illustrate the functionality of the package, first load some data stored in the package. -```{r} -library(mfbvar) -Y <- mfbvar::mf_sweden -head(Y) -tail(Y) -``` -### Prior specification -Next, we create a minimal prior object. We must specify: 1) data, 2) the frequency of the data, 3) the number of lags, 4) the length of burn-in and main chains, respectively. This is done by calling the `set_prior()` function and giving named arguments. The resulting object is of class `mfbvar_prior` and has a basic `print` method. 
-```{r} -prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 5000, n_reps = 10000) -``` -Warnings are produced because we haven't specified values for some of the prior elements and instead the function uses default values. - -There is a print method for the prior object, showing some basic information: -```{r} -prior_obj -``` -The message tells us what elements of the prior have not yet been set, and if each of the two priors can be run with the current specification. The check is very minimal; the steady-state prior cannot be used to make forecasts (which it will attempt to if `n_fcst` is greater than `0`) unless also `d_fcst` is given, but to run the model with no forecasts only the three indicated elements are missing. - -The summary method provides a little bit more detail: -```{r} -summary(prior_obj) -``` - -### Model estimation -As the print method told us before, we can run the Minnesota prior, but not the steady-state prior with the current prior specification. The model is estimated by calling `estimate_mfbvar()`. -```{r, cache = TRUE} -mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior_type = "minn") -``` - -To use the steady-state prior, we need to specify `d`, `prior_psi_mean` and `prior_psi_Omega`. We specify the prior moments for $\psi$ using the helper function `interval_to_moments()` which converts 95 % prior probability intervals to prior moments, assuming independence. -```{r} -prior_intervals <- matrix(c( 6.5, 7.5, - 0.4/3, 0.5/3, - 0, 1, - -0.1, 0.1, - 0.5, 0.65), ncol = 2, byrow = TRUE) -psi_moments <- interval_to_moments(prior_intervals) -prior_psi_mean <- psi_moments$prior_psi_mean -prior_psi_Omega <- psi_moments$prior_psi_Omega -``` - -Instead of creating a new prior object, we can update the old by use of the `update_prior()` function. Note also that it is possible to specify `"intercept"` for `d` rather than a matrix containing a constant for the deterministic term. 
-```{r} -prior_obj <- update_prior(prior_obj, d = "intercept", prior_psi_mean = prior_psi_mean, - prior_psi_Omega = prior_psi_Omega) -prior_obj -``` - -It is now possible to estimate the model using the steady-state prior. -```{r, cache = TRUE} -mod_ss <- estimate_mfbvar(prior_obj, "ss") -``` - -It is also allowed to temporarily override elements in the prior object by adding them as separate arguments to the `estimate_mfbvar()` function. Thus, to get forecasts eight steps ahead we would use: -```{r, cache = TRUE} -mod_minn <- estimate_mfbvar(prior_obj, "minn", n_fcst = 8) -mod_ss <- estimate_mfbvar(prior_obj, "ss", n_fcst = 8) -``` - -### Processing results -The resulting objects contain all of the posterior information. The returned objects from `estimate_mfbvar()` are of class `mfbvar` and `mfbvar_ss` or `mfbvar_minn`. -```{r} -class(mod_minn) -class(mod_ss) -``` - -For forecasts, there is a `predict` method for class `mfbvar` which computes forecasts for selected quantiles. By default, it returns the 10%, 50% and 90% quantiles. -```{r} -predict(mod_minn, pred_quantiles = 0.5) -``` -If desired, it can be requested in a tidy format. -```{r} -head(predict(mod_minn, pred_quantiles = 0.5, tidy = TRUE)) -``` - -Calling plot on `mfbvar_ss` or `mfbvar_minn` objects produces plots of the forecasts and, by default, `5*n_fcst` of the preceding values. - -```{r plot_minn, cache = TRUE} -plot(mod_minn) -``` - -The axis tick labels are too long and overlap. The `plot()` method returns a `ggplot`. Hence, modifying the plot simply amounts to adding layers in the usual `ggplot2` way. The method also allows for changing where the plot should begin. -```{r plot_ss, cache = TRUE} -library(ggplot2) -plot(mod_ss, plot_start = 1) + - theme(axis.text.x = element_text(angle = 90)) -``` - -There are also some basic `print` and `summary` methods for the two classes implemented. 
-```{r} -mod_minn -mod_ss -summary(mod_minn) -summary(mod_ss) -``` -### Marginal data density estimation -To estimate the marginal data density, there is a generic function `mdd()` for which there are methods for classes `mfbvar_ss` and `mfbvar_minn`. -```{r, cache = TRUE} -mdd_minn <- mdd(mod_minn, p_trunc = 0.5) -mdd_ss_1 <- mdd(mod_ss) -mdd_ss_2 <- mdd(mod_ss, p_trunc = 0.5) - -mdd_minn -mdd_ss_1 -mdd_ss_2 -``` diff --git a/README.md b/README.md index bedae18..3f63b22 100644 --- a/README.md +++ b/README.md @@ -2,370 +2,23 @@ mfbvar ====== -[![Build Status](https://travis-ci.org/ankargren/mfbvar.svg?branch=master)](https://travis-ci.org/ankargren/mfbvar) [![](http://www.r-pkg.org/badges/version/mfbvar)](http://www.r-pkg.org/pkg/mfbvar) [![Coverage status](https://codecov.io/gh/ankargren/mfbvar/branch/master/graph/badge.svg)](https://codecov.io/github/ankargren/mfbvar?branch=master) +[![Build Status](https://travis-ci.org/ankargren/mfbvar.svg?branch=master)](https://travis-ci.org/ankargren/mfbvar) [![](http://www.r-pkg.org/badges/version/mfbvar)](https://www.r-pkg.org:443/pkg/mfbvar) [![Coverage status](https://codecov.io/gh/ankargren/mfbvar/branch/master/graph/badge.svg)](https://codecov.io/github/ankargren/mfbvar?branch=master) Overview -------- -The `mfbvar` package implements a steady-state prior and a Minnesota prior for state space-based mixed-frequency VAR models. +The `mfbvar` package implements Bayesian mixed-frequency VAR models. Installation ------------ -The package can be installed with the help of `devtools`: +The current development version of the package can be installed with the help of `devtools`: ``` r devtools::install_github("ankargren/mfbvar") ``` - Usage ----- -To illustrate the functionality of the package, first load some data stored in the package. 
- -``` r -library(mfbvar) -Y <- mfbvar::mf_sweden -head(Y) -#> unemp infl ip eti gdp -#> 1996-08-31 9.9 -0.44997116 0.5941788 0.19536978 NA -#> 1996-09-30 9.8 0.56804886 -1.5522700 0.08309475 0.4704331 -#> 1996-10-31 9.8 0.03539614 -0.4825100 0.26642772 NA -#> 1996-11-30 9.9 -0.20074400 1.3213405 0.07019829 NA -#> 1996-12-31 10.1 -0.15378249 2.7076404 -0.06840048 0.7567702 -#> 1997-01-31 10.0 -0.01183922 0.3478264 0.31459737 NA -tail(Y) -#> unemp infl ip eti gdp -#> 2015-07-31 7.3 0.02895613 -3.1285137 0.09746577 NA -#> 2015-08-31 7.0 -0.19319944 3.8446293 0.16136658 NA -#> 2015-09-30 7.3 0.39565793 0.9132484 0.23165768 0.843138 -#> 2015-10-31 7.2 0.07701935 NA 0.16152144 NA -#> 2015-11-30 NA NA NA -0.17872172 NA -#> 2015-12-31 NA NA NA 0.33933697 NA -``` - -### Prior specification - -Next, we create a minimal prior object. We must specify: 1) data, 2) the frequency of the data, 3) the number of lags, 4) the length of burn-in and main chains, respectively. This is done by calling the `set_prior()` function and giving named arguments. The resulting object is of class `mfbvar_prior` and has a basic `print` method. - -``` r -prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 1000, n_reps = 1000) -#> Warning: prior_Pi_AR1: 0 used as prior mean for AR(1) coefficients. -#> Warning: lambda1: 0.2 used as the value for the overall tightness hyperparameter. -#> Warning: lambda2: 1 used as the value for the lag decay hyperparameter. -#> Warning: lambda3: 10000 used for the constant's prior variance. -#> Warning: n_fcst: 0 used for the number of forecasts to compute. -``` - -Warnings are produced because we haven't specified values for some of the prior elements and instead the function uses default values. 
- -There is a print method for the prior object, showing some basic information: - -``` r -prior_obj -#> The following elements of the prior have not been set: -#> d d_fcst prior_psi_mean prior_psi_Omega -#> -#> Checking if steady-state prior can be run... FALSE -#> Missing elements: d prior_psi_mean prior_psi_Omega -#> Checking if Minnesota prior can be run... TRUE -``` - -The message tells us what elements of the prior have not yet been set, and if each of the two priors can be run with the current specification. The check is very minimal; the steady-state prior cannot be used to make forecasts (which it will attempt to if `n_fcst` is greater than `0`) unless also `d_fcst` is given, but to run the model with no forecasts only the three indicated elements are missing. - -The summary method provides a little bit more detail: - -``` r -summary(prior_obj) -#> PRIOR SUMMARY -#> ---------------------------- -#> Required elements: -#> Y: 5 variables, 233 time points -#> freq: m m m m q -#> prior_Pi_AR1: 0 0 0 0 0 -#> lambda1: 0.2 -#> lambda2: 1 -#> n_lags: 4 -#> n_fcst: 0 -#> n_burnin: 1000 -#> n_reps: 1000 -#> ---------------------------- -#> Steady-state-specific elements: -#> d: -#> d_fcst: -#> prior_psi_mean: -#> prior_psi_Omega: -#> ---------------------------- -#> Minnesota-specific elements: -#> lambda3: 10000 -#> ---------------------------- -#> Other: -#> verbose: FALSE -#> smooth_state: FALSE -#> check_roots: TRUE -``` - -### Model estimation - -As the print method told us before, we can run the Minnesota prior, but not the steady-state prior with the current prior specification. The model is estimated by calling `estimate_mfbvar()`. - -``` r -mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior_type = "minn") -``` - -To use the steady-state prior, we need to specify `d`, `prior_psi_mean` and `prior_psi_Omega`. 
We specify the prior moments for *ψ* using the helper function `interval_to_moments()` which converts 95 % prior probability intervals to prior moments, assuming independence. - -``` r -prior_intervals <- matrix(c( 6, 7, - 0.1, 0.2, - 0, 0.5, - -0.5, 0.5, - 0.4, 0.6), ncol = 2, byrow = TRUE) -psi_moments <- interval_to_moments(prior_intervals) -prior_psi_mean <- psi_moments$prior_psi_mean -prior_psi_Omega <- psi_moments$prior_psi_Omega -``` - -Instead of creating a new prior object, we can update the old by use of the `update_prior()` function. Note also that it is possible to specify `"intercept"` for `d` rather than a matrix containing a constant for the deterministic term. - -``` r -prior_obj <- update_prior(prior_obj, d = "intercept", prior_psi_mean = prior_psi_mean, - prior_psi_Omega = prior_psi_Omega) -prior_obj -#> The following elements of the prior have not been set: -#> -#> -#> Checking if steady-state prior can be run... TRUE -#> -#> Checking if Minnesota prior can be run... TRUE -``` - -It is now possible to estimate the model using the steady-state prior. - -``` r -mod_ss <- estimate_mfbvar(prior_obj, "ss") -``` - -It is also allowed to temporarily override elements in the prior object by adding them as separate arguments to the `estimate_mfbvar()` function. Thus, to get forecasts eight steps ahead we would use: - -``` r -mod_minn <- estimate_mfbvar(prior_obj, "minn", n_fcst = 8) -mod_ss <- estimate_mfbvar(prior_obj, "ss", n_fcst = 8) -``` - -### Processing results - -The resulting objects contain all of the posterior information. The returned objects from `estimate_mfbvar()` are of class `mfbvar` and `mfbvar_ss` or `mfbvar_minn`. - -``` r -class(mod_minn) -#> [1] "mfbvar" "mfbvar_minn" -class(mod_ss) -#> [1] "mfbvar" "mfbvar_ss" -``` - -For forecasts, there is a `predict` method for class `mfbvar` which computes forecasts for selected quantiles. By default, it returns the 10%, 50% and 90% quantiles. 
- -``` r -predict(mod_minn, pred_quantiles = 0.5) -#> $quantile_50 -#> unemp infl ip eti gdp -#> 2015-10-31 7.200000 0.07701935 0.15937929 0.161521443 0.9532468 -#> 2015-11-30 7.133971 0.06294611 0.22159661 -0.178721720 1.2583549 -#> 2015-12-31 7.144809 0.14946118 0.41239687 0.339336970 0.7827024 -#> fcst_1 7.071454 0.10820970 0.61760986 0.039354282 1.0709753 -#> fcst_2 7.063182 0.05068836 0.33710755 0.013616411 0.7690866 -#> fcst_3 7.061058 0.13085761 0.02997096 0.034041795 0.7069756 -#> fcst_4 7.002702 0.09072667 0.27579304 0.012184009 0.7835469 -#> fcst_5 7.004394 0.11209962 0.14695194 0.014447554 0.6775694 -#> fcst_6 7.026714 0.05772676 0.14264816 -0.015879097 0.6082434 -#> fcst_7 6.989170 0.08354229 -0.01363007 -0.018254112 0.5807203 -#> fcst_8 6.988067 0.09529331 -0.02175004 -0.005380111 0.5294131 -``` - -If desired, it can be requested in a tidy format. - -``` r -head(predict(mod_minn, pred_quantiles = 0.5, tidy = TRUE)) -#> value fcst_date time variable quantile -#> 1 7.200000 2015-10-31 231 unemp 0.5 -#> 2 7.133971 2015-11-30 232 unemp 0.5 -#> 3 7.144809 2015-12-31 233 unemp 0.5 -#> 4 7.071454 fcst_1 234 unemp 0.5 -#> 5 7.063182 fcst_2 235 unemp 0.5 -#> 6 7.061058 fcst_3 236 unemp 0.5 -``` - -Calling plot on `mfbvar_ss` or `mfbvar_minn` objects produces plots of the forecasts and, by default, `5*n_fcst` of the preceding values. - -``` r -plot(mod_minn) -``` - -![](man/figures/README-plot_minn-1.png) - -The axis tick labels are too long and overlap. The `plot()` method returns a `ggplot`. Hence, modifying the plot simply amounts to adding layers in the usual `ggplot2` way. The method also allows for changing where the plot should begin. - -``` r -library(ggplot2) -plot(mod_ss, plot_start = 1) + - theme(axis.text.x = element_text(angle = 90)) -``` - -![](man/figures/README-plot_ss-1.png) - -There are also some basic `print` and `summary` methods for the two classes implemented. 
- -``` r -mod_minn -#> Mixed-frequency Minnesota BVAR with: -#> 5 variables (unemp, infl, ip, eti, gdp) -#> 4 lags -#> 233 time periods (1996-08-31 - 2015-12-31) -#> 8 periods forecasted -#> 1000 draws used in main chain -mod_ss -#> Mixed-frequency steady-state BVAR with: -#> 5 variables (unemp, infl, ip, eti, gdp) -#> 4 lags -#> 233 time periods (1996-08-31 - 2015-12-31) -#> 8 periods forecasted -#> 1000 draws used in main chain -summary(mod_minn) -#> Mixed-frequency Minnesota BVAR with: -#> 5 variables (unemp, infl, ip, eti, gdp) -#> 4 lags -#> 233 time periods (1996-08-31 - 2015-12-31) -#> 8 periods forecasted -#> 1000 draws used in main chain -#> -#> ######################### -#> Posterior means computed -#> -#> Pi: -#> indep -#> dep unemp.1 infl.1 ip.1 eti.1 gdp.1 -#> unemp 0.57374475 -0.059666359 -0.009725344 -0.06143760 0.006146794 -#> infl -0.06984886 0.002022346 0.010754882 0.07097111 0.014382233 -#> ip -0.28026095 0.373048568 -0.324879157 1.28380761 0.046542786 -#> eti 0.01050189 -0.106675765 -0.010801383 0.12754578 0.040666585 -#> gdp -0.14696300 0.213991024 0.014547391 1.13724644 0.041164095 -#> indep -#> dep unemp.2 infl.2 ip.2 eti.2 gdp.2 -#> unemp 0.2244127962 0.03356854 -0.015902327 0.06713901 -0.0127318401 -#> infl 0.0007198677 -0.10438354 0.008958532 -0.10579849 -0.0257301594 -#> ip 0.0379167035 -0.02420588 -0.106269928 0.51158351 0.1165583383 -#> eti 0.0579525147 -0.06100234 0.003753267 0.03489051 -0.0008739203 -#> gdp 0.0746023155 -0.13749131 0.053161687 0.11346517 0.0191243192 -#> indep -#> dep unemp.3 infl.3 ip.3 eti.3 gdp.3 -#> unemp 0.180207956 -0.0114850007 -0.001508907 0.02592424 -0.041120556 -#> infl 0.007807395 -0.0708573306 0.013163578 0.04482953 -0.016255266 -#> ip 0.057385158 -0.1363831049 -0.021944661 0.12187429 0.088945671 -#> eti -0.017849315 -0.0009358978 -0.001677266 0.07604319 -0.009111746 -#> gdp -0.032142196 -0.3113726809 0.036196309 0.31889298 0.053109505 -#> indep -#> dep unemp.4 infl.4 ip.4 eti.4 gdp.4 -#> unemp -0.01408621 
0.008131893 0.002628882 0.005924449 -0.019056511 -#> infl 0.01576549 -0.089161854 0.001888369 -0.006608489 -0.011180306 -#> ip 0.23290153 -0.020399438 0.037653856 0.288592311 0.106793025 -#> eti -0.01859433 0.002372733 0.004739475 0.024390984 -0.004189443 -#> gdp 0.14376710 -0.283877282 0.017936840 0.206197468 0.021997354 -#> -#> -#> Sigma: -#> -#> unemp infl ip eti gdp -#> unemp 0.073875070 -0.01507315 0.01497214 -0.003623481 -0.092901512 -#> infl -0.015073150 0.15106145 0.08214672 0.011032287 0.244597985 -#> ip 0.014972144 0.08214672 3.44281563 0.039709454 1.196231322 -#> eti -0.003623481 0.01103229 0.03970945 0.061596980 0.009206275 -#> gdp -0.092901512 0.24459799 1.19623132 0.009206275 1.949578823 -#> -#> -#> Intercept: -#> const -#> unemp 0.2891893 -#> infl 0.4700659 -#> ip -0.5041606 -#> eti -0.2305289 -#> gdp 0.2735818 -summary(mod_ss) -#> Mixed-frequency steady-state BVAR with: -#> 5 variables (unemp, infl, ip, eti, gdp) -#> 4 lags -#> 233 time periods (1996-08-31 - 2015-12-31) -#> 8 periods forecasted -#> 1000 draws used in main chain -#> -#> ######################### -#> Posterior means computed -#> -#> Pi: -#> indep -#> dep unemp.1 infl.1 ip.1 eti.1 gdp.1 -#> unemp 0.565601021 -0.0501664015 -0.008066323 -0.05900102 -0.0007952872 -#> infl -0.074308112 -0.0007645842 0.009610393 0.07430790 0.0202526049 -#> ip -0.292356043 0.3809592812 -0.311388254 1.27143261 0.0273369800 -#> eti 0.005474905 -0.1043835489 -0.010273813 0.12866117 0.0401244127 -#> gdp -0.086955522 0.1303419397 -0.004154454 0.93475266 0.2006896126 -#> indep -#> dep unemp.2 infl.2 ip.2 eti.2 gdp.2 -#> unemp 0.2272407402 0.03003094 -0.015271721 0.06977185 -0.011801016 -#> infl -0.0005859319 -0.10423933 0.008107247 -0.10627777 -0.026221590 -#> ip 0.0230012865 -0.04416975 -0.105170620 0.51452143 0.130369410 -#> eti 0.0603574544 -0.06187126 0.004178602 0.04128857 -0.002430807 -#> gdp 0.0281931404 -0.21188665 0.039575709 0.09325510 0.018345264 -#> indep -#> dep unemp.3 infl.3 ip.3 eti.3 gdp.3 -#> 
unemp 0.192855517 -0.01277139 -0.002135726 0.02822116 -0.038509233 -#> infl 0.003961296 -0.06653470 0.013711116 0.04528497 -0.020365545 -#> ip 0.048791472 -0.15263152 -0.017387847 0.11410916 0.088923592 -#> eti -0.018663976 0.00117084 -0.001842802 0.08064725 -0.007882617 -#> gdp -0.048661001 -0.30248337 0.031722774 0.28184597 0.006087828 -#> indep -#> dep unemp.4 infl.4 ip.4 eti.4 gdp.4 -#> unemp -0.01671768 0.008880352 0.002886700 0.006653569 -0.020834549 -#> infl 0.01883941 -0.085232793 0.001704578 -0.011254046 -0.010258549 -#> ip 0.21395374 -0.014386281 0.039623469 0.277043607 0.100948317 -#> eti -0.01861225 0.005324372 0.005419283 0.022077091 -0.005266916 -#> gdp 0.13001238 -0.232934626 0.018196798 0.174806053 0.028511508 -#> -#> -#> Sigma: -#> -#> unemp infl ip eti gdp -#> unemp 0.071486945 -0.01446027 0.01217137 -0.003060791 -0.07739934 -#> infl -0.014460268 0.14545293 0.08448546 0.010415174 0.22662945 -#> ip 0.012171371 0.08448546 3.34777514 0.039130525 1.12270508 -#> eti -0.003060791 0.01041517 0.03913053 0.059837815 0.01561569 -#> gdp -0.077399337 0.22662945 1.12270508 0.015615690 1.51667808 -#> -#> -#> Psi: -#> d1 -#> unemp 6.56635166 -#> infl 0.13596181 -#> ip 0.07535510 -#> eti -0.02896786 -#> gdp 0.52704781 -``` - -### Marginal data density estimation - -To estimate the marginal data density, there is a generic function `mdd()` for which there are methods for classes `mfbvar_ss` and `mfbvar_minn`. - -``` r -mdd_minn <- mdd(mod_minn, p_trunc = 0.5) -mdd_ss_1 <- mdd(mod_ss) -mdd_ss_2 <- mdd(mod_ss, p_trunc = 0.5) - -mdd_minn -#> [1] -867.5615 -mdd_ss_1 -#> [1] -802.3645 -mdd_ss_2 -#> [1] -803.4983 -``` +See the vignette for details and examples. 
diff --git a/README_cache/markdown_github-ascii_identifiers/__packages b/README_cache/markdown_github-ascii_identifiers/__packages deleted file mode 100644 index dc0f186..0000000 --- a/README_cache/markdown_github-ascii_identifiers/__packages +++ /dev/null @@ -1,3 +0,0 @@ -base -mfbvar -ggplot2 diff --git a/README_cache/markdown_github-ascii_identifiers/plot_minn_a002844aedc86a67f3b931a461e600c7.RData b/README_cache/markdown_github-ascii_identifiers/plot_minn_a002844aedc86a67f3b931a461e600c7.RData deleted file mode 100644 index 8840600..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/plot_minn_a002844aedc86a67f3b931a461e600c7.RData and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/plot_minn_a002844aedc86a67f3b931a461e600c7.rdb b/README_cache/markdown_github-ascii_identifiers/plot_minn_a002844aedc86a67f3b931a461e600c7.rdb deleted file mode 100644 index e69de29..0000000 diff --git a/README_cache/markdown_github-ascii_identifiers/plot_minn_a002844aedc86a67f3b931a461e600c7.rdx b/README_cache/markdown_github-ascii_identifiers/plot_minn_a002844aedc86a67f3b931a461e600c7.rdx deleted file mode 100644 index c09d9a3..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/plot_minn_a002844aedc86a67f3b931a461e600c7.rdx and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/plot_ss_344c10aed1199ad05678af43c60d5f13.RData b/README_cache/markdown_github-ascii_identifiers/plot_ss_344c10aed1199ad05678af43c60d5f13.RData deleted file mode 100644 index 9a3df8b..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/plot_ss_344c10aed1199ad05678af43c60d5f13.RData and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/plot_ss_344c10aed1199ad05678af43c60d5f13.rdb b/README_cache/markdown_github-ascii_identifiers/plot_ss_344c10aed1199ad05678af43c60d5f13.rdb deleted file mode 100644 index e69de29..0000000 diff --git 
a/README_cache/markdown_github-ascii_identifiers/plot_ss_344c10aed1199ad05678af43c60d5f13.rdx b/README_cache/markdown_github-ascii_identifiers/plot_ss_344c10aed1199ad05678af43c60d5f13.rdx deleted file mode 100644 index c09d9a3..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/plot_ss_344c10aed1199ad05678af43c60d5f13.rdx and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-10_325b7684b30e75d2ad6bba3861d6f3bf.RData b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-10_325b7684b30e75d2ad6bba3861d6f3bf.RData deleted file mode 100644 index 039cbdd..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-10_325b7684b30e75d2ad6bba3861d6f3bf.RData and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-10_325b7684b30e75d2ad6bba3861d6f3bf.rdb b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-10_325b7684b30e75d2ad6bba3861d6f3bf.rdb deleted file mode 100644 index b459555..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-10_325b7684b30e75d2ad6bba3861d6f3bf.rdb and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-10_325b7684b30e75d2ad6bba3861d6f3bf.rdx b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-10_325b7684b30e75d2ad6bba3861d6f3bf.rdx deleted file mode 100644 index 25604e1..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-10_325b7684b30e75d2ad6bba3861d6f3bf.rdx and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-11_a36374a7d7e516c36b1b3c5ca1e410d8.RData b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-11_a36374a7d7e516c36b1b3c5ca1e410d8.RData deleted file mode 100644 index fd8fc2c..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-11_a36374a7d7e516c36b1b3c5ca1e410d8.RData and /dev/null differ diff --git 
a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-11_a36374a7d7e516c36b1b3c5ca1e410d8.rdb b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-11_a36374a7d7e516c36b1b3c5ca1e410d8.rdb deleted file mode 100644 index bc3925e..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-11_a36374a7d7e516c36b1b3c5ca1e410d8.rdb and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-11_a36374a7d7e516c36b1b3c5ca1e410d8.rdx b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-11_a36374a7d7e516c36b1b3c5ca1e410d8.rdx deleted file mode 100644 index c276ede..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-11_a36374a7d7e516c36b1b3c5ca1e410d8.rdx and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-16_670e2537f4bdd394c478fe8bdc6cbe97.RData b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-16_670e2537f4bdd394c478fe8bdc6cbe97.RData deleted file mode 100644 index 0cd6790..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-16_670e2537f4bdd394c478fe8bdc6cbe97.RData and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-16_670e2537f4bdd394c478fe8bdc6cbe97.rdb b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-16_670e2537f4bdd394c478fe8bdc6cbe97.rdb deleted file mode 100644 index a9ff5af..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-16_670e2537f4bdd394c478fe8bdc6cbe97.rdb and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-16_670e2537f4bdd394c478fe8bdc6cbe97.rdx b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-16_670e2537f4bdd394c478fe8bdc6cbe97.rdx deleted file mode 100644 index c290ee8..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-16_670e2537f4bdd394c478fe8bdc6cbe97.rdx and /dev/null 
differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-3_38310e8efee370f746e264c82335e0f2.RData b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-3_38310e8efee370f746e264c82335e0f2.RData deleted file mode 100644 index 8bfe9a6..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-3_38310e8efee370f746e264c82335e0f2.RData and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-3_38310e8efee370f746e264c82335e0f2.rdb b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-3_38310e8efee370f746e264c82335e0f2.rdb deleted file mode 100644 index e69de29..0000000 diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-3_38310e8efee370f746e264c82335e0f2.rdx b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-3_38310e8efee370f746e264c82335e0f2.rdx deleted file mode 100644 index c09d9a3..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-3_38310e8efee370f746e264c82335e0f2.rdx and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-7_4051147f685cd5a44d797a44b15703ed.RData b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-7_4051147f685cd5a44d797a44b15703ed.RData deleted file mode 100644 index ceacc60..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-7_4051147f685cd5a44d797a44b15703ed.RData and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-7_4051147f685cd5a44d797a44b15703ed.rdb b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-7_4051147f685cd5a44d797a44b15703ed.rdb deleted file mode 100644 index aa3fad8..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-7_4051147f685cd5a44d797a44b15703ed.rdb and /dev/null differ diff --git a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-7_4051147f685cd5a44d797a44b15703ed.rdx 
b/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-7_4051147f685cd5a44d797a44b15703ed.rdx deleted file mode 100644 index be620e0..0000000 Binary files a/README_cache/markdown_github-ascii_identifiers/unnamed-chunk-7_4051147f685cd5a44d797a44b15703ed.rdx and /dev/null differ diff --git a/cran-comments.md b/cran-comments.md index b5275b9..4922373 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,22 +1,36 @@ -## Resubmission -This is a resubmission. In this version I have: +## Re-submission of archived package +This is a resubmission of the mfbvar package that was archived on 2021-02-05 because check problems were not corrected in time. This version thus: + * Fixes the check problems. The observed warnings came from the vignette, which attempted to download data. This has now been removed; vignette does no longer use an active internet connection (as per the CRAN policy). + * I have also extended the ability of the package to handle mixed-frequency data to now also include weekly-monthly data. + * .Rd files now include \value, as per Gregor Seyer's request on 2021-02-09. + * DESCRIPTION has been extended to include references. -* Explained the acronym in the Description. - -* Added links/references in the Description to the methods used in the package. ## Test environments -* Local OS X Mojave 10.14.1 install, R 3.5.1 -* win-builder (R 3.5.1, devel) -* ubuntu 14.04 (on travis-ci), R 3.4.4, 3.5.1, devel -* OS X El Capitan 10.13.3 (on travis-ci), R 3.4.4, 3.5.1 + * win-builder (R devel, R 4.0.2, R 3.6.3) + * Local Mac OS X 10.14.3 (R 4.0.3) ## R CMD check results There were no ERRORs or WARNINGs. -There was 1 NOTE: +On some test environments, one of two NOTEs may appear: +* checking for GNU extensions in Makefiles ... NOTE + GNU make is a SystemRequirements. * checking CRAN incoming feasibility ... 
NOTE -Maintainer: ‘Sebastian Ankargren ’ + Maintainer: ‘Sebastian Ankargren ’ + + New submission + + Package was archived on CRAN + + Possibly mis-spelled words in DESCRIPTION: + Ankargren (10:324, 10:389, 10:441) + Joneus (10:403, 10:455) + Schorfheide (10:224) + Unosson (10:335) + + CRAN repository db overrides: + X-CRAN-Comment: Archived on 2021-02-05 as check problems were not + corrected in time. -New submission diff --git a/data/mf_usa.RData b/data/mf_usa.RData new file mode 100644 index 0000000..7b948ed Binary files /dev/null and b/data/mf_usa.RData differ diff --git a/inst/CITATION b/inst/CITATION new file mode 100644 index 0000000..06b616e --- /dev/null +++ b/inst/CITATION @@ -0,0 +1,61 @@ +## citHeader("To cite package mfbvar in publications use:") + +## R >= 2.8.0 passes package metadata to citation(). +if(!exists("meta") || is.null(meta)) meta <- packageDescription("mfbvar") +year <- sub("-.*", "", meta$Packaged) +note <- sprintf("R package version %s", meta$Version) + +bibentry(bibtype = "article", + header = "To cite mfbvar in publications use:", + title = "Mixed-Frequency {B}ayesian {VAR} Models in {R}: the {mfbvar} package", + author = personList( + person(given = "Sebastian", + family = "Ankargren", + email = "sebastian.ankargren@konj.se"), + person(given = "Yukai", + family = "Yang", + email = "yukai.yang@statistics.uu.se")), + journal = "R package vignette", + year = "2021", + textVersion = + paste("Ankargren, Sebastian and Yang, Yukai (2021).", + "Mixed-Frequency Bayesian VAR Models in R: the mfbvar package.", + "R package vignette.", + "URL: https://CRAN.R-project.org/package=mfbvar/vignettes/mfbvar_jss.pdf") +) + +citEntry(header = "For the adaptive simulation smoother, please cite:", + entry = "article", + title = "Simulation Smoothing for Nowcasting with Large Mixed-Frequency {VAR}s", + author = personList(as.person("Sebastian Ankargren"), + as.person("Paulina Jon\\\'{e}us")), + journal = "Econometrics and Statistics", + year = "2020", + doi 
= "10.1016/j.ecosta.2020.05.007", + textVersion = "Ankargren, Sebastian and Jonéus, Paulina (2020). Simulation Smoothing for Nowcasting with Large Mixed-Frequency VARs. Econometrics and Statistics. https://doi.org/10.1016/j.ecosta.2020.05.007" +) + +citEntry(header = "For the steady-state mixed-frequency BVAR, please cite:", + entry = "article", + title = "A Flexible Mixed-Frequency Vector Autoregression with a Steady-State Prior", + author = personList(as.person("Sebastian Ankargren"), + as.person("Måns Unosson"), + as.person("Yukai Yang")), + journal = "Journal of Time Series Econometrics", + volume = "12", + number = "2", + year = "2020", + doi = "10.1515/jtse-2018-0034", + textVersion = "Ankargren, Sebastian, Måns Unosson and Yukai Yang (2020). A Flexible Mixed-Frequency Vector Autoregression with a Steady-State Prior. Journal of Time Series Econometrics, 12(2). https://doi.org/10.1515/jtse-2018-0034" +) + +citEntry(header = "For the mixed-frequency BVAR with factor stochastic volatility, please cite:", + entry = "article", + title = "Estimating Large Mixed-Frequency Bayesian VAR Models", + author = personList(as.person("Sebastian Ankargren"), + as.person("Paulina Jon\\\'{e}us")), + journal = "arXiv", + year = "2019", + url = "https://arxiv.org/abs/1912.02231", + textVersion = "Ankargren, Sebastian and Paulina Jonéus (2019). Estimating Large Mixed-Frequency Bayesian VAR Models. 
arXiv, https://arxiv.org/abs/1912.02231" +) diff --git a/inst/include/auxmix.h b/inst/include/auxmix.h index 11e5e89..ae72f03 100644 --- a/inst/include/auxmix.h +++ b/inst/include/auxmix.h @@ -3,9 +3,12 @@ #include -// Some constants relating to the approximation of log(chisq) trough -// normal mixture (from Omori et al., 2007), and -// corresponding functions related to sampling the indicators +// Copyright of original code: Gregor Kastner (stochvol package) +// Copyright of modified code: Sebastian Ankargren (mfbvar package) +// The following code is a derivative work of the code +// developed by Gregor Kastner for the stochvol package, which +// is licensed GPL>=2. This code is therefore licensed under +// the terms of the GNU Public License, version 3. const double mix_prob[10] = {.00609, .04775, .13057, .20674, .22715, .18842, .12047, .05591, .01575, .00115}; @@ -53,18 +56,15 @@ const double mix_pre[10] = { -4.8643822832849297199686589010525494813919067382812500000, -7.7642143280080739842219372803810983896255493164062500000}; -// Non-normalized posterior probabilities void findMixprobs( arma::vec& mixprob, const arma::vec& datanorm); -// Cumulative sum over columns of a matrix void colCumsums( arma::vec& x, int const nrow, int const ncol); -// Combines findMixprobs() and colCumsums() (see above) into one function void findMixCDF( arma::vec& mixprob, const arma::vec& datanorm); diff --git a/inst/include/mfbvar.h b/inst/include/mfbvar.h index 7c2c3cc..b80b74c 100644 --- a/inst/include/mfbvar.h +++ b/inst/include/mfbvar.h @@ -15,23 +15,8 @@ arma::vec rmultn(const arma::vec & m, const arma::mat & Sigma); arma::mat rinvwish(int v, const arma::mat & S); arma::mat rmatn(const arma::mat & M, const arma::mat & Q, const arma::mat & P); -void posterior_psi_iw(arma::vec & psi_i, arma::mat & mu_mat, - const arma::mat & Pi_i, const arma::mat & D_mat, - const arma::mat & Sigma_i, const arma::mat & inv_prior_psi_Omega, - const arma::mat & Z_i, const arma::mat & X, - const 
arma::mat & inv_prior_psi_Omega_mean, const arma::mat & dt, - int n_determ, int n_vars, int n_lags); -void posterior_psi_csv(arma::vec & psi_i, arma::mat & mu_mat, - const arma::mat & Pi_i, const arma::mat & D_mat, - const arma::mat & Sigma_chol_inv, const arma::mat & exp_sqrt_f, - const arma::mat & inv_prior_psi_Omega, - const arma::mat & Z_i, const arma::mat & X, - const arma::mat & inv_prior_psi_Omega_mean, const arma::mat & dt, - int n_determ, int n_vars, int n_lags); - -double max_eig_cpp(const arma::mat & A); - // Import the rgig double do_rgig1(double lambda, double chi, double psi); +double rig(double mu, double lambda); #endif diff --git a/inst/include/mvn.h b/inst/include/mvn.h index 7222c60..b8219b7 100644 --- a/inst/include/mvn.h +++ b/inst/include/mvn.h @@ -1,10 +1,16 @@ -#ifndef MFBVAR_MVN_H -#define MFBVAR_MVN_H -inline arma::vec mvn_bcm(const arma::mat & Phi, const arma::vec & d, +#ifndef MFBVAR_MVN_BCM_H +#define MFBVAR_MVN_BCM_H +inline arma::vec mvn_bcm(const arma::mat & Phi, const arma::vec & d, const arma::vec & alpha) { + // Function to sample from a normal posterior in accordance with Bhattacharya, + // Chakraborty and Mallick (2016) + // Notation following Bhattacharya, Chakraborty and Mallick (2016), doi:10.1093/biomet/asw042 + // Phi: scaled regressor matrix (n x p) + // d: scaled diagonal of the (diagonal) prior covariance matrix + // alpha: scaled response variable arma::uword n = Phi.n_rows; arma::uword p = Phi.n_cols; - + arma::mat U = Phi.t(); U.each_col() %= d; arma::vec d_sqrt = sqrt(d); @@ -17,7 +23,38 @@ inline arma::vec mvn_bcm(const arma::mat & Phi, const arma::vec & d, arma::vec v = Phi * u + delta; arma::vec w = arma::solve(Phi * U + I, (alpha - v)); arma::vec theta = u + U * w; - + + return theta; +} + +#endif + +#ifndef MFBVAR_MVN_BCM_EPS_H +#define MFBVAR_MVN_BCM_EPS_H +inline arma::vec mvn_bcm_eps(const arma::mat & Phi, const arma::vec & d, + const arma::vec & alpha, const arma::vec & eps) { + // Function to sample from a 
normal posterior in accordance with Bhattacharya, + // Chakraborty and Mallick (2016) with pregenerated random numbers + // Notation following Bhattacharya, Chakraborty and Mallick (2016), doi:10.1093/biomet/asw042 + // Phi: scaled regressor matrix (n x p) + // d: scaled diagonal of the (diagonal) prior covariance matrix + // alpha: scaled response variable + // eps: vector of iid N(0,1) (n+p) + arma::uword n = Phi.n_rows; + arma::uword p = Phi.n_cols; + + arma::vec u = arma::vec(eps.begin(), p); + arma::vec delta = arma::vec(eps.begin()+p, n); + + arma::mat U = Phi.t(); + U.each_col() %= d; + arma::vec d_sqrt = sqrt(d); + arma::mat I(n, n, arma::fill::eye); + u %= d_sqrt; + arma::vec v = Phi * u + delta; + arma::vec w = arma::solve(Phi * U + I, (alpha - v)); + arma::vec theta = u + U * w; + return theta; } @@ -25,9 +62,14 @@ inline arma::vec mvn_bcm(const arma::mat & Phi, const arma::vec & d, #ifndef MFBVAR_MVN_RUE_H #define MFBVAR_MVN_RUE_H -inline arma::vec mvn_rue(const arma::mat & Phi, const arma::vec & d, +inline arma::vec mvn_rue(const arma::mat & Phi, const arma::vec & d, const arma::vec & alpha) { - + // Function to sample from a normal posterior in accordance with Rue (2001) + // Notation following Bhattacharya, Chakraborty and Mallick (2016), doi:10.1093/biomet/asw042 + // Phi: scaled regressor matrix (n x p) + // d: scaled diagonal of the (diagonal) prior covariance matrix + // alpha: scaled response variable + arma::mat Q = Phi.t() * Phi; Q.diag() += pow(d, -1.0); arma::mat L = arma::chol(Q, "lower"); @@ -38,17 +80,23 @@ inline arma::vec mvn_rue(const arma::mat & Phi, const arma::vec & d, z.imbue(norm_rand); arma::vec y = arma::solve(arma::trimatu(L.t()), z); arma::mat theta = mu + y; - + return theta; } - #endif + #ifndef MFBVAR_MVN_CCM_H #define MFBVAR_MVN_CCM_H -inline arma::vec mvn_ccm(const arma::mat & Phi, const arma::vec & d, +inline arma::vec mvn_ccm(const arma::mat & Phi, const arma::vec & d, const arma::vec & alpha, double c, double j) { - + 
// Function to sample from a normal posterior when one parameter has non-zero prior mean + // Notation following Bhattacharya, Chakraborty and Mallick (2016), doi:10.1093/biomet/asw042 + // Phi: scaled regressor matrix (n x p) + // d: scaled diagonal of the (diagonal) prior covariance matrix + // alpha: scaled response variable + // c: prior mean of the parameter that has non-zero prior mean + // j: index of the parameter arma::mat Q = Phi.t() * Phi; Q.diag() += pow(d, -1.0); arma::mat L = arma::chol(Q, "lower"); @@ -58,8 +106,35 @@ inline arma::vec mvn_ccm(const arma::mat & Phi, const arma::vec & d, arma::vec z(Phi.n_cols); z.imbue(norm_rand); arma::vec theta = arma::solve(arma::trimatu(L.t()), v+z); - + return theta; } -#endif \ No newline at end of file +#endif + +#ifndef MFBVAR_MVN_RUE_EPS_H +#define MFBVAR_MVN_RUE_EPS_H +inline arma::vec mvn_rue_eps(const arma::mat & Phi, const arma::vec & d, + const arma::vec & alpha, const arma::vec & eps, + double c, double j) { + // Function to sample from a normal posterior when one parameter has non-zero prior mean + // using the Rue (2001) algorithm + // Notation following Bhattacharya, Chakraborty and Mallick (2016), doi:10.1093/biomet/asw042 + // Phi: scaled regressor matrix (n x p) + // d: scaled diagonal of the (diagonal) prior covariance matrix + // alpha: scaled response variable + // c: prior mean of the parameter that has non-zero prior mean + // j: index of the parameter + + arma::mat Q = Phi.t() * Phi; + Q.diag() += pow(d, -1.0); + arma::mat L = arma::chol(Q, "lower"); + arma::mat b = Phi.t() * alpha; + b(j) += c; + arma::vec v = arma::solve(arma::trimatl(L), b); + arma::vec theta = arma::solve(arma::trimatu(L.t()), v+eps); + + return theta; +} + +#endif diff --git a/inst/include/mvn_par.h b/inst/include/mvn_par.h new file mode 100644 index 0000000..89785f0 --- /dev/null +++ b/inst/include/mvn_par.h @@ -0,0 +1,51 @@ +#include +#include "mvn.h" +struct Pi_parallel_rue : public RcppParallel::Worker { + arma::mat 
& output; + const arma::mat & y; + const arma::mat & X; + const arma::mat & d; + const arma::mat & eps; + const arma::mat & volatility; + const arma::mat & prior_AR1; + arma::uword T; + arma::uword n; + arma::uword p; + + Pi_parallel_rue(arma::mat & output, + const arma::mat & y, + const arma::mat & X, + const arma::mat & d, + const arma::mat & eps, + const arma::mat & volatility, + const arma::mat & prior_AR1, + const arma::uword T, + const arma::uword n, + const arma::uword p); + + void operator()(std::size_t begin, std::size_t end); +}; + +struct Pi_parallel_bcm : public RcppParallel::Worker { + arma::mat & output; + const arma::mat & y; + const arma::mat & X; + const arma::mat & d; + const arma::mat & eps; + const arma::mat & volatility; + arma::uword T; + arma::uword n; + arma::uword p; + + Pi_parallel_bcm(arma::mat & output, + const arma::mat & y, + const arma::mat & X, + const arma::mat & d, + const arma::mat & eps, + const arma::mat & volatility, + const arma::uword T, + const arma::uword n, + const arma::uword p); + + void operator()(std::size_t begin, std::size_t end); +}; diff --git a/inst/include/simsm_adaptive_cv.h b/inst/include/simsm_adaptive_cv.h index 30180b3..fbf1844 100644 --- a/inst/include/simsm_adaptive_cv.h +++ b/inst/include/simsm_adaptive_cv.h @@ -5,6 +5,14 @@ inline arma::mat simsm_adaptive_cv(arma::mat y_, arma::mat Phi, arma::mat Sigma_chol, arma::mat Lambda, arma::mat Z1, arma::uword n_q_, arma::uword T_b) { + // y_: The data matrix (n_T x n_vars) + // Phi: Matrix with regression parameters, first column is the intercept (n_vars x (1 + n_vars * n_lags)) + // Sigma_chol: lower triangular Cholesky factor of error covariance matrix (n_vars x n_vars) + // Lambda: aggregation matrix (obtained from mfbvar:::build_Lambda), (usually n_vars x n_lags) + // Z1: matrix with initial values (n_lags x n_vars) + // n_q_: number of quarterly variables + // T_b: time index of first NA among monthly variables ( = n_T if none) + // intercept is first 
column arma::uword n_vars = y_.n_cols; arma::uword n_lags = Z1.n_rows; diff --git a/inst/include/simsm_adaptive_sv.h b/inst/include/simsm_adaptive_sv.h index 0641ec5..537ba1a 100644 --- a/inst/include/simsm_adaptive_sv.h +++ b/inst/include/simsm_adaptive_sv.h @@ -5,6 +5,14 @@ inline arma::mat simsm_adaptive_sv(arma::mat y_, arma::mat Phi, arma::cube Sigma_chol, arma::mat Lambda, arma::mat Z1, arma::uword n_q_, arma::uword T_b) { + // y_: The data matrix (n_T x n_vars) + // Phi: Matrix with regression parameters, first column is the intercept (n_vars x (1 + n_vars * n_lags)) + // Sigma_chol: array of lower triangular Cholesky factors of error covariance matrices (n_T x n_vars x n_vars) + // Lambda: aggregation matrix (obtained from mfbvar:::build_Lambda), (usually n_vars x n_lags) + // Z1: matrix with initial values (n_lags x n_vars) + // n_q_: number of quarterly variables + // T_b: time index of first NA among monthly variables ( = n_T if none) + // intercept is first column arma::uword n_vars = y_.n_cols; arma::uword n_lags = Z1.n_rows; diff --git a/inst/include/simsm_adaptive_univariate.h b/inst/include/simsm_adaptive_univariate.h index f9e14b6..0cf0752 100644 --- a/inst/include/simsm_adaptive_univariate.h +++ b/inst/include/simsm_adaptive_univariate.h @@ -4,7 +4,14 @@ #define MFBVAR_SIMSM_ADAPTIVE_UNIVARIATE_H inline arma::mat simsm_adaptive_univariate(arma::mat y_, arma::mat Phi, arma::mat Sigma, arma::mat Lambda, arma::mat Z1, arma::uword n_q_, arma::uword T_b, arma::mat f) { - + // y_: The data matrix (n_T x n_vars) + // Phi: Matrix with regression parameters, first column is the intercept (n_vars x (1 + n_vars * n_lags)) + // Sigma: matrix where each row contains the conditional standard deviations of the equations at time t (n_T x n_vars) + // Lambda: aggregation matrix (obtained from mfbvar:::build_Lambda), (usually n_vars x n_lags) + // Z1: matrix with initial values (n_lags x n_vars) + // n_q_: number of quarterly variables + // T_b: time index of first 
NA among monthly variables ( = n_T if none) + // f: matrix where each row contains the common component of the equations at time t (n_T x n_vars) // intercept is first column arma::uword n_vars = y_.n_cols; diff --git a/inst/include/update_csv.h b/inst/include/update_csv.h index 34a3aa1..57b2f63 100644 --- a/inst/include/update_csv.h +++ b/inst/include/update_csv.h @@ -13,5 +13,6 @@ void update_csv( const double phi_meaninvvar, const double prior_sigma2, const double prior_df); + #endif diff --git a/inst/include/update_dl.h b/inst/include/update_dl.h new file mode 100644 index 0000000..406af47 --- /dev/null +++ b/inst/include/update_dl.h @@ -0,0 +1,8 @@ +#ifndef MFBVAR_UPDATE_DL_H +#define MFBVAR_UPDATE_DL_H +void update_dl(arma::mat & prior_Pi_Omega, arma::vec & aux, + arma::vec & local, double & global, const arma::mat & Pi_i, + arma::uword n_vars, arma::uword n_lags, const double a, + arma::vec & slice, bool gig = true, + bool intercept = true); +#endif diff --git a/inst/include/update_fsv.h b/inst/include/update_fsv.h new file mode 100644 index 0000000..fd9eb45 --- /dev/null +++ b/inst/include/update_fsv.h @@ -0,0 +1,12 @@ +#ifndef _UPDATE_FSV_H_ +#define _UPDATE_FSV_H_ +void update_fsv(arma::mat & armafacload, arma::mat & armaf, arma::mat & armah, + arma::vec & armah0, + Rcpp::NumericMatrix & curpara, + const arma::mat & armatau2, + const arma::mat & armay, + const double bmu, const double Bmu, const double a0idi, const double b0idi, + const double a0fac, const double b0fac, const Rcpp::NumericVector & Bsigma, + const double B011inv, const double B022inv, + const Rcpp::NumericVector & priorh0, const arma::imat & armarestr); +#endif diff --git a/src/update_ng.h b/inst/include/update_ng.h similarity index 100% rename from src/update_ng.h rename to inst/include/update_ng.h diff --git a/man-roxygen/man_template.R b/man-roxygen/man_template.R index ef0bac1..81c1ebe 100644 --- a/man-roxygen/man_template.R +++ b/man-roxygen/man_template.R @@ -6,7 +6,7 @@ #' 
<%=ifelse(exists("d"), "@param d The matrix of size \\code{(n_T + n_lags) * n_determ} of deterministic terms.", "") %> #' <%=ifelse(exists("D_mat"), "@param D_mat The \\code{D} matrix (from \\code{\\link{build_DD}}).", "") %> #' <%=ifelse(exists("d_fcst"), "@param d_fcst The deterministic terms for the forecasting period.", "") %> -#' <%=ifelse(exists("freq"), "@param freq Character vector with elements 'm' (monthly) or 'q' (quarterly) for sampling frequency. Monthly variables must precede all quarterly variables.", "") %> +#' <%=ifelse(exists("freq"), "@param freq (Only used if \\code{Y} is a matrix) Character vector with elements 'm' (monthly) or 'q' (quarterly) for sampling frequency. Monthly variables must precede all quarterly variables.", "") %> #' <%=ifelse(exists("h0"), "@param h0 The initial state (\\code{(n_vars*n_lags)*1}).", "") %> #' <%=ifelse(exists("Lambda"), "@param Lambda The Lambda matrix (size \\code{n_vars* (n_vars*n_lags)}).", "") %> #' <%=ifelse(exists("lambda1"), "@param lambda1 The overall tightness.", "") %> diff --git a/man/build_DD.Rd b/man/build_DD.Rd deleted file mode 100644 index 938148e..0000000 --- a/man/build_DD.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/builders.R -\name{build_DD} -\alias{build_DD} -\title{Build the \eqn{D} matrix} -\usage{ -build_DD(d, n_lags) -} -\arguments{ -\item{d}{The matrix of size \code{(n_T + n_lags) * n_determ} of deterministic terms.} - -\item{n_lags}{The number of lags.} -} -\value{ -\item{DD}{A matrix of size \code{n_T * ((n_lags + 1)*n_determ)} where -row \code{t} is \eqn{(d_t', -d_{t-1}', \dots, -d_{t-k}')}.} -} -\description{ -\code{build_DD} builds the \eqn{D} matrix. 
-} -\keyword{internal} diff --git a/man/build_Lambda.Rd b/man/build_Lambda.Rd deleted file mode 100644 index 8689511..0000000 --- a/man/build_Lambda.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/builders.R -\name{build_Lambda} -\alias{build_Lambda} -\title{Build the \eqn{\Lambda} matrix} -\usage{ -build_Lambda(aggregation, n_lags) -} -\arguments{ -\item{aggregation}{A character vector of length \code{n_vars} with elements being \code{'identity'}, \code{'average'} or \code{'triangular'} to indicate the type of aggregation scheme to assume.} - -\item{n_lags}{The number of lags.} -} -\value{ -\item{Lambda}{An \code{n_vars * (n_vars*n_pseudolags)} matrix, where \code{n_pseudolags} is \code{max(5, n_lags)} if any variable uses the triangular aggregation scheme, \code{max(3, n_lags)} if any uses the quarterly average.} -} -\description{ -Builds the aggregation matrix \eqn{\Lambda}. -} -\details{ -The choice \code{aggregation = "identity"} means that what is observed is assumed to be exactly the underlying variable, whereas \code{aggregation = "average"} uses the quarterly average of the monthly underlying observations. Lastly, \code{aggregation = "triangular"} uses the triangular specification used by Mariano and Murasawa (2010). -} -\keyword{internal} diff --git a/man/build_M_Lambda.Rd b/man/build_M_Lambda.Rd deleted file mode 100644 index f76c9aa..0000000 --- a/man/build_M_Lambda.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/builders.R -\name{build_M_Lambda} -\alias{build_M_Lambda} -\title{Build the \eqn{M_t\Lambda} matrices} -\usage{ -build_M_Lambda(Y, Lambda, n_vars, n_lags, n_T) -} -\arguments{ -\item{Y}{The data matrix of size \code{(n_T + n_lags) * n_vars} with \code{NA} representing missingness. 
All monthly variables must be placed before quarterly variables.} - -\item{Lambda}{The Lambda matrix (size \code{n_vars* (n_vars*n_lags)}).} - -\item{n_vars}{The number of variables.} - -\item{n_lags}{The number of lags.} - -\item{n_T}{The number of time points.} -} -\value{ -\item{M_Lambda}{A list of length \code{n_T}.} -} -\description{ -Builds the selection matrices \eqn{M_t\Lambda}. -} -\details{ -The element \code{M_Lambda[[t]]} corresponds to \eqn{M_t\Lambda}. Currently, if element \code{i} of \code{Y[t, ]} is \code{NA}, then row \code{i} of \code{M_Lambda[[t]]} is all \code{NA}. -} -\keyword{internal} diff --git a/man/build_U.Rd b/man/build_U.Rd deleted file mode 100644 index 8b0aa70..0000000 --- a/man/build_U.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/RcppExports.R, R/builders.R -\name{build_U_cpp} -\alias{build_U_cpp} -\alias{build_U} -\title{Template titel} -\usage{ -build_U_cpp(Pi, n_determ, n_vars, n_lags) - -build_U(Pi, n_determ) -} -\arguments{ -\item{Pi}{Matrix of size \code{n_vars * (n_vars*n_lags)} containing the dynamic coefficients.} - -\item{n_determ}{The number of deterministic terms.} - -\item{n_vars}{The number of variables.} - -\item{n_lags}{The number of lags.} -} -\value{ -\item{U}{The \code{U} matrix, of size \code{((n_lags+1)n_vars*n_determ) * n_vars*n_determ}.} -} -\description{ -Builds the parameter matrix of dynamic coefficients for the companion form representation. 
-} -\section{Functions}{ -\itemize{ -\item \code{build_U_cpp}: Build the U matrix (C++ implementation) - -\item \code{build_U}: Build the U matrix (R implementation) -}} - -\keyword{internal} diff --git a/man/build_Y_tilde.Rd b/man/build_Y_tilde.Rd deleted file mode 100644 index 9e346fc..0000000 --- a/man/build_Y_tilde.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/builders.R -\name{build_Y_tilde} -\alias{build_Y_tilde} -\title{Build the lag-corrected data matrix} -\usage{ -build_Y_tilde(Pi, z) -} -\arguments{ -\item{Pi}{Matrix of size \code{n_vars * (n_vars*n_lags)} containing the dynamic coefficients.} - -\item{z}{A matrix of size \code{(n_T + n_lags) * n_vars} of data.} -} -\value{ -\item{Y_tilde}{A matrix of size \code{n_T * n_vars}.} -} -\description{ -Builds the \eqn{\tilde{Y}=\Pi(L)Y} matrix. -} -\details{ -Note that \code{z} does not contain missing values; at this point, the missing values have been replaced by values drawn using the simulation smoother. -} -\keyword{internal} diff --git a/man/build_Z.Rd b/man/build_Z.Rd deleted file mode 100644 index 9ecfc2a..0000000 --- a/man/build_Z.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/builders.R -\name{build_Z} -\alias{build_Z} -\title{Build the \eqn{Z} matrix} -\usage{ -build_Z(z, n_lags) -} -\arguments{ -\item{z}{A matrix of size \code{(n_T + n_lags) * n_vars} of data.} - -\item{n_lags}{The number of lags.} -} -\value{ -\item{Z}{A matrix of size \code{n_T * (n_vars*n_lags)}.} -} -\description{ -Builds the \eqn{Z} matrix, which consists of lags of \eqn{z}. 
-} -\keyword{internal} diff --git a/man/build_companion.Rd b/man/build_companion.Rd deleted file mode 100644 index 68f2d37..0000000 --- a/man/build_companion.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/builders.R -\name{build_companion} -\alias{build_companion} -\title{Build the companion matrix for the dynamic parameters} -\usage{ -build_companion(Pi, n_vars, n_lags) -} -\arguments{ -\item{Pi}{Matrix of size \code{n_vars * (n_vars*n_lags)} containing the dynamic coefficients.} - -\item{n_vars}{The number of variables.} - -\item{n_lags}{The number of lags.} -} -\value{ -\item{Pi_comp}{The companion form matrix of size \code{(n_vars*n_lags) * (n_vars*n_lags)}}. -} -\description{ -Builds the parameter matrix of dynamic coefficients for the companion form representation. -} -\keyword{internal} diff --git a/man/create_prior_Pi_Omega.Rd b/man/create_prior_Pi_Omega.Rd deleted file mode 100644 index 4bfc65d..0000000 --- a/man/create_prior_Pi_Omega.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/prior_pi_sigma.R -\name{create_prior_Pi_Omega} -\alias{create_prior_Pi_Omega} -\title{Create the priors for Pi and Sigma} -\usage{ -create_prior_Pi_Omega(lambda1, lambda2, lambda3, prior_Pi_AR1, Y, n_lags) -} -\arguments{ -\item{lambda1}{overall tightness} - -\item{lambda2}{cross-equation tightness} - -\item{lambda3}{lag decay} - -\item{prior_Pi_AR1}{prior means for AR(1) coefficients} - -\item{Y}{data} - -\item{n_lags}{number of lags} -} -\value{ -\item{prior_Pi_Omega}{The prior covariance matrix for Pi.} -} -\description{ -Creates the prior mean and covariance for Pi given the hyperparameters, and the prior parameters for Sigma. 
-} -\keyword{internal} diff --git a/man/dmatt.Rd b/man/dmatt.Rd deleted file mode 100644 index 44b3b7e..0000000 --- a/man/dmatt.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/densities.R -\name{dmatt} -\alias{dmatt} -\title{Matrix t distribution} -\usage{ -dmatt(X, M, P, Q, v) -} -\arguments{ -\item{X}{\code{p * q} matrix at which the density is to be evaluated} - -\item{M}{\code{p * q} matrix of means} - -\item{P}{\code{p * p} scale matrix} - -\item{Q}{\code{q * q} scale matrix} - -\item{v}{degrees of freedom} -} -\value{ -For \code{dmultn}: the evaluated density.\\n -For \code{rmultn}: \eqn{p} random numbers. -} -\description{ -Density function for the truncated multivariate normal distribution -} -\references{ -Karlsson, S. (2013) Forecasting with Bayesian Vector Autoregression. -In Elliott, G. and Timmermann, A., editors, \emph{Handbook of Economic Forecasting}, -volume 2, chapter 15, pp. 791-897. Elsevier B.V. -} -\keyword{internal} diff --git a/man/dmultn.Rd b/man/dmultn.Rd deleted file mode 100644 index 7e6e3e8..0000000 --- a/man/dmultn.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/RcppExports.R, R/densities.R -\name{rmultn} -\alias{rmultn} -\alias{dmultn} -\title{Multivariate normal density function} -\usage{ -rmultn(m, Sigma) - -dmultn(x, m, Sigma) -} -\arguments{ -\item{m}{The mean vector of size \code{p}.} - -\item{Sigma}{The covariance matrix.} - -\item{x}{A vector of size \code{p}.} -} -\value{ -For \code{dmultn}: the evaluated density.\\n -For \code{rmultn}: \eqn{p} random numbers. 
-} -\description{ -Density function for the multivariate normal distribution -} -\keyword{internal} diff --git a/man/dnorm_trunc.Rd b/man/dnorm_trunc.Rd deleted file mode 100644 index 7f7ba21..0000000 --- a/man/dnorm_trunc.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/densities.R -\name{dnorm_trunc} -\alias{dnorm_trunc} -\title{Truncated multivariate normal density function} -\usage{ -dnorm_trunc(x, m, V_inv, d, p_trunc, chisq_val) -} -\arguments{ -\item{x}{A vector of size \code{p}.} - -\item{m}{The mean vector of size \code{p}.} - -\item{V_inv}{The inverse of the covariance matrix of size \code{d * d}.} - -\item{d}{The number of components.} - -\item{p_trunc}{\code{1-p_trunc} is the degree of truncation (i.e. \code{p_trunc=1} is no truncation).} - -\item{chisq_val}{The value in the corresponding chi-square distribution; if the normal quadratic form exceeds this, the pdf is 0.} -} -\value{ -For \code{dmultn}: the evaluated density.\\n -For \code{rmultn}: \eqn{p} random numbers. 
-} -\description{ -Density function for the truncated multivariate normal distribution -} -\keyword{internal} diff --git a/man/dnorminvwish.Rd b/man/dnorminvwish.Rd deleted file mode 100644 index 2b5b3f0..0000000 --- a/man/dnorminvwish.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/RcppExports.R, R/densities.R -\name{rmatn} -\alias{rmatn} -\alias{rinvwish} -\alias{dnorminvwish} -\title{Normal inverse Wishart density function} -\usage{ -rmatn(M, Q, P) - -rinvwish(v, S) - -dnorminvwish(X, Sigma, M, P, S, v) -} -\arguments{ -\item{M}{The mean matrix of size \code{p * q}.} - -\item{Q}{\code{q * q} covariance matrix.} - -\item{P}{\code{p * p} covariance matrix.} - -\item{v}{The degrees of freedom.} - -\item{S}{\code{q * q} scale matrix.} - -\item{X}{Matrix of size \code{p * q}.} - -\item{Sigma}{The covariance matrix.} -} -\value{ -For \code{dnorminvwish}: the evaluated density.\\n -For \code{rmatn} or \code{rinvwish}: the random numbers. -} -\description{ -Density function for the (matrix) normal inverse Wishart distribution -} -\keyword{internal} diff --git a/man/estimate_mdd_ss_1.Rd b/man/estimate_mdd_ss_1.Rd deleted file mode 100644 index 5b4261c..0000000 --- a/man/estimate_mdd_ss_1.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/mdd.R -\name{estimate_mdd_ss_1} -\alias{estimate_mdd_ss_1} -\alias{estimate_mdd_ss_2} -\title{Estimate marginal data density in steady-state MF-BVAR} -\usage{ -estimate_mdd_ss_1(mfbvar_obj) - -estimate_mdd_ss_2(mfbvar_obj, p_trunc) -} -\arguments{ -\item{mfbvar_obj}{An object of class \code{mfbvar} containing the results.} - -\item{p_trunc}{\code{1-p_trunc} is the degree of truncation (i.e. 
\code{p_trunc=1} is no truncation).} -} -\value{ -\code{estimate_mdd_ss_1} returns a list with components (all are currently in logarithms): -\item{lklhd}{The likelihood.} -\item{eval_prior_Pi_Sigma}{The evaluated prior.} -\item{eval_prior_psi}{The evaluated prior of psi.} -\item{eval_RB_Pi_Sigma}{The Rao-Blackwellized estimate of the conditional posterior of Pi and Sigma.} -\item{eval_marg_psi}{The evaluated marginal posterior of psi.} -\item{log_mdd}{The mdd estimate (in log).} - -\code{estimate_mdd_ss_1} returns a list with components being \code{n_reps}-long vectors and a scalar (the final estimate). -\item{eval_posterior_Pi_Sigma}{Posterior of Pi and Sigma.} -\item{data_likelihood}{The likelihood.} -\item{eval_prior_Pi_Sigma}{Prior of Pi and Sigma.} -\item{eval_prior_psi}{Prior of psi.} -\item{psi_truncated}{The truncated psi pdf.} -\item{log_mdd}{The mdd estimate (in log).} -} -\description{ -This function provides the possibility to estimate the log marginal density using the steady-state MF-BVAR. -} -\details{ -\code{estimate_mdd_ss_1} uses method 1, \code{estimate_mdd_ss_2} uses method 2. -} -\keyword{internal} diff --git a/man/estimate_mfbvar.Rd b/man/estimate_mfbvar.Rd index 7ab97a1..362e901 100644 --- a/man/estimate_mfbvar.Rd +++ b/man/estimate_mfbvar.Rd @@ -9,47 +9,60 @@ estimate_mfbvar(mfbvar_prior = NULL, prior, variance = "iw", ...) 
\arguments{ \item{mfbvar_prior}{a \code{mfbvar_prior} object} -\item{prior}{either \code{"ss"} (steady-state prior) or \code{"minn"} (Minnesota prior)} +\item{prior}{either \code{"ss"} (steady-state prior), \code{"ssng"} (hierarchical steady-state prior with normal-gamma shrinkage) or \code{"minn"} (Minnesota prior)} -\item{variance}{form of the error variance-covariance matrix: either \code{"iw"} for the inverse Wishart prior, or \code{"fsv"} for a time-varying matrix modeled using a factor stochastic volatility model} +\item{variance}{form of the error variance-covariance matrix: \code{"iw"} for the inverse Wishart prior, \code{"diffuse"} for a diffuse prior, \code{"csv"} for common stochastic volatility or \code{"fsv"} for factor stochastic volatility} \item{...}{additional arguments to \code{update_prior} (if \code{mfbvar_prior} is \code{NULL}, the arguments are passed on to \code{set_prior})} } \value{ -An object of class \code{mfbvar}, \code{mfbvar_} and \code{mfbvar__} containing posterior quantities as well as the prior object - -For all choices of \code{prior} and \code{variance}, the returned object contains: +An object of class \code{mfbvar}, \code{mfbvar_} and \code{mfbvar__} containing posterior quantities as well as the prior object. For all choices of \code{prior} and \code{variance}, the returned object contains: \item{Pi}{Array of dynamic coefficient matrices; \code{Pi[,, r]} is the \code{r}th draw} \item{Z}{Array of monthly processes; \code{Z[,, r]} is the \code{r}th draw} \item{Z_fcst}{Array of monthly forecasts; \code{Z_fcst[,, r]} is the \code{r}th forecast. The first \code{n_lags} rows are taken from the data to offer a bridge between observations and forecasts and for computing nowcasts (i.e. 
with ragged edges).} - +\subsection{Steady-state priors}{ If \code{prior = "ss"}, it also includes: -\item{psi}{Matrix of steady-state parameter vectors; \code{psi[r,]} is the \code{r}th draw} -\item{roots}{The maximum eigenvalue of the lag polynomial (if \code{check_roots = TRUE})} -\item{num_tries}{The number of attempts for drawing a stationary \eqn{\Pi} (if \code{check_roots = TRUE})} +\describe{\item{\code{psi}}{Matrix of steady-state parameter vectors; \code{psi[r,]} is the \code{r}th draw} +\item{\code{roots}}{The maximum eigenvalue of the lag polynomial (if \code{check_roots = TRUE})}} -If \code{variance = "iw"}, it also includes: -\item{Sigma}{Array of error covariance matrices; \code{Sigma[,, r]} is the \code{r}th draw} +If \code{prior = "ssng"}, it also includes: +\describe{ +\item{\code{psi}}{Matrix of steady-state parameter vectors; \code{psi[r,]} is the \code{r}th draw} +\item{\code{roots}}{The maximum eigenvalue of the lag polynomial (if \code{check_roots = TRUE})} +\item{\code{lambda_psi}}{Vector of draws of the global hyperparameter in the normal-Gamma prior} +\item{\code{phi_psi}}{Vector of draws of the auxiliary hyperparameter in the normal-Gamma prior} +\item{\code{omega_psi}}{Matrix of draws of the prior variances of psi; \code{omega_psi[r, ]} is the \code{r}th draw, where \code{diag(omega_psi[r, ])} is used as the prior covariance matrix for psi}}} +\subsection{Constant error covariances}{ +If \code{variance = "iw"} or \code{variance = "diffuse"}, it also includes: +\describe{\item{\code{Sigma}}{Array of error covariance matrices; \code{Sigma[,, r]} is the \code{r}th draw}}} +\subsection{Time-varying error covariances}{ +If \code{variance = "csv"}, it also includes: +\describe{\item{\code{Sigma}}{Array of error covariance matrices; \code{Sigma[,, r]} is the \code{r}th draw} +\item{\code{phi}}{Vector of AR(1) parameters for the log-volatility regression; \code{phi[r]} is the \code{r}th draw} +\item{\code{sigma}}{Vector of error standard 
deviations for the log-volatility regression; \code{sigma[r]} is the \code{r}th draw} +\item{\code{f}}{Matrix of log-volatilities; \code{f[r, ]} is the \code{r}th draw}} If \code{variance = "fsv"}, it also includes: -\item{facload}{Array of factor loadings; \code{facload[,, r]} is the \code{r}th draw} -\item{latent}{Array of latent log-volatilities; \code{latent[,, r]} is the \code{r}th draw} -\item{mu}{Matrix of means of the log-volatilities; \code{mu[, r]} is the \code{r}th draw} -\item{phi}{Matrix of AR(1) parameters for the log-volatilities; \code{phi[, r]} is the \code{r}th draw} -\item{sigma}{Matrix of innovation variances for the log-volatilities; \code{sigma[, r]} is the \code{r}th draw} +\describe{\item{\code{facload}}{Array of factor loadings; \code{facload[,, r]} is the \code{r}th draw} +\item{\code{latent}}{Array of latent log-volatilities; \code{latent[,, r]} is the \code{r}th draw} +\item{\code{mu}}{Matrix of means of the log-volatilities; \code{mu[, r]} is the \code{r}th draw} +\item{\code{phi}}{Matrix of AR(1) parameters for the log-volatilities; \code{phi[, r]} is the \code{r}th draw} +\item{\code{sigma}}{Matrix of innovation variances for the log-volatilities; \code{sigma[, r]} is the \code{r}th draw}}} } \description{ The main function for estimating a mixed-frequency BVAR. } \examples{ -prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 20, n_reps = 20) +prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20) mod_minn <- estimate_mfbvar(prior_obj, prior = "minn") } \references{ -Schorfheide, F., & Song, D. (2015) Real-Time Forecasting With a Mixed-Frequency VAR. \emph{Journal of Business & Economic Statistics}, 33(3), 366--380. \url{http://dx.doi.org/10.1080/07350015.2014.954707}\cr -Ankargren, S., Unosson, M., & Yang, Y. (2018) A Mixed-Frequency Bayesian Vector Autoregression with a Steady-State Prior. Working Paper, Department of Statistics, Uppsala University No. 2018:3. 
+Ankargren, S., Unosson, M., & Yang, Y. (2020) A Flexible Mixed-Frequency Bayesian Vector Autoregression with a Steady-State Prior. \emph{Journal of Time Series Econometrics}, 12(2), \doi{10.1515/jtse-2018-0034}.\cr +Ankargren, S., & Jonéus, P. (2020) Simulation Smoothing for Nowcasting with Large Mixed-Frequency VARs. \emph{Econometrics and Statistics}, \doi{10.1016/j.ecosta.2020.05.007}.\cr +Ankargren, S., & Jonéus, P. (2019) Estimating Large Mixed-Frequency Bayesian VAR Models. arXiv:1912.02231, \url{https://arxiv.org/abs/1912.02231}.\cr +Kastner, G., & Huber, F. (2020) Sparse Bayesian Vector Autoregressions in Huge Dimensions. \emph{Journal of Forecasting}, 39, 1142--1165. \doi{10.1002/for.2680}.\cr +Schorfheide, F., & Song, D. (2015) Real-Time Forecasting With a Mixed-Frequency VAR. \emph{Journal of Business & Economic Statistics}, 33(3), 366--380. \doi{10.1080/07350015.2014.954707}\cr } \seealso{ \code{\link{set_prior}}, \code{\link{update_prior}}, \code{\link{predict.mfbvar}}, \code{\link{plot.mfbvar_minn}}, diff --git a/man/eval_Pi_Sigma_RaoBlack.Rd b/man/eval_Pi_Sigma_RaoBlack.Rd deleted file mode 100644 index fe64311..0000000 --- a/man/eval_Pi_Sigma_RaoBlack.Rd +++ /dev/null @@ -1,43 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/eval.R -\name{eval_Pi_Sigma_RaoBlack} -\alias{eval_Pi_Sigma_RaoBlack} -\title{Evaluate the conditional posterior of Pi and Sigma using Rao-Blackwellization} -\usage{ -eval_Pi_Sigma_RaoBlack(Z_array, d, post_psi_center, post_Pi_center, - post_Sigma_center, post_nu, prior_Pi_mean, prior_Pi_Omega, prior_S, - n_vars, n_lags, n_reps) -} -\arguments{ -\item{Z_array}{The array of draws of Z from the Gibbs sampler.} - -\item{d}{The matrix of size \code{(n_T + n_lags) * n_determ} of deterministic terms.} - -\item{post_psi_center}{The value at which to do the evaluation (e.g. the posterior mean/median).} - -\item{post_Pi_center}{The value at which to do the evaluation (e.g. 
the posterior mean/median).} - -\item{post_Sigma_center}{The value at which to do the evaluation (e.g. the posterior mean/median).} - -\item{post_nu}{The posterior of the parameter \eqn{\nu}.} - -\item{prior_Pi_Omega}{Matrix of size \code{(n_vars*n_lags)* (n_vars*n_lags)} containing the prior for (part of) the prior covariance of the dynamic coefficients.} - -\item{prior_S}{The prior for \eqn{\Sigma}.} - -\item{n_vars}{The number of variables.} - -\item{n_lags}{The number of lags.} - -\item{n_reps}{The number of replications.} - -\item{prior_Pi}{Matrix of size \code{n_vars * (n_vars*n_lags)} containing the prior for the mean of the dynamic coefficients.} -} -\value{ -The return is: -\item{evals}{A vector with the evaulations.} -} -\description{ -Evaluates the conditional posterior of Pi and Sigma using Rao-Blackwellization of the draws from the Gibbs sampler. -} -\keyword{internal} diff --git a/man/eval_psi_MargPost.Rd b/man/eval_psi_MargPost.Rd deleted file mode 100644 index 9f0dbb2..0000000 --- a/man/eval_psi_MargPost.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/eval.R -\name{eval_psi_MargPost} -\alias{eval_psi_MargPost} -\title{Evaluate the marginal posterior of psi} -\usage{ -eval_psi_MargPost(Pi_array, Sigma_array, Z_array, post_psi_center, - prior_psi_mean, prior_psi_Omega, D_mat, n_determ, n_vars, n_lags, n_reps) -} -\arguments{ -\item{Pi_array}{Array of draws of Pi from the Gibbs sampler.} - -\item{Sigma_array}{Array of draws of Sigma from the Gibbs sampler.} - -\item{Z_array}{The array of draws of Z from the Gibbs sampler.} - -\item{post_psi_center}{The value at which to do the evaluation (e.g. 
the posterior mean/median).} - -\item{prior_psi_mean}{Vector of length \code{n_determ*n_vars} with the prior means of the steady-state parameters.} - -\item{prior_psi_Omega}{Matrix of size \code{(n_determ*n_vars) * (n_determ*n_vars)} with the prior covariance of the steady-state parameters.} - -\item{D_mat}{The \code{D} matrix (from \code{\link{build_DD}}).} - -\item{n_determ}{The number of deterministic terms.} - -\item{n_vars}{The number of variables.} - -\item{n_lags}{The number of lags.} - -\item{n_reps}{The number of replications.} -} -\value{ -The return is: -\item{evals}{A vector with the evaulations.} -} -\description{ -Evaluates the marginal posterior of psi using the draws from the Gibbs sampler. -} -\keyword{internal} diff --git a/man/figures/README-plot_minn-1.png b/man/figures/README-plot_minn-1.png deleted file mode 100644 index ed308df..0000000 Binary files a/man/figures/README-plot_minn-1.png and /dev/null differ diff --git a/man/figures/README-plot_ss-1.png b/man/figures/README-plot_ss-1.png deleted file mode 100644 index bb8c833..0000000 Binary files a/man/figures/README-plot_ss-1.png and /dev/null differ diff --git a/man/fill_na.Rd b/man/fill_na.Rd deleted file mode 100644 index baebc56..0000000 --- a/man/fill_na.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/fill_na.R -\name{fill_na} -\alias{fill_na} -\title{Fills NAs with the next non-NA value} -\usage{ -fill_na(Y) -} -\arguments{ -\item{Y}{The data matrix of size \code{(n_T + n_lags) * n_vars} with \code{NA} representing missingness. All monthly variables must be placed before quarterly variables.} -} -\value{ -A matrix with no \code{NA}s. -} -\description{ -The function fills elements with \code{NA} with the next non-\code{NA} value (so that quarterly averages observed at the end of the quarter are assumed as observations for the remaining months of the quarter). 
-} -\keyword{internal} diff --git a/man/interval_to_moments.Rd b/man/interval_to_moments.Rd index 523e46f..4ff82da 100644 --- a/man/interval_to_moments.Rd +++ b/man/interval_to_moments.Rd @@ -24,4 +24,3 @@ prior_intervals <- matrix(c(0.1, 0.2, 0.4, 0.6), ncol = 2, byrow = TRUE) psi_moments <- interval_to_moments(prior_intervals) } -\keyword{internal} diff --git a/man/kf_loglike.Rd b/man/kf_loglike.Rd deleted file mode 100644 index 9312d6f..0000000 --- a/man/kf_loglike.Rd +++ /dev/null @@ -1,35 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/RcppExports.R -\name{kf_loglike} -\alias{kf_loglike} -\title{Kalman filter and smoother} -\usage{ -kf_loglike(y_, Phi_, Sigma_, Lambda_, a00, P00) -} -\arguments{ -\item{y_}{matrix with the data} - -\item{Phi_}{matrix with the autoregressive parameters, where the last column is the intercept} - -\item{Sigma_}{error covariance matrix} - -\item{Lambda_}{aggregation matrix (for quarterly variables only)} - -\item{n_q_}{number of quarterly variables} - -\item{T_b_}{final time period where all monthly variables are observed} -} -\value{ -For \code{kf_ragged}, a list with elements: -\item{a}{The one-step predictions (for the compact form)} -\item{a_tt}{The filtered estimates (for the compact form)} -\item{a_tT}{The smoothed estimates (for the compact form)} -\item{Z_tT}{The smoothed estimated (for the original form)} -} -\description{ -Kalman filter and smoother (\code{kf_ragged}) and simulation smoother (\code{kf_sim_smooth}) for mixed-frequency data with ragged edges. This function is more computationally efficient than using a companion form representation. -} -\details{ -The returned matrices have the same number of rows as \code{y_}, but the first \code{n_lags} rows are zero. 
-} -\keyword{internal} diff --git a/man/kf_ragged.Rd b/man/kf_ragged.Rd deleted file mode 100644 index ad5cf40..0000000 --- a/man/kf_ragged.Rd +++ /dev/null @@ -1,47 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/RcppExports.R -\name{kf_ragged} -\alias{kf_ragged} -\alias{kf_sim_smooth} -\title{Kalman filter and smoother} -\usage{ -kf_ragged(y_, Phi_, Sigma_, Lambda_, Z1_, n_q_, T_b_) - -kf_sim_smooth(y_, Phi_, Sigma_, Lambda_, Z1_, n_q_, T_b_) -} -\arguments{ -\item{y_}{matrix with the data} - -\item{Phi_}{matrix with the autoregressive parameters, where the last column is the intercept} - -\item{Sigma_}{error covariance matrix} - -\item{Lambda_}{aggregation matrix (for quarterly variables only)} - -\item{n_q_}{number of quarterly variables} - -\item{T_b_}{final time period where all monthly variables are observed} - -\item{Z1}{initial values, with \code{n_lags} rows and same number of columns as \code{y_}} -} -\value{ -For \code{kf_ragged}, a list with elements: -\item{a}{The one-step predictions (for the compact form)} -\item{a_tt}{The filtered estimates (for the compact form)} -\item{a_tT}{The smoothed estimates (for the compact form)} -\item{Z_tT}{The smoothed estimated (for the original form)} - -For \code{kf_sim_smooth}, a matrix with the draw from the posterior distribution. -} -\description{ -Kalman filter and smoother (\code{kf_ragged}) and simulation smoother (\code{kf_sim_smooth}) for mixed-frequency data with ragged edges. This function is more computationally efficient than using a companion form representation. -} -\details{ -The returned matrices have the same number of rows as \code{y_}, but the first \code{n_lags} rows are zero. 
-} -\section{Functions}{ -\itemize{ -\item \code{kf_sim_smooth}: Simulation smoother -}} - -\keyword{internal} diff --git a/man/max_eig_cpp.Rd b/man/max_eig_cpp.Rd deleted file mode 100644 index 588050d..0000000 --- a/man/max_eig_cpp.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/RcppExports.R -\name{max_eig_cpp} -\alias{max_eig_cpp} -\alias{max_eig} -\title{Find maximum eigenvalue} -\usage{ -max_eig_cpp(A) -} -\arguments{ -\item{A}{Symmetrix matrix whose maximum eigenvalue is to be computed.} -} -\value{ -The maximum eigenvalue. -} -\description{ -The function computes the maximum eigenvalue. -} -\keyword{internal} diff --git a/man/mcmc_sampler.Rd b/man/mcmc_sampler.Rd deleted file mode 100644 index f7f6f84..0000000 --- a/man/mcmc_sampler.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/mcmc_sampler.R, R/mcmc_sampler_sv.R -\name{mcmc_sampler} -\alias{mcmc_sampler} -\alias{mcmc_sampler.mfbvar_ss_iw} -\alias{mcmc_sampler.mfbvar_minn_iw} -\alias{mcmc_sampler.mfbvar_minn_fsv} -\alias{mcmc_sampler.mfbvar_ss_fsv} -\title{MCMC sampler} -\usage{ -mcmc_sampler(x, ...) - -\method{mcmc_sampler}{mfbvar_ss_iw}(x, ...) - -\method{mcmc_sampler}{mfbvar_minn_iw}(x, ...) - -\method{mcmc_sampler}{mfbvar_minn_fsv}(x, ...) - -\method{mcmc_sampler}{mfbvar_ss_fsv}(x, ...) -} -\arguments{ -\item{x}{argument to dispatch on (of class \code{prior_obj})} - -\item{...}{additional named arguments passed on to the methods} -} -\description{ -\code{mcmc_sampler} is a generic function for deciding which specific MCMC -algorithm to dispatch to. It is called internally. -} diff --git a/man/mdd.Rd b/man/mdd.Rd index e754c4e..2192002 100644 --- a/man/mdd.Rd +++ b/man/mdd.Rd @@ -11,11 +11,16 @@ mdd(x, ...) \item{...}{additional named arguments passed on to the methods} } +\value{ +The logarithm of the marginal data density. 
+} \description{ \code{mdd} estimates the (log) marginal data density. } \details{ This is a generic function. See the methods for more information. + +The marginal data density is also known as the marginal likelihood. } \seealso{ \code{\link{mdd.mfbvar_ss_iw}}, \code{\link{mdd.mfbvar_minn_iw}} diff --git a/man/mdd.mfbvar_minn_iw.Rd b/man/mdd.mfbvar_minn_iw.Rd index 28326ed..99a0bf8 100644 --- a/man/mdd.mfbvar_minn_iw.Rd +++ b/man/mdd.mfbvar_minn_iw.Rd @@ -22,7 +22,7 @@ The method used for estimating the marginal data density is the proposal made by Schorfheide and Song (2015). } \references{ -Schorfheide, F., & Song, D. (2015) Real-Time Forecasting With a Mixed-Frequency VAR. \emph{Journal of Business & Economic Statistics}, 33(3), 366--380. \url{http://dx.doi.org/10.1080/07350015.2014.954707} +Schorfheide, F., & Song, D. (2015) Real-Time Forecasting With a Mixed-Frequency VAR. \emph{Journal of Business & Economic Statistics}, 33(3), 366--380. \doi{10.1080/07350015.2014.954707} } \seealso{ \code{\link{mdd}}, \code{\link{mdd.mfbvar_ss_iw}} diff --git a/man/mdd.mfbvar_ss_iw.Rd b/man/mdd.mfbvar_ss_iw.Rd index 7fdd316..be26241 100644 --- a/man/mdd.mfbvar_ss_iw.Rd +++ b/man/mdd.mfbvar_ss_iw.Rd @@ -25,7 +25,7 @@ Fuentes-Albero and Melosi (2013) and Ankargren, Unosson and Yang (2018). } \references{ Fuentes-Albero, C. and Melosi, L. (2013) Methods for Computing Marginal Data Densities from the Gibbs Output. -\emph{Journal of Econometrics}, 175(2), 132-141, \url{https://doi.org/10.1016/j.jeconom.2013.03.002}\cr +\emph{Journal of Econometrics}, 175(2), 132-141, \doi{10.1016/j.jeconom.2013.03.002}\cr Ankargren, S., Unosson, M., & Yang, Y. (2018) A Mixed-Frequency Bayesian Vector Autoregression with a Steady-State Prior. Working Paper, Department of Statistics, Uppsala University No. 2018:3. 
} \seealso{ diff --git a/man/mdd.minn.Rd b/man/mdd.minn.Rd deleted file mode 100644 index f97baef..0000000 --- a/man/mdd.minn.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/mdd.R -\name{estimate_mdd_minn} -\alias{estimate_mdd_minn} -\title{Estimate marginal data density in Minnesota MF-BVAR} -\usage{ -estimate_mdd_minn(mfbvar_obj, p_trunc, ...) -} -\arguments{ -\item{mfbvar_obj}{An object of class \code{mfbvar} containing the results.} - -\item{p_trunc}{\code{1-p_trunc} is the degree of truncation (i.e. \code{p_trunc=1} is no truncation).} - -\item{quarterly_cols}{numeric vector with positions of quarterly variables} -} -\value{ -The log marginal data density estimate (bar a constant) -} -\description{ -This function provides the possibility to estimate the log marginal density (up to a constant) using the Minnesota MF-BVAR. -} -\keyword{internal} diff --git a/man/mf_sweden.Rd b/man/mf_sweden.Rd index f8ff4ba..28a1447 100644 --- a/man/mf_sweden.Rd +++ b/man/mf_sweden.Rd @@ -3,15 +3,17 @@ \docType{data} \name{mf_sweden} \alias{mf_sweden} -\title{Real-time data set.} -\format{A mixed-frequency data set of five Swedish macroeconomic variables. +\title{Real-time data set for Sweden.} +\format{ +A mixed-frequency data set of five Swedish macroeconomic variables. 
\describe{ \item{unemp}{harmonized unemployment rate (source: OECD)} \item{infl}{inflation rate (source: OECD)} \item{ip}{industrial production (source: OECD)} \item{eti}{economic tendency indicator (source: National Institute of Economic Research)} \item{gdp}{GDP growth (source: Statistics Sweden)} -}} +} +} \usage{ mf_sweden } diff --git a/man/mf_usa.Rd b/man/mf_usa.Rd new file mode 100644 index 0000000..8cd4b64 --- /dev/null +++ b/man/mf_usa.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/data.R +\docType{data} +\name{mf_usa} +\alias{mf_usa} +\title{US Macroeconomic Data Set} +\format{ +A list with components: +\describe{ + \item{CPIAUCSL}{inflation rate} + \item{UNRATE}{unemployment rate} + \item{GDPC1}{GDP growth rate} +} +} +\usage{ +mf_usa +} +\description{ +A dataset containing mixed-frequency data from FRED for three US macroeconomic variables. +} +\keyword{datasets} diff --git a/man/mfbvar.Rd b/man/mfbvar.Rd index 25e6758..73959fa 100644 --- a/man/mfbvar.Rd +++ b/man/mfbvar.Rd @@ -3,14 +3,13 @@ \docType{package} \name{mfbvar} \alias{mfbvar} -\alias{mfbvar-package} \title{mfbvar: A package for mixed-frequency Bayesian vector autoregressive (VAR) models.} \description{ The mfbvar package makes estimation of Bayesian VARs with a mix of monthly and quarterly data simple. The prior for the regression parameters is normal with Minnesota-style prior moments. The package supports either an inverse Wishart prior for the error covariance matrix, yielding a standard normal-inverse Wishart prior, or a time-varying error covariance matrix by means of a factor -stochastic volatility model through the \code{\link[factorstochvol]{factorstochvol}} package. +stochastic volatility model through the \code{\link[factorstochvol]{factorstochvol-package}} package. 
} \section{Specifying the prior}{ diff --git a/man/ols_initialization.Rd b/man/ols_initialization.Rd deleted file mode 100644 index 99a70fa..0000000 --- a/man/ols_initialization.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ols.R -\name{ols_initialization} -\alias{ols_initialization} -\title{Initialize Gibbs sampler using OLS} -\usage{ -ols_initialization(z, d, n_lags, n_T, n_vars, n_determ) -} -\arguments{ -\item{z}{A matrix of size \code{(n_T + n_lags) * n_vars} of data.} - -\item{d}{The matrix of size \code{(n_T + n_lags) * n_determ} of deterministic terms.} - -\item{n_lags}{The number of lags.} - -\item{n_T}{The number of time points.} - -\item{n_vars}{The number of variables.} - -\item{n_determ}{The number of deterministic terms.} -} -\value{ -A list with components: -\item{Gam}{A matrix of size \code{n_vars * (n_vars*n_lags +n_determ)} of estimated parameters.} -\item{S}{Estimated error covariance matrix.} -\item{psi}{The estimated steady-state parameters.} -} -\description{ -Initializes the Gibbs sampler using OLS. -} -\keyword{internal} diff --git a/man/ols_pi.Rd b/man/ols_pi.Rd deleted file mode 100644 index ab73902..0000000 --- a/man/ols_pi.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ols.R -\name{ols_pi} -\alias{ols_pi} -\alias{ols_s} -\title{OLS functions} -\usage{ -ols_pi(X, Y) - -ols_s(X, Y, Pi) -} -\arguments{ -\item{X}{The regressor matrix.} - -\item{Y}{The dependnet variable matrix.} - -\item{Pi}{The estimated coefficients. 
-@keywords internal} -} -\value{ -\item{pi_sample}{Estimated coefficients.} - -\item{s_sample}{The sum of squared residuals matrix.} -} -\description{ -Helper functions for multivariate regression and sum of squared error computations -} -\keyword{internal} diff --git a/man/plot-mfbvar.Rd b/man/plot-mfbvar.Rd index bf4ac05..829f09a 100644 --- a/man/plot-mfbvar.Rd +++ b/man/plot-mfbvar.Rd @@ -3,24 +3,46 @@ \name{plot-mfbvar} \alias{plot-mfbvar} \alias{plot.mfbvar_ss} +\alias{plot.mfbvar_ssng} \alias{plot.mfbvar_minn} \alias{varplot} \title{Plotting methods for posterior mfbvar objects} \usage{ -\method{plot}{mfbvar_ss}(x, fcst_start = NULL, aggregate_fcst = TRUE, - plot_start = NULL, pred_bands = 0.8, nrow_facet = NULL, - ss_bands = 0.95, ...) +\method{plot}{mfbvar_ss}( + x, + aggregate_fcst = TRUE, + plot_start = NULL, + pred_bands = 0.8, + nrow_facet = NULL, + ss_bands = 0.95, + ... +) -\method{plot}{mfbvar_minn}(x, fcst_start = NULL, aggregate_fcst = TRUE, - plot_start = NULL, pred_bands = 0.8, nrow_facet = NULL, ...) +\method{plot}{mfbvar_ssng}( + x, + aggregate_fcst = TRUE, + plot_start = NULL, + pred_bands = 0.8, + nrow_facet = NULL, + ss_bands = 0.95, + ... +) -varplot(x, variables = colnames(x$Y), var_bands = 0.95, - nrow_facet = NULL, ...) +\method{plot}{mfbvar_minn}( + x, + aggregate_fcst = TRUE, + plot_start = NULL, + pred_bands = 0.8, + nrow_facet = NULL, + ... +) + +varplot(x, variables = colnames(x$Y), var_bands = 0.95, nrow_facet = NULL, ...) } \arguments{ \item{x}{object of class \code{mfbvar_minn} or \code{mfbvar_ss}} -\item{fcst_start}{Date of the first forecast; if dates are available for the data used for obtaining \code{x}, these will be used.} +\item{aggregate_fcst}{Boolean indicating whether forecasts of the latent monthly series should be aggregated to the quarterly frequency.} \item{plot_start}{Time period (date or number) to start plotting from. 
Default is to to use \code{5*n_fcst} time periods if \code{n_fcst} exists, otherwise the entire sample.} @@ -32,18 +54,24 @@ varplot(x, variables = colnames(x$Y), var_bands = 0.95, \item{...}{Currently not in use.} +\item{variables}{Vector of names or positions of variables to include in the plot of variances} + \item{var_bands}{(\code{varplot} only) Single number (between \code{0.0} and \code{1.0}) giving the coverage level of posterior intervals for the error standard deviations.} } +\value{ +A \code{\link[ggplot2]{ggplot}}. +} \description{ -Methods for plotting posterior mfbvar objects (\code{mfbvar_minn} and \code{mfbvar_ss}). +Methods for plotting posterior mfbvar objects. } \examples{ -prior_obj <- set_prior(Y = mf_sweden[, 4:5], d = "intercept", - freq = c("m", "q"), n_lags = 4, n_burnin = 20, n_reps = 20, +prior_obj <- set_prior(Y = mf_usa, d = "intercept", + n_lags = 4, n_reps = 20, n_fcst = 4, n_fac = 1) -prior_intervals <- matrix(c(-0.1, 0.1, - 0.4, 0.6), ncol = 2, byrow = TRUE) +prior_intervals <- matrix(c(1, 3, + 4, 8, + 1, 3), ncol = 2, byrow = TRUE) psi_moments <- interval_to_moments(prior_intervals) prior_psi_mean <- psi_moments$prior_psi_mean prior_psi_Omega <- psi_moments$prior_psi_Omega diff --git a/man/plot.mfbvar_prior.Rd b/man/plot.mfbvar_prior.Rd index deac758..f9803c2 100644 --- a/man/plot.mfbvar_prior.Rd +++ b/man/plot.mfbvar_prior.Rd @@ -13,6 +13,9 @@ \item{...}{Currently not in use.} } +\value{ +A \code{\link[ggplot2]{ggplot}}. +} \description{ Method for plotting \code{mfbvar_prior} objects. } @@ -20,7 +23,6 @@ Method for plotting \code{mfbvar_prior} objects. The function plots the data. If the prior moments for the steady-state parameters are available in \code{x}, these are included. 
} \examples{ -prior_obj <- set_prior(Y = mf_sweden[, 4:5], freq = c("m", "q"), - n_lags = 4, n_burnin = 20, n_reps = 20, n_fcst = 4) +prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20, n_fcst = 4) plot(prior_obj) } diff --git a/man/posterior_Pi_Sigma.Rd b/man/posterior_Pi_Sigma.Rd deleted file mode 100644 index 8b87bb9..0000000 --- a/man/posterior_Pi_Sigma.Rd +++ /dev/null @@ -1,48 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/posteriors.R -\name{posterior_Pi_Sigma} -\alias{posterior_Pi_Sigma} -\title{Draw from posterior of Pi and Sigma} -\usage{ -posterior_Pi_Sigma(Z_r1, d, psi_r1, prior_Pi_mean, prior_Pi_Omega, - inv_prior_Pi_Omega, Omega_Pi, prior_S, prior_nu, check_roots, n_vars, - n_lags, n_T) -} -\arguments{ -\item{Z_r1}{The previous draw of \code{Z} (i.e. \code{Z[,, r-1]}).} - -\item{d}{The matrix of size \code{(n_T + n_lags) * n_determ} of deterministic terms.} - -\item{psi_r1}{The previous draw of \code{psi} (i.e. \code{psi[r-1,]}).} - -\item{prior_Pi_mean}{Matrix of size \code{n_vars * (n_vars*n_lags)} containing the prior for the mean of the dynamic coefficients.} - -\item{prior_Pi_Omega}{Matrix of size \code{(n_vars*n_lags)* (n_vars*n_lags)} containing the prior for (part of) the prior covariance of the dynamic coefficients.} - -\item{inv_prior_Pi_Omega}{The inverse of the prior covariance matrix for Pi.} - -\item{Omega_Pi}{The \code{inv_prior_Pi_Omega} multiplied by \code{prior_Pi} matrix.} - -\item{prior_S}{The prior for \eqn{\Sigma}.} - -\item{prior_nu}{The prior degrees of freedom.} - -\item{check_roots}{Logical, if roots of the companion matrix are to be checked to ensure stationarity.} - -\item{n_vars}{The number of variables.} - -\item{n_lags}{The number of lags.} - -\item{n_T}{The number of time points.} -} -\value{ -\code{posterior_Pi_Sigma} returns a list with: -\item{Pi_r}{The draw of \code{Pi}.} -\item{Sigma_r}{The draw of \code{Sigma}.} -\item{num_try}{The try at which a stable draw was 
obtained.} -\item{root}{The maximum eigenvalue (in modulus) of the system.} -} -\description{ -Function for drawing from the posterior of Pi and Sigma, which can be used as a block in a Gibbs sampler. -} -\keyword{internal} diff --git a/man/posterior_psi.Rd b/man/posterior_psi.Rd deleted file mode 100644 index 6a41b1a..0000000 --- a/man/posterior_psi.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/posteriors.R -\name{posterior_psi} -\alias{posterior_psi} -\title{Draw from posterior of psi} -\usage{ -posterior_psi(Pi_r, Sigma_r, Z_r1, prior_psi_mean, prior_psi_Omega, D_mat, - n_vars, n_lags, n_determ) -} -\arguments{ -\item{Pi_r}{The current draw of \code{Pi} (i.e. \code{Pi[,, r]}).} - -\item{Sigma_r}{The current draw of \code{Sigma} (i.e. \code{Sigma[,, r]}).} - -\item{Z_r1}{The previous draw of \code{Z} (i.e. \code{Z[,, r-1]}).} - -\item{prior_psi_mean}{Vector of length \code{n_determ*n_vars} with the prior means of the steady-state parameters.} - -\item{prior_psi_Omega}{Matrix of size \code{(n_determ*n_vars) * (n_determ*n_vars)} with the prior covariance of the steady-state parameters.} - -\item{D_mat}{The \code{D} matrix (from \code{\link{build_DD}}).} - -\item{n_vars}{The number of variables.} - -\item{n_lags}{The number of lags.} - -\item{n_determ}{The number of deterministic terms.} -} -\value{ -\code{posterior_psi} returns: -\item{psi_r}{The draw of \code{psi}.} -} -\description{ -Function for drawing from the posterior of psi, which can be used as a block in a Gibbs sampler. 
-} -\keyword{internal} diff --git a/man/posterior_psi_mean.Rd b/man/posterior_psi_mean.Rd deleted file mode 100644 index 7b84697..0000000 --- a/man/posterior_psi_mean.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/posteriors.R -\name{posterior_psi_mean} -\alias{posterior_psi_mean} -\alias{posterior_psi_Omega} -\alias{posterior_psi_Omega_fsv} -\title{Compute posterior moments of the steady-state parameters} -\usage{ -posterior_psi_mean(U, D_mat, Sigma, prior_psi_Omega, post_psi_Omega, - Y_tilde, prior_psi_mean) - -posterior_psi_Omega(U, D_mat, Sigma, prior_psi_Omega) - -posterior_psi_Omega_fsv(U, D_mat, idivar, prior_psi_Omega) -} -\arguments{ -\item{U}{\eqn{U} matrix, of size \code{(n_vars*n_determ*(n_lags+1)) * (n_vars*n_determ)}. This can be obtained using \code{\link{build_U}}.} - -\item{D_mat}{The \code{D} matrix (from \code{\link{build_DD}}).} - -\item{Sigma}{The covariance matrix.} - -\item{prior_psi_Omega}{Matrix of size \code{(n_determ*n_vars) * (n_determ*n_vars)} with the prior covariance of the steady-state parameters.} - -\item{post_psi_Omega}{The covariance matrix in the posterior, \eqn{\bar{\Omega}_{\Psi}}.} - -\item{Y_tilde}{The lag-corrected data matrix (with no missing values) of size \code{n_T * n_vars}.} - -\item{prior_psi_mean}{Vector of length \code{n_determ*n_vars} with the prior means of the steady-state parameters.} -} -\value{ -The return is: -\item{psi}{The posterior mean (from \code{\link{posterior_psi_mean}})} - -\item{psi_Omega}{The posterior variance (from \code{\link{posterior_psi_Omega}})} - -\item{psi_Omega}{The posterior variance (from \code{\link{posterior_psi_Omega}})} -} -\description{ -Computes the mean and variance of the conditional posterior distribution of the steady-state parameters. 
-} -\keyword{internal} diff --git a/man/predict.mfbvar.Rd b/man/predict.mfbvar.Rd index f3321e5..ea21156 100644 --- a/man/predict.mfbvar.Rd +++ b/man/predict.mfbvar.Rd @@ -4,17 +4,29 @@ \alias{predict.mfbvar} \title{Predict method for class \code{mfbvar}} \usage{ -\method{predict}{mfbvar}(object, fcst_start = NULL, - aggregate_fcst = TRUE, pred_bands = 0.8, ...) +\method{predict}{mfbvar}(object, aggregate_fcst = TRUE, pred_bands = 0.8, ...) } \arguments{ \item{object}{object of class mfbvar} -\item{...}{Currently not in use.} +\item{aggregate_fcst}{If forecasts of quarterly variables should be aggregated back to the quarterly frequency.} -\item{pred_quantiles}{The quantiles of the posterior predictive distribution to use.} +\item{pred_bands}{The level of the probability bands for the forecasts.} -\item{tidy}{If results should be tidy or not.} +\item{...}{Currently not in use.} +} +\value{ +A \code{\link[tibble]{tibble}} with columns: +\describe{\item{\code{variable}}{Name of variable} +\item{\code{time}}{Time index} +\item{\code{fcst_date}}{Date of forecast}} +If the argument \code{pred_bands} is given as a numeric value between 0 and 1, the returned tibble also includes columns: +\describe{\item{\code{lower}}{The \code{(1-pred_bands)/2} lower quantiles of the predictive distributions} +\item{\code{median}}{The medians of the predictive distributions} +\item{\code{upper}}{The \code{(1+pred_bands)/2} upper quantiles of the predictive distributions}} +If \code{pred_bands} is \code{NULL} or \code{NA}, the returned tibble also includes the columns: +\describe{\item{\code{fcst}}{MCMC samples from the predictive distributions} +\item{\code{iter}}{Iteration indexes for the MCMC samples}} } \description{ Method for predicting \code{mfbvar} objects. @@ -23,9 +35,7 @@ Method for predicting \code{mfbvar} objects. Note that this requires that forecasts were made in the original \code{mfbvar} call.
} \examples{ -prior_obj <- set_prior(Y = mf_sweden[, 4:5], freq = c("m", "q"), - n_lags = 4, n_burnin = 20, n_reps = 20, n_fcst = 4) +prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20, n_fcst = 4) mod_minn <- estimate_mfbvar(prior_obj, prior = "minn") predict(mod_minn) -predict(mod_minn, pred_quantiles = 0.5, tidy = TRUE) } diff --git a/man/print.mfbvar.Rd b/man/print.mfbvar.Rd index 7634be5..36d0a3e 100644 --- a/man/print.mfbvar.Rd +++ b/man/print.mfbvar.Rd @@ -11,12 +11,14 @@ \item{...}{Currently not in use.} } +\value{ +No return value, called for side effects. +} \description{ Method for printing \code{mfbvar} objects. } \examples{ -prior_obj <- set_prior(Y = mf_sweden[, 4:5], d = "intercept", - freq = c("m", "q"), n_lags = 4, n_burnin = 20, n_reps = 20) +prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20) mod_minn <- estimate_mfbvar(prior_obj, prior = "minn") mod_minn } diff --git a/man/print.mfbvar_prior.Rd b/man/print.mfbvar_prior.Rd index a91f7c8..31393f5 100644 --- a/man/print.mfbvar_prior.Rd +++ b/man/print.mfbvar_prior.Rd @@ -11,6 +11,9 @@ \item{...}{additional arguments (currently unused)} } +\value{ +No return value, called for side effects. +} \description{ Printing method for object of class mfbvar_prior, checking if information in the prior is sufficient for estimating models. @@ -23,8 +26,7 @@ The print method checks whether the steady-state and Minnesota requires additional information). 
} \examples{ -prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100) +prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 100) print(prior_obj) } \seealso{ diff --git a/man/prior_Pi_Sigma.Rd b/man/prior_Pi_Sigma.Rd deleted file mode 100644 index 388cbc0..0000000 --- a/man/prior_Pi_Sigma.Rd +++ /dev/null @@ -1,30 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/prior_pi_sigma.R -\name{prior_Pi_Sigma} -\alias{prior_Pi_Sigma} -\title{Create the priors for Pi and Sigma} -\usage{ -prior_Pi_Sigma(lambda1, lambda2, prior_Pi_AR1, Y, n_lags, prior_nu) -} -\arguments{ -\item{lambda1}{The overall tightness.} - -\item{lambda2}{The lag decay.} - -\item{prior_Pi_AR1}{The prior means for the AR(1) coefficients.} - -\item{Y}{The data matrix of size \code{(n_T + n_lags) * n_vars} with \code{NA} representing missingness. All monthly variables must be placed before quarterly variables.} - -\item{n_lags}{The number of lags.} - -\item{prior_nu}{The prior degrees of freedom.} -} -\value{ -\item{prior_Pi}{The prior mean matrix for Pi.} -\item{prior_Pi_Omega}{The prior covariance matrix for Pi.} -\item{prior_s}{The prior for Sigma.} -} -\description{ -Creates the prior mean and covariance for Pi given the hyperparameters, and the prior parameters for Sigma. -} -\keyword{internal} diff --git a/man/set_prior.Rd b/man/set_prior.Rd index da3c254..01ffb1a 100644 --- a/man/set_prior.Rd +++ b/man/set_prior.Rd @@ -3,41 +3,68 @@ \name{set_prior} \alias{set_prior} \alias{update_prior} -\alias{check_prior} -\title{Set priors for an mfbvar model} +\title{Set priors for mfbvar} \usage{ -set_prior(Y, freq, prior_Pi_AR1 = rep(0, ncol(Y)), lambda1 = 0.2, - lambda2 = 0.5, lambda3 = 1, lambda4 = 10000, n_lags, n_fcst = 0, - thin = 1, n_burnin, n_reps, d = NULL, d_fcst = NULL, - prior_psi_mean = NULL, prior_psi_Omega = NULL, n_fac = NULL, - cl = NULL, verbose = FALSE, check_roots = FALSE, ...) 
+set_prior( + Y, + aggregation = "average", + prior_Pi_AR1 = 0, + lambda1 = 0.2, + lambda2 = 0.5, + lambda3 = 1, + lambda4 = 10000, + block_exo = NULL, + n_lags, + n_fcst = 0, + n_thin = 1, + n_reps, + n_burnin = n_reps, + freq = NULL, + d = NULL, + d_fcst = NULL, + prior_psi_mean = NULL, + prior_psi_Omega = NULL, + check_roots = FALSE, + s = -1000, + prior_ng = c(0.01, 0.01), + prior_phi = c(0.9, 0.1), + prior_sigma2 = c(0.01, 4), + n_fac = NULL, + n_cores = 1, + verbose = FALSE, + ... +) update_prior(prior_obj, ...) - -check_prior(prior_obj) } \arguments{ -\item{Y}{The data matrix of size \code{(n_T + n_lags) * n_vars} with \code{NA} representing missingness. All monthly variables must be placed before quarterly variables.} +\item{Y}{data input. For monthly-quarterly data, should be a list with components containing regularly spaced time series (that inherit from \code{ts} or \code{zooreg}). If a component contains a single time series, the component itself must be named. If a component contains multiple time series, each time series must be named. Monthly variables can only contain missing values at the end of the sample, and should precede quarterly variables in the list. Matrices in which quarterly variables are padded with \code{NA} and observations stored at the end of each quarter are also accepted, but then the frequency of each variable must be given in the argument \code{freq}. Weekly-monthly mixes can be provided using the matrix way, see examples.} -\item{freq}{Character vector with elements 'm' (monthly) or 'q' (quarterly) for sampling frequency. Monthly variables must precede all quarterly variables.} +\item{aggregation}{the aggregation scheme used for relating latent high-frequency series to their low-frequency observations. The default is \code{"average"} for averaging within each low-frequency period (e.g., quarterly observations are averages of the constituent monthly observations). 
The alternative \code{"triangular"} can be used for monthly-quarterly mixes, and uses the Mariano-Murasawa triangular set of weights. See details for more information.} \item{prior_Pi_AR1}{The prior means for the AR(1) coefficients.} \item{lambda1}{The overall tightness.} +\item{lambda2}{(Only if \code{variance} is one of \code{c("diffuse", "fsv")} The cross-variable tightness} + \item{lambda3}{The tightness of the intercept prior variance.} \item{lambda4}{(Minnesota only) Prior variance of the intercept.} +\item{block_exo}{(Only if \code{variance} is one of \code{c("diffuse", "fsv")}) Vector of indexes/names of variables to be treated as block exogenous} + \item{n_lags}{The number of lags.} \item{n_fcst}{The number of periods to forecast.} -\item{thin}{Store every \code{thin}th draw} +\item{n_thin}{Store every \code{n_thin}th draw} + +\item{n_reps}{The number of replications.} \item{n_burnin}{The number of burn-in replications.} -\item{n_reps}{The number of replications.} +\item{freq}{(Only used if \code{Y} is a matrix) Character vector with elements 'm' (monthly) or 'q' (quarterly) for sampling frequency. 
Monthly variables must precede all quarterly variables.} \item{d}{(Steady state only) Either a matrix with same number of rows as \code{Y} and \code{n_determ} number of columns containing the deterministic terms or a string \code{"intercept"} for requesting an intercept as the only deterministic term.} @@ -46,39 +73,52 @@ term.} \item{prior_psi_mean}{(Steady state only) Vector of length \code{n_determ*n_vars} with the prior means of the steady-state parameters.} -\item{prior_psi_Omega}{(Steady state only) Matrix of size \code{(n_determ*n_vars) * (n_determ*n_vars)} with the prior covariance of the steady-state parameters.} +\item{prior_psi_Omega}{(Steady state only) Matrix of size \code{(n_determ*n_vars) * (n_determ*n_vars)} with the prior covariance of the steady-state parameters.} + +\item{check_roots}{Logical, if roots of the companion matrix are to be checked to ensure stationarity.} + +\item{s}{(Hierarchical steady state only) scalar giving the tuning parameter for the Metropolis-Hastings proposal for the kurtosis parameter.
If \code{s < 0}, then adaptive Metropolis-Hastings targeting an acceptance rate of 0.44 is used, where the scaling factor is restricted to the interval \code{[-abs(s), abs(s)]}} + +\item{prior_ng}{(Hierarchical steady state only) vector with two elements giving the parameters \code{c(c0, c1)} of the hyperprior for the global shrinkage parameter} + +\item{prior_phi}{(Only used with common stochastic volatility) Vector with two elements \code{c(mean, variance)} for the AR(1) parameter in the log-volatility regression} + +\item{prior_sigma2}{(Only used with common stochastic volatility) Vector with two elements \code{c(mean, df)} for the innovation variance of the log-volatility regression} \item{n_fac}{(Only used with factor stochastic volatility) Number of factors to use for the factor stochastic volatility model} -\item{cl}{(Only used with factor stochastic volatility) Cluster object to use for drawing regression parameters in parallel} +\item{n_cores}{(Only used with factor stochastic volatility) Number of cores to use for drawing regression parameters in parallel} \item{verbose}{Logical, if progress should be printed to the console.} -\item{check_roots}{Logical, if roots of the companion matrix are to be checked to ensure stationarity.} - \item{...}{(Only used with factor stochastic volatility) Arguments to pass along to \code{\link[factorstochvol]{fsvsample}}. See details.} \item{prior_obj}{an object of class \code{mfbvar_prior}} - -\item{(Only}{if \code{variance != "iw"}) The cross-variable tightness} - -\item{...}{named arguments for prior attributes to update} +} +\value{ +An object of class \code{mfbvar_prior} that is used as input to \code{estimate_mfbvar}. } \description{ -Create an object storing all information needed for estimation, including data as well as model and prior specifications for both a Minnesota or steady-state prior. +The function creates an object storing all information needed for estimating a mixed-frequency BVAR. 
The object includes data as well as details for the model and its priors. } \details{ -The first arguments (\code{Y} through \code{n_reps}) must be set for the model to be estimated irrespective of the choice -of prior, but some have default values. +Some support is provided for single-frequency data sets, where \code{Y} contains variables sampled with the same frequency. + +The aggregation weights that can be used for \code{aggregation} are intra-quarterly averages (\code{aggregation = "average"}), where the quarterly observations \eqn{y_{q,t}} are assumed to relate to the underlying monthly series \eqn{z_{q,t}} through: +\deqn{y_{q,t} = \frac{1}{3}(z_{q,t} + z_{q,t-1} + z_{q,t-2})} -For the Minnesota prior, \code{lambda4} must also be set, but it too has a default that it relies on if not specified. +If \code{aggregation = "triangular"}, then instead +\deqn{y_{q,t} = \frac{1}{9}(z_{q,t} + 2z_{q,t-1} + 3z_{q,t-2} + 2z_{q,t-3} + z_{q,t-4})} -For the steady-state prior, the deterministic matrix needs to be supplied, or a string indicating that the intercept should be -the only deterministic term. If the latter, also \code{d_fcst} is set to be intercept only. Otherwise, if forecasts are requested -(\code{n_fcst > 0}) also \code{d_fcst} needs to be provided. Finally, the prior moments for the steady-state parameters must also be -provided. +The latter is typically used when modeling growth rates, and the former when working with log-levels. -For modeling stochastic volatility by the factor stochastic volatility model, the number of factors to use must be supplied. Further arguments can be passed along to \code{\link[factorstochvol]{fsvsample}}.
If arguments are not given, the defaults used are as follows (see \code{\link[factorstochvol]{fsvsample}} for descriptions): +If the steady-state prior is to be used, the deterministic matrix needs to be supplied, or a string indicating that the intercept should be the only deterministic term (\code{d = "intercept"}). If the latter, \code{d_fcst} is automatically set to be intercept only. Otherwise, if forecasts are requested +(\code{n_fcst > 0}) also \code{d_fcst} must be provided. Finally, the prior means of the steady-state parameters must (at the very minimum) also be +provided in \code{prior_psi_mean}. The steady-state prior involves inverting the lag polynomial. For this reason, draws in which the largest eigenvalue +(in absolute value) of the lag polynomial is greater than 1 are discarded and new draws are made if \code{check_roots = TRUE}. The maximum number of +attempts is 1,000. + +For modeling stochastic volatility by the factor stochastic volatility model, the number of factors to use must be supplied. Further arguments can be passed along, but are not included as formal arguments. If the default settings are not overriden, the defaults used are as follows (see \code{\link[factorstochvol]{fsvsample}} for descriptions): \itemize{ \item{\code{priormu}}{\code{ = c(0, 10)}} \item{\code{priorphiidi}}{\code{ = c(10, 3)}} @@ -86,23 +126,22 @@ For modeling stochastic volatility by the factor stochastic volatility model, th \item{\code{priorsigmaidi}}{\code{ = 1}} \item{\code{priorsigmafac}}{\code{ = 1}} \item{\code{priorfacload}}{\code{ = 1}} - \item{\code{priorng}}{\code{ = c(1, 1)}} - \item{\code{columnwise}}{\code{ = FALSE}} \item{\code{restrict}}{\code{ = "none"}} - \item{\code{heteroskedastic}}{\code{ = TRUE}} - \item{\code{priorhomoskedastic}}{\code{ = NA}} } -The steady-state prior involves inverting the lag polynomial. 
For this reason, draws in which the largest eigenvalue -(in absolute value) of the lag polynomial is greater than 1 are discarded and new draws are made. The maximum number of -attempts is 1,000. The components in the output named \code{roots} and \code{num_tries} contain the largest roots and the -number of attempts, respectively, if \code{check_roots = TRUE} (the default). +The function \code{update_prior} can be used to update an existing prior object. See the examples. } \examples{ -prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100) +# Standard list-based way +prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 100) prior_obj <- update_prior(prior_obj, n_fcst = 4) + +# Weekly-monthly mix of data, four weeks per month +Y <- matrix(rnorm(400), 100, 4) +Y[setdiff(1:100,seq(4, 100, by = 4)), 4] <- NA +prior_obj <- set_prior(Y = Y, freq = c(rep("w", 3), "m"), + n_lags = 4, n_reps = 10) } \seealso{ -\code{\link{interval_to_moments}}, \code{\link{print.mfbvar_prior}}, \code{\link{summary.mfbvar_prior}}, \code{\link{estimate_mfbvar}}, \code{\link[factorstochvol]{fsvsample}} +\code{\link{estimate_mfbvar}}, \code{\link{update_prior}}, \code{\link{interval_to_moments}}, \code{\link{print.mfbvar_prior}}, \code{\link{summary.mfbvar_prior}}, \code{\link[factorstochvol]{fsvsample}} } diff --git a/man/smoother.Rd b/man/smoother.Rd deleted file mode 100644 index 692c692..0000000 --- a/man/smoother.Rd +++ /dev/null @@ -1,46 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/RcppExports.R -\name{loglike} -\alias{loglike} -\alias{smoother} -\alias{simulation_smoother} -\alias{generate_mhh} -\title{Smooth and sample from the smoothed distribution} -\usage{ -loglike(Y, Lambda, Pi_comp, Q_comp, n_T, n_vars, n_comp, z0, P0) -} -\arguments{ -\item{Y}{The data matrix of size \code{(n_T + n_lags) * n_vars} with \code{NA} representing missingness. 
All monthly variables must be placed before quarterly variables.} - -\item{Lambda}{The Lambda matrix (size \code{n_vars* (n_vars*n_lags)}).} - -\item{Pi_comp}{Matrix with the dynamic coefficients in companion form.} - -\item{Q_comp}{The lower-triangular Cholesky decomposition of the covariance matrix (in companion form).} - -\item{n_T}{The number of time points.} - -\item{n_vars}{The number of variables.} - -\item{n_comp}{The length of the companion form vector of data (\code{n_vars*n_lags}).} - -\item{z0}{A matrix of size \code{(n_lags*n_vars) * n_vars} of initial values of the latent variable.} - -\item{P0}{The covariance matrix of the initial state (\code{(n_vars*n_lags)*(n_vars*n_lags)}).} -} -\value{ -For \code{loglike}: -\item{}{An \code{n_T}-long vector of the log-likelihoods. \code{exp(sum(loglike(...)))} is the likelihood.} -} -\description{ -Functions for smoothing and sampling from the (smoothed) distribution \eqn{p(Z_{1:T}|Y_{1:T}, \Theta)}. -} -\details{ -Implemented in C++. -} -\section{Functions}{ -\itemize{ -\item \code{loglike}: Compute smoothed states -}} - -\keyword{internal} diff --git a/man/summary.mfbvar.Rd b/man/summary.mfbvar.Rd index 7260f73..48acaaa 100644 --- a/man/summary.mfbvar.Rd +++ b/man/summary.mfbvar.Rd @@ -4,10 +4,10 @@ \alias{summary.mfbvar} \title{Summary method for class mfbvar} \usage{ -\method{summary}{mfbvar}(x, ...) +\method{summary}{mfbvar}(object, ...) } \arguments{ -\item{x}{object of class \code{mfbvar}} +\item{object}{object of class \code{mfbvar}} \item{...}{Currently not in use.} } @@ -15,8 +15,7 @@ Method for summarizing \code{mfbvar} objects. 
} \examples{ -prior_obj <- set_prior(Y = mf_sweden[, 4:5], d = "intercept", - freq = c("m", "q"), n_lags = 4, n_burnin = 20, n_reps = 20) +prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 20) mod_minn <- estimate_mfbvar(prior_obj, prior = "minn") summary(mod_minn) } diff --git a/man/summary.mfbvar_prior.Rd b/man/summary.mfbvar_prior.Rd index 0553583..4586ec5 100644 --- a/man/summary.mfbvar_prior.Rd +++ b/man/summary.mfbvar_prior.Rd @@ -16,8 +16,7 @@ summary method for object of class mfbvar_prior, showing some basic information regarding the contents of the prior. } \examples{ -prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100) +prior_obj <- set_prior(Y = mf_usa, n_lags = 4, n_reps = 100) summary(prior_obj) } \seealso{ diff --git a/src/Makevars.in b/src/Makevars.in index b2bc732..b251637 100644 --- a/src/Makevars.in +++ b/src/Makevars.in @@ -1,3 +1,4 @@ CXX_STD = CXX11 PKG_CXXFLAGS = @OPENMP_FLAG@ -I../inst/include PKG_LIBS= @OPENMP_FLAG@ $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) +PKG_LIBS += $(shell ${R_HOME}/bin/Rscript -e "RcppParallel::RcppParallelLibs()") diff --git a/src/Makevars.win b/src/Makevars.win index 1d44c19..f2862fe 100644 --- a/src/Makevars.win +++ b/src/Makevars.win @@ -1,3 +1,6 @@ CXX_STD = CXX11 PKG_CXXFLAGS = $(SHLIB_OPENMP_CXXFLAGS) -I../inst/include PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) +PKG_CXXFLAGS += -DRCPP_PARALLEL_USE_TBB=1 +PKG_LIBS += $(shell "${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe" \ + -e "RcppParallel::RcppParallelLibs()") diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp index c4f3f61..01c95fb 100644 --- a/src/RcppExports.cpp +++ b/src/RcppExports.cpp @@ -67,6 +67,25 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// dl_reg +void dl_reg(const arma::mat& y, arma::mat& x, arma::mat& beta, arma::mat& aux, arma::vec& global, arma::mat& local, arma::mat& prior_Pi_Omega, arma::uword n_reps, const double a, bool gig); +RcppExport SEXP 
_mfbvar_dl_reg(SEXP ySEXP, SEXP xSEXP, SEXP betaSEXP, SEXP auxSEXP, SEXP globalSEXP, SEXP localSEXP, SEXP prior_Pi_OmegaSEXP, SEXP n_repsSEXP, SEXP aSEXP, SEXP gigSEXP) { +BEGIN_RCPP + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const arma::mat& >::type y(ySEXP); + Rcpp::traits::input_parameter< arma::mat& >::type x(xSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type beta(betaSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type aux(auxSEXP); + Rcpp::traits::input_parameter< arma::vec& >::type global(globalSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type local(localSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); + Rcpp::traits::input_parameter< const double >::type a(aSEXP); + Rcpp::traits::input_parameter< bool >::type gig(gigSEXP); + dl_reg(y, x, beta, aux, global, local, prior_Pi_Omega, n_reps, a, gig); + return R_NilValue; +END_RCPP +} // kf_loglike arma::vec kf_loglike(arma::mat y_, arma::mat Phi_, arma::mat Sigma_, arma::mat Lambda_, arma::mat a00, arma::mat P00); RcppExport SEXP _mfbvar_kf_loglike(SEXP y_SEXP, SEXP Phi_SEXP, SEXP Sigma_SEXP, SEXP Lambda_SEXP, SEXP a00SEXP, SEXP P00SEXP) { @@ -129,52 +148,13 @@ BEGIN_RCPP END_RCPP } // mcmc_minn_csv -void mcmc_minn_csv(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, arma::vec& phi, arma::vec& sigma, arma::mat& f, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& Z_1, const double priorlatent0, const double phi_invvar, const double phi_meaninvvar, const double prior_sigma2, const double prior_df, arma::uword n_reps, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_thin, bool 
verbose); -RcppExport SEXP _mfbvar_mcmc_minn_csv(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP phiSEXP, SEXP sigmaSEXP, SEXP fSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP Z_1SEXP, SEXP priorlatent0SEXP, SEXP phi_invvarSEXP, SEXP phi_meaninvvarSEXP, SEXP prior_sigma2SEXP, SEXP prior_dfSEXP, SEXP n_repsSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_thinSEXP, SEXP verboseSEXP) { -BEGIN_RCPP - Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const arma::mat& >::type y_in_p(y_in_pSEXP); - Rcpp::traits::input_parameter< arma::cube& >::type Pi(PiSEXP); - Rcpp::traits::input_parameter< arma::cube& >::type Sigma(SigmaSEXP); - Rcpp::traits::input_parameter< arma::cube& >::type Z(ZSEXP); - Rcpp::traits::input_parameter< arma::cube& >::type Z_fcst(Z_fcstSEXP); - Rcpp::traits::input_parameter< arma::vec& >::type phi(phiSEXP); - Rcpp::traits::input_parameter< arma::vec& >::type sigma(sigmaSEXP); - Rcpp::traits::input_parameter< arma::mat& >::type f(fSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type Lambda_comp(Lambda_compSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type inv_prior_Pi_Omega(inv_prior_Pi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type Omega_Pi(Omega_PiSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_mean(prior_Pi_meanSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_S(prior_SSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type Z_1(Z_1SEXP); - Rcpp::traits::input_parameter< const double >::type priorlatent0(priorlatent0SEXP); - Rcpp::traits::input_parameter< const double >::type phi_invvar(phi_invvarSEXP); - Rcpp::traits::input_parameter< const double 
>::type phi_meaninvvar(phi_meaninvvarSEXP); - Rcpp::traits::input_parameter< const double >::type prior_sigma2(prior_sigma2SEXP); - Rcpp::traits::input_parameter< const double >::type prior_df(prior_dfSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_q(n_qSEXP); - Rcpp::traits::input_parameter< arma::uword >::type T_b(T_bSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_lags(n_lagsSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_vars(n_varsSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_T(n_TSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_fcst(n_fcstSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_thin(n_thinSEXP); - Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - mcmc_minn_csv(y_in_p, Pi, Sigma, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose); - return R_NilValue; -END_RCPP -} -// mcmc_ss_csv -void mcmc_ss_csv(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::cube& Z, arma::cube& Z_fcst, arma::vec& phi, arma::vec& sigma, arma::mat& f, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::mat& inv_prior_psi_Omega, const arma::mat& inv_prior_psi_Omega_mean, bool check_roots, const arma::mat& Z_1, const double priorlatent0, const double phi_invvar, const double phi_meaninvvar, const double prior_sigma2, const double prior_df, arma::uword n_reps, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, 
arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose); -RcppExport SEXP _mfbvar_mcmc_ss_csv(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP psiSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP phiSEXP, SEXP sigmaSEXP, SEXP fSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP inv_prior_psi_OmegaSEXP, SEXP inv_prior_psi_Omega_meanSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP priorlatent0SEXP, SEXP phi_invvarSEXP, SEXP phi_meaninvvarSEXP, SEXP prior_sigma2SEXP, SEXP prior_dfSEXP, SEXP n_repsSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP) { +void mcmc_minn_csv(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, arma::vec& phi, arma::vec& sigma, arma::mat& f, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& Z_1, const double priorlatent0, const double phi_invvar, const double phi_meaninvvar, const double prior_sigma2, const double prior_df, arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_thin, bool verbose); +RcppExport SEXP _mfbvar_mcmc_minn_csv(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP phiSEXP, SEXP sigmaSEXP, SEXP fSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP Z_1SEXP, SEXP priorlatent0SEXP, SEXP phi_invvarSEXP, SEXP phi_meaninvvarSEXP, SEXP prior_sigma2SEXP, SEXP prior_dfSEXP, SEXP n_repsSEXP, SEXP n_burninSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP 
n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_thinSEXP, SEXP verboseSEXP) { BEGIN_RCPP Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const arma::mat& >::type y_in_p(y_in_pSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Pi(PiSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Sigma(SigmaSEXP); - Rcpp::traits::input_parameter< arma::mat& >::type psi(psiSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z(ZSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z_fcst(Z_fcstSEXP); Rcpp::traits::input_parameter< arma::vec& >::type phi(phiSEXP); @@ -186,13 +166,6 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const arma::mat& >::type Omega_Pi(Omega_PiSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_mean(prior_Pi_meanSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type prior_S(prior_SSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type D_mat(D_matSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type dt(dtSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type d1(d1SEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type d_fcst_lags(d_fcst_lagsSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type inv_prior_psi_Omega(inv_prior_psi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type inv_prior_psi_Omega_mean(inv_prior_psi_Omega_meanSEXP); - Rcpp::traits::input_parameter< bool >::type check_roots(check_rootsSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Z_1(Z_1SEXP); Rcpp::traits::input_parameter< const double >::type priorlatent0(priorlatent0SEXP); Rcpp::traits::input_parameter< const double >::type phi_invvar(phi_invvarSEXP); @@ -200,22 +173,22 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const double >::type prior_sigma2(prior_sigma2SEXP); Rcpp::traits::input_parameter< const double >::type prior_df(prior_dfSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); + 
Rcpp::traits::input_parameter< arma::uword >::type n_burnin(n_burninSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_q(n_qSEXP); Rcpp::traits::input_parameter< arma::uword >::type T_b(T_bSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_lags(n_lagsSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_vars(n_varsSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_T(n_TSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_fcst(n_fcstSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_determ(n_determSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_thin(n_thinSEXP); Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - mcmc_ss_csv(y_in_p, Pi, Sigma, psi, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, inv_prior_psi_Omega, inv_prior_psi_Omega_mean, check_roots, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose); + mcmc_minn_csv(y_in_p, Pi, Sigma, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose); return R_NilValue; END_RCPP } // mcmc_ssng_csv -void mcmc_ssng_csv(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, arma::cube& Z_fcst, arma::vec& phi, arma::vec& sigma, arma::mat& f, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::vec& prior_psi_mean, double c0, double c1, double s, 
bool check_roots, const arma::mat& Z_1, const double priorlatent0, const double phi_invvar, const double phi_meaninvvar, const double prior_sigma2, const double prior_df, arma::uword n_reps, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose); -RcppExport SEXP _mfbvar_mcmc_ssng_csv(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP psiSEXP, SEXP phi_muSEXP, SEXP lambda_muSEXP, SEXP omegaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP phiSEXP, SEXP sigmaSEXP, SEXP fSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP prior_psi_meanSEXP, SEXP c0SEXP, SEXP c1SEXP, SEXP sSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP priorlatent0SEXP, SEXP phi_invvarSEXP, SEXP phi_meaninvvarSEXP, SEXP prior_sigma2SEXP, SEXP prior_dfSEXP, SEXP n_repsSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP) { +void mcmc_ssng_csv(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, arma::cube& Z_fcst, arma::vec& phi, arma::vec& sigma, arma::mat& f, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::vec& prior_psi_mean, double c0, double c1, double s, bool check_roots, const arma::mat& Z_1, const double priorlatent0, const double phi_invvar, const double phi_meaninvvar, const double prior_sigma2, const double prior_df, arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, 
arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose, bool ssng); +RcppExport SEXP _mfbvar_mcmc_ssng_csv(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP psiSEXP, SEXP phi_muSEXP, SEXP lambda_muSEXP, SEXP omegaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP phiSEXP, SEXP sigmaSEXP, SEXP fSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP prior_psi_meanSEXP, SEXP c0SEXP, SEXP c1SEXP, SEXP sSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP priorlatent0SEXP, SEXP phi_invvarSEXP, SEXP phi_meaninvvarSEXP, SEXP prior_sigma2SEXP, SEXP prior_dfSEXP, SEXP n_repsSEXP, SEXP n_burninSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP, SEXP ssngSEXP) { BEGIN_RCPP Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const arma::mat& >::type y_in_p(y_in_pSEXP); @@ -252,6 +225,7 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const double >::type prior_sigma2(prior_sigma2SEXP); Rcpp::traits::input_parameter< const double >::type prior_df(prior_dfSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_burnin(n_burninSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_q(n_qSEXP); Rcpp::traits::input_parameter< arma::uword >::type T_b(T_bSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_lags(n_lagsSEXP); @@ -261,13 +235,14 @@ BEGIN_RCPP Rcpp::traits::input_parameter< arma::uword >::type n_determ(n_determSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_thin(n_thinSEXP); Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - mcmc_ssng_csv(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, phi, sigma, f, Lambda_comp, 
prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose); + Rcpp::traits::input_parameter< bool >::type ssng(ssngSEXP); + mcmc_ssng_csv(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, phi, sigma, f, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng); return R_NilValue; END_RCPP } // mcmc_minn_diffuse -void mcmc_minn_diffuse(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_thin, bool verbose); -RcppExport SEXP _mfbvar_mcmc_minn_diffuse(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_thinSEXP, SEXP verboseSEXP) { +void mcmc_minn_diffuse(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, arma::mat& aux, arma::vec& global, arma::mat& local, arma::vec& slice, const arma::mat& Lambda_comp, arma::mat prior_Pi_Omega, arma::vec prior_Pi_mean_vec, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, 
arma::uword n_thin, bool verbose, const double a, bool gig); +RcppExport SEXP _mfbvar_mcmc_minn_diffuse(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP auxSEXP, SEXP globalSEXP, SEXP localSEXP, SEXP sliceSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP prior_Pi_mean_vecSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_burninSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_thinSEXP, SEXP verboseSEXP, SEXP aSEXP, SEXP gigSEXP) { BEGIN_RCPP Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const arma::mat& >::type y_in_p(y_in_pSEXP); @@ -275,11 +250,16 @@ BEGIN_RCPP Rcpp::traits::input_parameter< arma::cube& >::type Sigma(SigmaSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z(ZSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z_fcst(Z_fcstSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type aux(auxSEXP); + Rcpp::traits::input_parameter< arma::vec& >::type global(globalSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type local(localSEXP); + Rcpp::traits::input_parameter< arma::vec& >::type slice(sliceSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Lambda_comp(Lambda_compSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type Omega_Pi(Omega_PiSEXP); + Rcpp::traits::input_parameter< arma::mat >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); + Rcpp::traits::input_parameter< arma::vec >::type prior_Pi_mean_vec(prior_Pi_mean_vecSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Z_1(Z_1SEXP); Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_burnin(n_burninSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_q(n_qSEXP); Rcpp::traits::input_parameter< arma::uword >::type T_b(T_bSEXP); Rcpp::traits::input_parameter< arma::uword >::type 
n_lags(n_lagsSEXP); @@ -288,103 +268,154 @@ BEGIN_RCPP Rcpp::traits::input_parameter< arma::uword >::type n_fcst(n_fcstSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_thin(n_thinSEXP); Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - mcmc_minn_diffuse(y_in_p, Pi, Sigma, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose); + Rcpp::traits::input_parameter< const double >::type a(aSEXP); + Rcpp::traits::input_parameter< bool >::type gig(gigSEXP); + mcmc_minn_diffuse(y_in_p, Pi, Sigma, Z, Z_fcst, aux, global, local, slice, Lambda_comp, prior_Pi_Omega, prior_Pi_mean_vec, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, a, gig); return R_NilValue; END_RCPP } -// mcmc_minn_iw -void mcmc_minn_iw(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_thin, bool verbose, int prior_nu); -RcppExport SEXP _mfbvar_mcmc_minn_iw(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_thinSEXP, SEXP verboseSEXP, SEXP prior_nuSEXP) { +// mcmc_ssng_diffuse +void mcmc_ssng_diffuse(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& 
prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::vec& prior_psi_mean, double c0, double c1, double s, bool check_roots, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose, bool ssng); +RcppExport SEXP _mfbvar_mcmc_ssng_diffuse(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP psiSEXP, SEXP phi_muSEXP, SEXP lambda_muSEXP, SEXP omegaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP prior_psi_meanSEXP, SEXP c0SEXP, SEXP c1SEXP, SEXP sSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_burninSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP, SEXP ssngSEXP) { BEGIN_RCPP Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const arma::mat& >::type y_in_p(y_in_pSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Pi(PiSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Sigma(SigmaSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type psi(psiSEXP); + Rcpp::traits::input_parameter< arma::vec& >::type phi_mu(phi_muSEXP); + Rcpp::traits::input_parameter< arma::vec& >::type lambda_mu(lambda_muSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type omega(omegaSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z(ZSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z_fcst(Z_fcstSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Lambda_comp(Lambda_compSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type 
inv_prior_Pi_Omega(inv_prior_Pi_OmegaSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Omega_Pi(Omega_PiSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_mean(prior_Pi_meanSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_S(prior_SSEXP); + Rcpp::traits::input_parameter< const arma::mat& >::type D_mat(D_matSEXP); + Rcpp::traits::input_parameter< const arma::mat& >::type dt(dtSEXP); + Rcpp::traits::input_parameter< const arma::mat& >::type d1(d1SEXP); + Rcpp::traits::input_parameter< const arma::mat& >::type d_fcst_lags(d_fcst_lagsSEXP); + Rcpp::traits::input_parameter< const arma::vec& >::type prior_psi_mean(prior_psi_meanSEXP); + Rcpp::traits::input_parameter< double >::type c0(c0SEXP); + Rcpp::traits::input_parameter< double >::type c1(c1SEXP); + Rcpp::traits::input_parameter< double >::type s(sSEXP); + Rcpp::traits::input_parameter< bool >::type check_roots(check_rootsSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Z_1(Z_1SEXP); Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_burnin(n_burninSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_q(n_qSEXP); Rcpp::traits::input_parameter< arma::uword >::type T_b(T_bSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_lags(n_lagsSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_vars(n_varsSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_T(n_TSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_fcst(n_fcstSEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_determ(n_determSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_thin(n_thinSEXP); Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - Rcpp::traits::input_parameter< int >::type prior_nu(prior_nuSEXP); - mcmc_minn_iw(y_in_p, Pi, Sigma, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, n_reps, 
n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, prior_nu); + Rcpp::traits::input_parameter< bool >::type ssng(ssngSEXP); + mcmc_ssng_diffuse(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng); return R_NilValue; END_RCPP } -// mcmc_ss_diffuse -void mcmc_ss_diffuse(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::cube& Z, arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::mat& inv_prior_psi_Omega, const arma::mat& inv_prior_psi_Omega_mean, bool check_roots, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose); -RcppExport SEXP _mfbvar_mcmc_ss_diffuse(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP psiSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP inv_prior_psi_OmegaSEXP, SEXP inv_prior_psi_Omega_meanSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP) { +// mcmc_minn_fsv +void mcmc_minn_fsv(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Z, arma::cube& Z_fcst, arma::mat& mu, arma::mat& phi, arma::mat& sigma, arma::cube& f, arma::cube& facload, arma::cube& h, arma::mat& aux, arma::vec& global, arma::mat& local, arma::vec& slice, const arma::mat& Lambda_comp, arma::mat prior_Pi_Omega, const arma::vec& prior_Pi_AR1, const 
arma::mat& Z_1, double bmu, double Bmu, double a0idi, double b0idi, double a0fac, double b0fac, const Rcpp::NumericVector& Bsigma, double B011inv, double B022inv, const Rcpp::NumericVector& priorh0, const arma::imat& armarestr, const arma::mat& armatau2, arma::uword n_fac, arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_thin, bool verbose, const double a, bool gig); +RcppExport SEXP _mfbvar_mcmc_minn_fsv(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP muSEXP, SEXP phiSEXP, SEXP sigmaSEXP, SEXP fSEXP, SEXP facloadSEXP, SEXP hSEXP, SEXP auxSEXP, SEXP globalSEXP, SEXP localSEXP, SEXP sliceSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP prior_Pi_AR1SEXP, SEXP Z_1SEXP, SEXP bmuSEXP, SEXP BmuSEXP, SEXP a0idiSEXP, SEXP b0idiSEXP, SEXP a0facSEXP, SEXP b0facSEXP, SEXP BsigmaSEXP, SEXP B011invSEXP, SEXP B022invSEXP, SEXP priorh0SEXP, SEXP armarestrSEXP, SEXP armatau2SEXP, SEXP n_facSEXP, SEXP n_repsSEXP, SEXP n_burninSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_thinSEXP, SEXP verboseSEXP, SEXP aSEXP, SEXP gigSEXP) { BEGIN_RCPP Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const arma::mat& >::type y_in_p(y_in_pSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Pi(PiSEXP); - Rcpp::traits::input_parameter< arma::cube& >::type Sigma(SigmaSEXP); - Rcpp::traits::input_parameter< arma::mat& >::type psi(psiSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z(ZSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z_fcst(Z_fcstSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type mu(muSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type phi(phiSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type sigma(sigmaSEXP); + Rcpp::traits::input_parameter< arma::cube& >::type f(fSEXP); + Rcpp::traits::input_parameter< arma::cube& >::type 
facload(facloadSEXP); + Rcpp::traits::input_parameter< arma::cube& >::type h(hSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type aux(auxSEXP); + Rcpp::traits::input_parameter< arma::vec& >::type global(globalSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type local(localSEXP); + Rcpp::traits::input_parameter< arma::vec& >::type slice(sliceSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Lambda_comp(Lambda_compSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type Omega_Pi(Omega_PiSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type D_mat(D_matSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type dt(dtSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type d1(d1SEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type d_fcst_lags(d_fcst_lagsSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type inv_prior_psi_Omega(inv_prior_psi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type inv_prior_psi_Omega_mean(inv_prior_psi_Omega_meanSEXP); - Rcpp::traits::input_parameter< bool >::type check_roots(check_rootsSEXP); + Rcpp::traits::input_parameter< arma::mat >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); + Rcpp::traits::input_parameter< const arma::vec& >::type prior_Pi_AR1(prior_Pi_AR1SEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Z_1(Z_1SEXP); + Rcpp::traits::input_parameter< double >::type bmu(bmuSEXP); + Rcpp::traits::input_parameter< double >::type Bmu(BmuSEXP); + Rcpp::traits::input_parameter< double >::type a0idi(a0idiSEXP); + Rcpp::traits::input_parameter< double >::type b0idi(b0idiSEXP); + Rcpp::traits::input_parameter< double >::type a0fac(a0facSEXP); + Rcpp::traits::input_parameter< double >::type b0fac(b0facSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type Bsigma(BsigmaSEXP); + Rcpp::traits::input_parameter< double >::type 
B011inv(B011invSEXP); + Rcpp::traits::input_parameter< double >::type B022inv(B022invSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type priorh0(priorh0SEXP); + Rcpp::traits::input_parameter< const arma::imat& >::type armarestr(armarestrSEXP); + Rcpp::traits::input_parameter< const arma::mat& >::type armatau2(armatau2SEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_fac(n_facSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_burnin(n_burninSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_q(n_qSEXP); Rcpp::traits::input_parameter< arma::uword >::type T_b(T_bSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_lags(n_lagsSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_vars(n_varsSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_T(n_TSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_fcst(n_fcstSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_determ(n_determSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_thin(n_thinSEXP); Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - mcmc_ss_diffuse(y_in_p, Pi, Sigma, psi, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, D_mat, dt, d1, d_fcst_lags, inv_prior_psi_Omega, inv_prior_psi_Omega_mean, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose); + Rcpp::traits::input_parameter< const double >::type a(aSEXP); + Rcpp::traits::input_parameter< bool >::type gig(gigSEXP); + mcmc_minn_fsv(y_in_p, Pi, Z, Z_fcst, mu, phi, sigma, f, facload, h, aux, global, local, slice, Lambda_comp, prior_Pi_Omega, prior_Pi_AR1, Z_1, bmu, Bmu, a0idi, b0idi, a0fac, b0fac, Bsigma, B011inv, B022inv, priorh0, armarestr, armatau2, n_fac, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, a, gig); return R_NilValue; END_RCPP } -// mcmc_ss_iw -void mcmc_ss_iw(const arma::mat& y_in_p, 
arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::cube& Z, arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::mat& inv_prior_psi_Omega, const arma::mat& inv_prior_psi_Omega_mean, bool check_roots, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose); -RcppExport SEXP _mfbvar_mcmc_ss_iw(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP psiSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP inv_prior_psi_OmegaSEXP, SEXP inv_prior_psi_Omega_meanSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP) { +// mcmc_ssng_fsv +void mcmc_ssng_fsv(const arma::mat& y_in_p, arma::cube& Pi, arma::mat& psi, arma::vec& phi_mu, arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, arma::cube& Z_fcst, arma::mat& mu, arma::mat& phi, arma::mat& sigma, arma::cube& f, arma::cube& facload, arma::cube& h, const arma::mat& Lambda_comp, arma::mat prior_Pi_Omega, const arma::vec& prior_Pi_AR1, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::vec& prior_psi_mean, double c0, double c1, double s, bool check_roots, const arma::mat& Z_1, double bmu, double Bmu, double a0idi, double b0idi, double a0fac, double b0fac, const Rcpp::NumericVector& Bsigma, double B011inv, double B022inv, const 
Rcpp::NumericVector& priorh0, const arma::imat& armarestr, const arma::mat& armatau2, arma::uword n_fac, arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose, bool ssng); +RcppExport SEXP _mfbvar_mcmc_ssng_fsv(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP psiSEXP, SEXP phi_muSEXP, SEXP lambda_muSEXP, SEXP omegaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP muSEXP, SEXP phiSEXP, SEXP sigmaSEXP, SEXP fSEXP, SEXP facloadSEXP, SEXP hSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP prior_Pi_AR1SEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP prior_psi_meanSEXP, SEXP c0SEXP, SEXP c1SEXP, SEXP sSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP bmuSEXP, SEXP BmuSEXP, SEXP a0idiSEXP, SEXP b0idiSEXP, SEXP a0facSEXP, SEXP b0facSEXP, SEXP BsigmaSEXP, SEXP B011invSEXP, SEXP B022invSEXP, SEXP priorh0SEXP, SEXP armarestrSEXP, SEXP armatau2SEXP, SEXP n_facSEXP, SEXP n_repsSEXP, SEXP n_burninSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP, SEXP ssngSEXP) { BEGIN_RCPP Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const arma::mat& >::type y_in_p(y_in_pSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Pi(PiSEXP); - Rcpp::traits::input_parameter< arma::cube& >::type Sigma(SigmaSEXP); Rcpp::traits::input_parameter< arma::mat& >::type psi(psiSEXP); + Rcpp::traits::input_parameter< arma::vec& >::type phi_mu(phi_muSEXP); + Rcpp::traits::input_parameter< arma::vec& >::type lambda_mu(lambda_muSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type omega(omegaSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z(ZSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z_fcst(Z_fcstSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type mu(muSEXP); + Rcpp::traits::input_parameter< 
arma::mat& >::type phi(phiSEXP); + Rcpp::traits::input_parameter< arma::mat& >::type sigma(sigmaSEXP); + Rcpp::traits::input_parameter< arma::cube& >::type f(fSEXP); + Rcpp::traits::input_parameter< arma::cube& >::type facload(facloadSEXP); + Rcpp::traits::input_parameter< arma::cube& >::type h(hSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Lambda_comp(Lambda_compSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type inv_prior_Pi_Omega(inv_prior_Pi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type Omega_Pi(Omega_PiSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_mean(prior_Pi_meanSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type prior_S(prior_SSEXP); + Rcpp::traits::input_parameter< arma::mat >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); + Rcpp::traits::input_parameter< const arma::vec& >::type prior_Pi_AR1(prior_Pi_AR1SEXP); Rcpp::traits::input_parameter< const arma::mat& >::type D_mat(D_matSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type dt(dtSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type d1(d1SEXP); Rcpp::traits::input_parameter< const arma::mat& >::type d_fcst_lags(d_fcst_lagsSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type inv_prior_psi_Omega(inv_prior_psi_OmegaSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type inv_prior_psi_Omega_mean(inv_prior_psi_Omega_meanSEXP); + Rcpp::traits::input_parameter< const arma::vec& >::type prior_psi_mean(prior_psi_meanSEXP); + Rcpp::traits::input_parameter< double >::type c0(c0SEXP); + Rcpp::traits::input_parameter< double >::type c1(c1SEXP); + Rcpp::traits::input_parameter< double >::type s(sSEXP); Rcpp::traits::input_parameter< bool >::type check_roots(check_rootsSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Z_1(Z_1SEXP); + Rcpp::traits::input_parameter< double >::type 
bmu(bmuSEXP); + Rcpp::traits::input_parameter< double >::type Bmu(BmuSEXP); + Rcpp::traits::input_parameter< double >::type a0idi(a0idiSEXP); + Rcpp::traits::input_parameter< double >::type b0idi(b0idiSEXP); + Rcpp::traits::input_parameter< double >::type a0fac(a0facSEXP); + Rcpp::traits::input_parameter< double >::type b0fac(b0facSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type Bsigma(BsigmaSEXP); + Rcpp::traits::input_parameter< double >::type B011inv(B011invSEXP); + Rcpp::traits::input_parameter< double >::type B022inv(B022invSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type priorh0(priorh0SEXP); + Rcpp::traits::input_parameter< const arma::imat& >::type armarestr(armarestrSEXP); + Rcpp::traits::input_parameter< const arma::mat& >::type armatau2(armatau2SEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_fac(n_facSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_burnin(n_burninSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_q(n_qSEXP); Rcpp::traits::input_parameter< arma::uword >::type T_b(T_bSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_lags(n_lagsSEXP); @@ -394,54 +425,46 @@ BEGIN_RCPP Rcpp::traits::input_parameter< arma::uword >::type n_determ(n_determSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_thin(n_thinSEXP); Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - mcmc_ss_iw(y_in_p, Pi, Sigma, psi, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, inv_prior_psi_Omega, inv_prior_psi_Omega_mean, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose); + Rcpp::traits::input_parameter< bool >::type ssng(ssngSEXP); + mcmc_ssng_fsv(y_in_p, Pi, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, mu, phi, sigma, f, facload, h, Lambda_comp, prior_Pi_Omega, prior_Pi_AR1, 
D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, bmu, Bmu, a0idi, b0idi, a0fac, b0fac, Bsigma, B011inv, B022inv, priorh0, armarestr, armatau2, n_fac, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng); return R_NilValue; END_RCPP } -// mcmc_ssng_diffuse -void mcmc_ssng_diffuse(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::vec& prior_psi_mean, double c0, double c1, double s, bool check_roots, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose); -RcppExport SEXP _mfbvar_mcmc_ssng_diffuse(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP psiSEXP, SEXP phi_muSEXP, SEXP lambda_muSEXP, SEXP omegaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP prior_psi_meanSEXP, SEXP c0SEXP, SEXP c1SEXP, SEXP sSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP) { +// mcmc_minn_iw +void mcmc_minn_iw(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, arma::uword 
n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_thin, bool verbose, int prior_nu); +RcppExport SEXP _mfbvar_mcmc_minn_iw(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_burninSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_thinSEXP, SEXP verboseSEXP, SEXP prior_nuSEXP) { BEGIN_RCPP Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const arma::mat& >::type y_in_p(y_in_pSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Pi(PiSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Sigma(SigmaSEXP); - Rcpp::traits::input_parameter< arma::mat& >::type psi(psiSEXP); - Rcpp::traits::input_parameter< arma::vec& >::type phi_mu(phi_muSEXP); - Rcpp::traits::input_parameter< arma::vec& >::type lambda_mu(lambda_muSEXP); - Rcpp::traits::input_parameter< arma::mat& >::type omega(omegaSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z(ZSEXP); Rcpp::traits::input_parameter< arma::cube& >::type Z_fcst(Z_fcstSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Lambda_comp(Lambda_compSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_Omega(prior_Pi_OmegaSEXP); + Rcpp::traits::input_parameter< const arma::mat& >::type inv_prior_Pi_Omega(inv_prior_Pi_OmegaSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Omega_Pi(Omega_PiSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type D_mat(D_matSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type dt(dtSEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type d1(d1SEXP); - Rcpp::traits::input_parameter< const arma::mat& >::type d_fcst_lags(d_fcst_lagsSEXP); - Rcpp::traits::input_parameter< const arma::vec& >::type prior_psi_mean(prior_psi_meanSEXP); - 
Rcpp::traits::input_parameter< double >::type c0(c0SEXP); - Rcpp::traits::input_parameter< double >::type c1(c1SEXP); - Rcpp::traits::input_parameter< double >::type s(sSEXP); - Rcpp::traits::input_parameter< bool >::type check_roots(check_rootsSEXP); + Rcpp::traits::input_parameter< const arma::mat& >::type prior_Pi_mean(prior_Pi_meanSEXP); + Rcpp::traits::input_parameter< const arma::mat& >::type prior_S(prior_SSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Z_1(Z_1SEXP); Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_burnin(n_burninSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_q(n_qSEXP); Rcpp::traits::input_parameter< arma::uword >::type T_b(T_bSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_lags(n_lagsSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_vars(n_varsSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_T(n_TSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_fcst(n_fcstSEXP); - Rcpp::traits::input_parameter< arma::uword >::type n_determ(n_determSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_thin(n_thinSEXP); Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - mcmc_ssng_diffuse(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, Omega_Pi, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose); + Rcpp::traits::input_parameter< int >::type prior_nu(prior_nuSEXP); + mcmc_minn_iw(y_in_p, Pi, Sigma, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_thin, verbose, prior_nu); return R_NilValue; END_RCPP } // mcmc_ssng_iw -void mcmc_ssng_iw(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, arma::vec& 
lambda_mu, arma::mat& omega, arma::cube& Z, arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::vec& prior_psi_mean, double c0, double c1, double s, bool check_roots, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose); -RcppExport SEXP _mfbvar_mcmc_ssng_iw(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP psiSEXP, SEXP phi_muSEXP, SEXP lambda_muSEXP, SEXP omegaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP prior_psi_meanSEXP, SEXP c0SEXP, SEXP c1SEXP, SEXP sSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP) { +void mcmc_ssng_iw(const arma::mat& y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, const arma::mat& inv_prior_Pi_Omega, const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, const arma::mat& prior_S, const arma::mat& D_mat, const arma::mat& dt, const arma::mat& d1, const arma::mat& d_fcst_lags, const arma::vec& prior_psi_mean, double c0, double c1, double s, bool check_roots, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, 
arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, bool verbose, bool ssng); +RcppExport SEXP _mfbvar_mcmc_ssng_iw(SEXP y_in_pSEXP, SEXP PiSEXP, SEXP SigmaSEXP, SEXP psiSEXP, SEXP phi_muSEXP, SEXP lambda_muSEXP, SEXP omegaSEXP, SEXP ZSEXP, SEXP Z_fcstSEXP, SEXP Lambda_compSEXP, SEXP prior_Pi_OmegaSEXP, SEXP inv_prior_Pi_OmegaSEXP, SEXP Omega_PiSEXP, SEXP prior_Pi_meanSEXP, SEXP prior_SSEXP, SEXP D_matSEXP, SEXP dtSEXP, SEXP d1SEXP, SEXP d_fcst_lagsSEXP, SEXP prior_psi_meanSEXP, SEXP c0SEXP, SEXP c1SEXP, SEXP sSEXP, SEXP check_rootsSEXP, SEXP Z_1SEXP, SEXP n_repsSEXP, SEXP n_burninSEXP, SEXP n_qSEXP, SEXP T_bSEXP, SEXP n_lagsSEXP, SEXP n_varsSEXP, SEXP n_TSEXP, SEXP n_fcstSEXP, SEXP n_determSEXP, SEXP n_thinSEXP, SEXP verboseSEXP, SEXP ssngSEXP) { BEGIN_RCPP Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const arma::mat& >::type y_in_p(y_in_pSEXP); @@ -470,6 +493,7 @@ BEGIN_RCPP Rcpp::traits::input_parameter< bool >::type check_roots(check_rootsSEXP); Rcpp::traits::input_parameter< const arma::mat& >::type Z_1(Z_1SEXP); Rcpp::traits::input_parameter< arma::uword >::type n_reps(n_repsSEXP); + Rcpp::traits::input_parameter< arma::uword >::type n_burnin(n_burninSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_q(n_qSEXP); Rcpp::traits::input_parameter< arma::uword >::type T_b(T_bSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_lags(n_lagsSEXP); @@ -479,7 +503,8 @@ BEGIN_RCPP Rcpp::traits::input_parameter< arma::uword >::type n_determ(n_determSEXP); Rcpp::traits::input_parameter< arma::uword >::type n_thin(n_thinSEXP); Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP); - mcmc_ssng_iw(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose); + Rcpp::traits::input_parameter< 
bool >::type ssng(ssngSEXP); + mcmc_ssng_iw(y_in_p, Pi, Sigma, psi, phi_mu, lambda_mu, omega, Z, Z_fcst, Lambda_comp, prior_Pi_Omega, inv_prior_Pi_Omega, Omega_Pi, prior_Pi_mean, prior_S, D_mat, dt, d1, d_fcst_lags, prior_psi_mean, c0, c1, s, check_roots, Z_1, n_reps, n_burnin, n_q, T_b, n_lags, n_vars, n_T, n_fcst, n_determ, n_thin, verbose, ssng); return R_NilValue; END_RCPP } @@ -706,6 +731,18 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// rig +double rig(double mu, double lambda); +RcppExport SEXP _mfbvar_rig(SEXP muSEXP, SEXP lambdaSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< double >::type mu(muSEXP); + Rcpp::traits::input_parameter< double >::type lambda(lambdaSEXP); + rcpp_result_gen = Rcpp::wrap(rig(mu, lambda)); + return rcpp_result_gen; +END_RCPP +} // rmvn arma::vec rmvn(const arma::mat& Phi, const arma::vec& d, const arma::vec& alpha); RcppExport SEXP _mfbvar_rmvn(SEXP PhiSEXP, SEXP dSEXP, SEXP alphaSEXP) { @@ -883,19 +920,19 @@ static const R_CallMethodDef CallEntries[] = { {"_mfbvar_create_X_noint", (DL_FUNC) &_mfbvar_create_X_noint, 2}, {"_mfbvar_create_X_t", (DL_FUNC) &_mfbvar_create_X_t, 1}, {"_mfbvar_create_X_t_noint", (DL_FUNC) &_mfbvar_create_X_t_noint, 1}, + {"_mfbvar_dl_reg", (DL_FUNC) &_mfbvar_dl_reg, 10}, {"_mfbvar_kf_loglike", (DL_FUNC) &_mfbvar_kf_loglike, 6}, {"_mfbvar_kf_ragged", (DL_FUNC) &_mfbvar_kf_ragged, 7}, {"_mfbvar_kf_sim_smooth", (DL_FUNC) &_mfbvar_kf_sim_smooth, 7}, {"_mfbvar_max_eig_cpp", (DL_FUNC) &_mfbvar_max_eig_cpp, 1}, - {"_mfbvar_mcmc_minn_csv", (DL_FUNC) &_mfbvar_mcmc_minn_csv, 29}, - {"_mfbvar_mcmc_ss_csv", (DL_FUNC) &_mfbvar_mcmc_ss_csv, 38}, - {"_mfbvar_mcmc_ssng_csv", (DL_FUNC) &_mfbvar_mcmc_ssng_csv, 43}, - {"_mfbvar_mcmc_minn_diffuse", (DL_FUNC) &_mfbvar_mcmc_minn_diffuse, 18}, - {"_mfbvar_mcmc_minn_iw", (DL_FUNC) &_mfbvar_mcmc_minn_iw, 22}, - {"_mfbvar_mcmc_ss_diffuse", (DL_FUNC) &_mfbvar_mcmc_ss_diffuse, 27}, - 
{"_mfbvar_mcmc_ss_iw", (DL_FUNC) &_mfbvar_mcmc_ss_iw, 30}, - {"_mfbvar_mcmc_ssng_diffuse", (DL_FUNC) &_mfbvar_mcmc_ssng_diffuse, 32}, - {"_mfbvar_mcmc_ssng_iw", (DL_FUNC) &_mfbvar_mcmc_ssng_iw, 35}, + {"_mfbvar_mcmc_minn_csv", (DL_FUNC) &_mfbvar_mcmc_minn_csv, 30}, + {"_mfbvar_mcmc_ssng_csv", (DL_FUNC) &_mfbvar_mcmc_ssng_csv, 45}, + {"_mfbvar_mcmc_minn_diffuse", (DL_FUNC) &_mfbvar_mcmc_minn_diffuse, 25}, + {"_mfbvar_mcmc_ssng_diffuse", (DL_FUNC) &_mfbvar_mcmc_ssng_diffuse, 34}, + {"_mfbvar_mcmc_minn_fsv", (DL_FUNC) &_mfbvar_mcmc_minn_fsv, 43}, + {"_mfbvar_mcmc_ssng_fsv", (DL_FUNC) &_mfbvar_mcmc_ssng_fsv, 52}, + {"_mfbvar_mcmc_minn_iw", (DL_FUNC) &_mfbvar_mcmc_minn_iw, 23}, + {"_mfbvar_mcmc_ssng_iw", (DL_FUNC) &_mfbvar_mcmc_ssng_iw, 37}, {"_mfbvar_variances_fsv", (DL_FUNC) &_mfbvar_variances_fsv, 9}, {"_mfbvar_variances_csv", (DL_FUNC) &_mfbvar_variances_csv, 6}, {"_mfbvar_posterior_psi_Omega_fsv", (DL_FUNC) &_mfbvar_posterior_psi_Omega_fsv, 4}, @@ -909,6 +946,7 @@ static const R_CallMethodDef CallEntries[] = { {"_mfbvar_posterior_psi_iw", (DL_FUNC) &_mfbvar_posterior_psi_iw, 13}, {"_mfbvar_forwardAlg", (DL_FUNC) &_mfbvar_forwardAlg, 4}, {"_mfbvar_do_rgig1", (DL_FUNC) &_mfbvar_do_rgig1, 3}, + {"_mfbvar_rig", (DL_FUNC) &_mfbvar_rig, 2}, {"_mfbvar_rmvn", (DL_FUNC) &_mfbvar_rmvn, 3}, {"_mfbvar_rmvn_ccm", (DL_FUNC) &_mfbvar_rmvn_ccm, 5}, {"_mfbvar_rmatn", (DL_FUNC) &_mfbvar_rmatn, 3}, diff --git a/src/builders.cpp b/src/builders.cpp index df18972..74f377c 100644 --- a/src/builders.cpp +++ b/src/builders.cpp @@ -4,6 +4,7 @@ //' @templateVar n_vars TRUE //' @templateVar n_lags TRUE //' @keywords internal +//' @noRd //' @template man_template // [[Rcpp::export]] arma::mat build_U_cpp(const arma::mat & Pi, int n_determ, int n_vars, int n_lags){ diff --git a/src/dl_reg.cpp b/src/dl_reg.cpp new file mode 100644 index 0000000..d1317c4 --- /dev/null +++ b/src/dl_reg.cpp @@ -0,0 +1,39 @@ +#include "mfbvar.h" +#include "update_dl.h" +// [[Rcpp::export]] +void dl_reg(const 
arma::mat & y, arma::mat & x, arma::mat & beta, + arma::mat & aux, arma::vec & global, arma::mat & local, + arma::mat & prior_Pi_Omega, arma::uword n_reps, + const double a, bool gig) { + + arma::mat eps = arma::mat(x.n_cols, 1); + arma::mat beta_i = beta.row(0).t(); + + double global_i = global(0); + arma::vec aux_i = aux.row(0).t(); + arma::vec local_i = local.row(0).t(); + arma::vec slice = arma::vec(local_i.n_elem).fill(1.0); + + arma::mat Sigma, Sigma_inv, L; + arma::vec mu; + arma::uword n_lags = x.n_cols; + + for (arma::uword i = 0; i < n_reps; ++i) { + + eps.imbue(norm_rand); + Sigma = x.t() * x; + Sigma.diag() += 1/prior_Pi_Omega; + Sigma_inv = arma::inv_sympd(Sigma); + L = arma::chol(Sigma_inv, "lower"); + mu = Sigma_inv * x.t() * y; + beta_i.col(0) = mu + L * eps; + + //beta_i.col(0) = mvn_rue(x, prior_Pi_Omega, y); + beta.row(i) = beta_i.col(0).t(); + update_dl(prior_Pi_Omega, aux_i, local_i, global_i, beta_i, 1, n_lags, a, slice, gig, false); + global(i) = global_i; + aux.row(i) = aux_i.t(); + local.row(i) = local_i.t(); + } + +} diff --git a/src/kf_cpp.cpp b/src/kf_cpp.cpp index 3f291c2..8a6fc39 100644 --- a/src/kf_cpp.cpp +++ b/src/kf_cpp.cpp @@ -183,6 +183,7 @@ arma::vec KF::loglike() { //' @param n_q_ number of quarterly variables //' @param T_b_ final time period where all monthly variables are observed //' @keywords internal +//' @noRd //' @return For \code{kf_ragged}, a list with elements: //' \item{a}{The one-step predictions (for the compact form)} //' \item{a_tt}{The filtered estimates (for the compact form)} @@ -380,7 +381,6 @@ arma::mat KF_ragged::create_d(int T_end_) { } //' @title Kalman filter and smoother -//' //' @description Kalman filter and smoother (\code{kf_ragged}) and simulation smoother (\code{kf_sim_smooth}) for mixed-frequency data with ragged edges. This function is more computationally efficient than using a companion form representation. 
//' @param y_ matrix with the data //' @param Phi_ matrix with the autoregressive parameters, where the last column is the intercept @@ -389,6 +389,7 @@ arma::mat KF_ragged::create_d(int T_end_) { //' @param n_q_ number of quarterly variables //' @param T_b_ final time period where all monthly variables are observed //' @keywords internal +//' @noRd //' @return For \code{kf_ragged}, a list with elements: //' \item{a}{The one-step predictions (for the compact form)} //' \item{a_tt}{The filtered estimates (for the compact form)} diff --git a/src/max_eig_cpp.cpp b/src/max_eig_cpp.cpp index c0f7c33..a4f0813 100644 --- a/src/max_eig_cpp.cpp +++ b/src/max_eig_cpp.cpp @@ -9,6 +9,7 @@ //' @templateVar A TRUE //' @template man_template //' @keywords internal +//' @noRd //' @return The maximum eigenvalue. // [[Rcpp::export]] double max_eig_cpp(const arma::mat & A) { diff --git a/src/mcmc_minn_csv.cpp b/src/mcmc_csv.cpp similarity index 55% rename from src/mcmc_minn_csv.cpp rename to src/mcmc_csv.cpp index 0b7d820..6b5bab6 100644 --- a/src/mcmc_minn_csv.cpp +++ b/src/mcmc_csv.cpp @@ -13,7 +13,7 @@ void mcmc_minn_csv(const arma::mat & y_in_p, const arma::mat & prior_S, const arma::mat& Z_1, const double priorlatent0, const double phi_invvar, const double phi_meaninvvar, const double prior_sigma2, const double prior_df, - arma::uword n_reps, + arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_thin, bool verbose) { bool single_freq; @@ -23,7 +23,7 @@ void mcmc_minn_csv(const arma::mat & y_in_p, single_freq = false; } - Progress p(n_reps, verbose); + Progress p(n_reps + n_burnin, verbose); arma::mat Pi_i = Pi.slice(0); arma::mat Sigma_i = Sigma.slice(0); @@ -52,7 +52,7 @@ void mcmc_minn_csv(const arma::mat & y_in_p, X = create_X(Z_i, n_lags); } - for (arma::uword i = 0; i < n_reps; ++i) { + for (arma::uword i = 0; i < n_reps + n_burnin; ++i) { 
Sigma_chol_cube.each_slice() = Sigma_chol; for (arma::uword j = 0; j < n_T; ++j) { Sigma_chol_cube.slice(j) = Sigma_chol_cube.slice(j) * exp_sqrt_f(j); @@ -89,7 +89,7 @@ void mcmc_minn_csv(const arma::mat & y_in_p, update_csv(u_tilde, phi_i, sigma_i, f_i, f0, mixprob, r, priorlatent0, phi_invvar, phi_meaninvvar, prior_sigma2, prior_df); vol_pred = f_i(n_T-1); - if ((i+1) % n_thin == 0) { + if (((i+1) % n_thin == 0) && (i >= n_burnin)) { if (n_fcst > 0) { Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t(); for (arma::uword h = 0; h < n_fcst; ++h) { @@ -99,14 +99,14 @@ void mcmc_minn_csv(const arma::mat & y_in_p, x = create_X_t(Z_fcst_i.cols(0+h, n_lags-1+h).t()); Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; } - Z_fcst.slice(i/n_thin) = Z_fcst_i.t(); + Z_fcst.slice((i-n_burnin)/n_thin) = Z_fcst_i.t(); } - Z.slice(i/n_thin) = Z_i; - Sigma.slice(i/n_thin) = Sigma_i; - Pi.slice(i/n_thin) = Pi_i; - f.row(i/n_thin) = f_i.t(); - phi(i/n_thin) = phi_i; - sigma(i/n_thin) = sigma_i; + Z.slice((i-n_burnin)/n_thin) = Z_i; + Sigma.slice((i-n_burnin)/n_thin) = Sigma_i; + Pi.slice((i-n_burnin)/n_thin) = Pi_i; + f.row((i-n_burnin)/n_thin) = f_i.t(); + phi((i-n_burnin)/n_thin) = phi_i; + sigma((i-n_burnin)/n_thin) = sigma_i; } if (verbose) { p.increment(); @@ -115,189 +115,6 @@ void mcmc_minn_csv(const arma::mat & y_in_p, } - -// [[Rcpp::export]] -void mcmc_ss_csv(const arma::mat & y_in_p, - arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::cube& Z, - arma::cube& Z_fcst, - arma::vec& phi, arma::vec& sigma, arma::mat& f, - const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, - const arma::mat& inv_prior_Pi_Omega, - const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, - const arma::mat & prior_S, - const arma::mat & D_mat, const arma::mat & dt, const arma::mat & d1, - const arma::mat & d_fcst_lags, const arma::mat& inv_prior_psi_Omega, const arma::mat& inv_prior_psi_Omega_mean, - bool check_roots, const arma::mat& Z_1, - const double 
priorlatent0, const double phi_invvar, const double phi_meaninvvar, - const double prior_sigma2, const double prior_df, - arma::uword n_reps, - arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, - arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, - bool verbose) { - bool single_freq; - if (n_q == 0 || n_q == n_vars) { - single_freq = true; - } else { - single_freq = false; - } - - Progress p(n_reps, verbose); - - arma::mat Pi_i = Pi.slice(0); - arma::mat Sigma_i = Sigma.slice(0); - arma::vec psi_i = psi.row(0).t(); - arma::mat X, XX, XX_inv, Pi_sample, post_Pi_Omega, post_Pi, Sigma_chol, Sigma_chol_inv; - arma::mat y_i = y_in_p; - arma::mat S, Pi_diff, post_S, x, mu_mat, mZ, mZ1, mX, y_scaled, X_scaled, eps, u, u_tilde; - - arma::vec f_i = f.row(0).t(); - arma::vec exp_sqrt_f = arma::exp(0.5 * f_i); - arma::vec errors = arma::vec(n_vars); - double phi_i = phi(0), sigma_i = sigma(0), vol_pred; - arma::imat r = arma::imat(n_T, n_vars); - double f0 = 0.0; - arma::mat mixprob = arma::mat(10*n_T, n_vars); - - arma::mat my = arma::mat(arma::size(y_in_p), arma::fill::zeros); - - arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); - arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); - arma::mat Z_i_demean = Z_i; - Z_i.rows(0, n_lags - 1) = Z_1; - - arma::mat Pi_i0 = arma::mat(n_vars, n_vars*n_lags+1, arma::fill::zeros); - arma::mat Pi_comp = arma::mat(n_vars*n_lags, n_vars*n_lags, arma::fill::zeros); - Pi_comp.submat(n_vars, 0, n_vars*n_lags - 1, n_vars*(n_lags-1) - 1) = arma::eye(n_vars*(n_lags-1), n_vars*(n_lags-1)); - - arma::mat Psi_i = arma::mat(psi_i.begin(), n_vars, n_determ, false, true); - mu_mat = dt * Psi_i.t(); - arma::uword n_Lambda = Lambda_comp.n_cols/Lambda_comp.n_rows; - arma::mat mu_long = arma::mat(n_Lambda+n_T, n_vars, arma::fill::zeros); - arma::rowvec Lambda_single = arma::rowvec(n_Lambda, arma::fill::zeros); - for (arma::uword i = 0; i < n_Lambda; ++i) { - 
Lambda_single(i) = Lambda_comp.at(0, i*n_q); - } - - arma::cube Sigma_chol_cube = arma::cube(n_vars, n_vars, n_T, arma::fill::zeros); - Sigma_chol = arma::chol(Sigma_i, "lower"); - int post_nu = n_T + n_vars + 2; - - // if single freq, we don't need to update - if (single_freq) { - Z_i.rows(n_lags, n_T + n_lags - 1) = y_in_p; - } - - for (arma::uword i = 0; i < n_reps; ++i) { - Sigma_chol_cube.each_slice() = Sigma_chol; - for (arma::uword j = 0; j < n_T; ++j) { - Sigma_chol_cube.slice(j) = Sigma_chol_cube.slice(j) * exp_sqrt_f(j); - } - - if (!single_freq) { - my.cols(0, n_vars - n_q - 1) = y_in_p.cols(0, n_vars - n_q - 1) - mu_mat.cols(0, n_vars - n_q - 1); - mu_long.rows(0, n_Lambda-1) = d1.tail_rows(n_Lambda) * Psi_i.t(); - mu_long.rows(n_Lambda, n_T+n_Lambda-1) = mu_mat; - for (arma::uword j = 0; j < n_T; ++j) { - my.row(j).cols(n_vars - n_q - 1, n_vars - 1) = y_in_p.row(j).cols(n_vars - n_q - 1, n_vars - 1) - Lambda_single * mu_long.rows(j, j+n_Lambda-1).cols(n_vars - n_q - 1, n_vars - 1);// Needs fixing - } - } else { - // Even if single freq, mZ needs to be updated - mZ = y_in_p - mu_mat; - } - - mZ1 = Z_1 - d1 * Psi_i.t(); - Pi_i0.cols(1, n_vars*n_lags) = Pi_i; - - if (!single_freq) { - mZ = simsm_adaptive_sv(my, Pi_i0, Sigma_chol_cube, Lambda_comp, mZ1, n_q, T_b); - Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; - } - - Z_i_demean.rows(0, n_lags - 1) = mZ1; - Z_i_demean.rows(n_lags, n_T + n_lags - 1) = mZ; - - mX = create_X_noint(Z_i_demean, n_lags); - exp_sqrt_f = arma::exp(0.5 * f_i); - y_scaled = mZ; - y_scaled.each_col() /= exp_sqrt_f; - X_scaled = mX; - X_scaled.each_col() /= exp_sqrt_f; - XX = X_scaled.t() * X_scaled; - - XX_inv = arma::inv_sympd(XX); - Pi_sample = XX_inv * (X_scaled.t() * y_scaled); - post_Pi_Omega = arma::inv_sympd(inv_prior_Pi_Omega + XX); - post_Pi = post_Pi_Omega * (Omega_Pi + X_scaled.t() * y_scaled); - S = arma::trans((y_scaled - X_scaled * Pi_sample)) * (y_scaled - X_scaled * Pi_sample); - Pi_diff = prior_Pi_mean - 
Pi_sample; - post_S = prior_S + S + Pi_diff.t() * arma::inv_sympd(prior_Pi_Omega + XX_inv) * Pi_diff; - Sigma_i = rinvwish(post_nu, post_S); - Sigma_chol = arma::chol(Sigma_i, "lower"); - Sigma_chol_inv = arma::inv(arma::trimatl(Sigma_chol)); - - bool stationarity_check = false; - int num_try = 0, iter = 0; - double root = 1000; - while (stationarity_check == false) { - iter += 1; - Pi_i = rmatn(post_Pi.t(), post_Pi_Omega, Sigma_i); - if (check_roots) { - Pi_comp.rows(0, n_vars-1) = Pi_i; - root = max_eig_cpp(Pi_comp); - } else { - root = 0.0; - } - if (root < 1.0) { - stationarity_check = true; - num_try = iter; - } - if (iter == 1000) { - Rcpp::stop("Attemped to draw stationary Pi 1,000 times."); - } - } - - X = create_X_noint(Z_i, n_lags); - posterior_psi_csv(psi_i, mu_mat, Pi_i, D_mat, Sigma_chol_inv, exp_sqrt_f, inv_prior_psi_Omega, mZ + mu_mat, X, - inv_prior_psi_Omega_mean, dt, n_determ, n_vars, n_lags); - - mZ1 = Z_1 - d1 * Psi_i.t(); - Z_i_demean.rows(0, n_lags - 1) = mZ1; - Z_i_demean.rows(n_lags, n_T + n_lags - 1) = Z_i.rows(n_lags, n_T + n_lags - 1) - mu_mat; // Not the same as mu_mat b/c different mu_mat - X = create_X_noint(Z_i_demean, n_lags); - eps = Z_i_demean.rows(n_lags, n_T + n_lags - 1) - X * Pi_i.t(); - u = eps * Sigma_chol_inv.t(); - u_tilde = arma::log(arma::pow(u, 2.0)); - update_csv(u_tilde, phi_i, sigma_i, f_i, f0, mixprob, r, priorlatent0, phi_invvar, - phi_meaninvvar, prior_sigma2, prior_df); - - vol_pred = f_i(n_T-1); - if (verbose) { - p.increment(); - } - if ((i+1) % n_thin == 0) { - if (n_fcst > 0) { - Z_fcst_i.head_cols(n_lags) = Z_i_demean.tail_rows(n_lags).t(); - for (arma::uword h = 0; h < n_fcst; ++h) { - vol_pred = phi_i * vol_pred + R::rnorm(0.0, sigma_i); - errors.imbue(norm_rand); - errors = errors * std::exp(0.5 * vol_pred); - x = create_X_t_noint(Z_fcst_i.cols(0+h, n_lags-1+h).t()); - Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; - } - Z_fcst.slice(i/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); - } - - 
Z.slice(i/n_thin) = Z_i; - Sigma.slice(i/n_thin) = Sigma_i; - Pi.slice(i/n_thin) = Pi_i; - psi.row(i/n_thin) = psi_i.t(); - f.row(i/n_thin) = f_i.t(); - phi(i/n_thin) = phi_i; - sigma(i/n_thin) = sigma_i; - } - } -} - // [[Rcpp::export]] void mcmc_ssng_csv(const arma::mat & y_in_p, arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, @@ -314,10 +131,10 @@ void mcmc_ssng_csv(const arma::mat & y_in_p, bool check_roots, const arma::mat& Z_1, const double priorlatent0, const double phi_invvar, const double phi_meaninvvar, const double prior_sigma2, const double prior_df, - arma::uword n_reps, + arma::uword n_reps, arma::uword n_burnin, arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, - bool verbose) { + bool verbose, bool ssng) { bool single_freq; if (n_q == 0 || n_q == n_vars) { single_freq = true; @@ -325,7 +142,7 @@ void mcmc_ssng_csv(const arma::mat & y_in_p, single_freq = false; } - Progress p(n_reps, verbose); + Progress p(n_reps + n_burnin, verbose); arma::mat Pi_i = Pi.slice(0); arma::mat Sigma_i = Sigma.slice(0); @@ -357,7 +174,7 @@ void mcmc_ssng_csv(const arma::mat & y_in_p, double lambda_mu_i = lambda_mu(0); double phi_mu_i = phi_mu(0); arma::vec omega_i = omega.row(0).t(); - arma::mat inv_prior_psi_Omega = arma::diagmat(omega_i); + arma::mat inv_prior_psi_Omega = arma::diagmat(1.0/omega_i); arma::vec inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; double M, batch = 1.0; arma::running_stat stats; @@ -393,7 +210,7 @@ void mcmc_ssng_csv(const arma::mat & y_in_p, Z_i.rows(n_lags, n_T + n_lags - 1) = y_in_p; } - for (arma::uword i = 0; i < n_reps; ++i) { + for (arma::uword i = 0; i < n_reps + n_burnin; ++i) { Sigma_chol_cube.each_slice() = Sigma_chol; for (arma::uword j = 0; j < n_T; ++j) { Sigma_chol_cube.slice(j) = Sigma_chol_cube.slice(j) * exp_sqrt_f(j); @@ -411,7 +228,7 @@ void mcmc_ssng_csv(const arma::mat & y_in_p, Pi_i0.cols(1, 
n_vars*n_lags) = Pi_i; if (!single_freq) { - mZ = simsm_adaptive_cv(my, Pi_i0, Sigma_chol, Lambda_comp, mZ1, n_q, T_b); + mZ = simsm_adaptive_sv(my, Pi_i0, Sigma_chol_cube, Lambda_comp, mZ1, n_q, T_b); Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; } @@ -458,28 +275,31 @@ void mcmc_ssng_csv(const arma::mat & y_in_p, } } - update_ng(phi_mu_i, lambda_mu_i, omega_i, nm, c0, c1, s, psi_i, prior_psi_mean, accept); - if (adaptive_mh) { - stats(accept); - if (i % 100 == 0) { - batch += 1.0; - min_vec(1) = std::pow(batch, -0.5); - if (stats.mean() > 0.44) { - s_prop = log(s) + arma::min(min_vec); - if (s_prop < M){ - s = std::exp(s_prop); - } - } else { - s_prop = log(s) - arma::min(min_vec); - if (s_prop > -M){ - s = std::exp(s_prop); + if (ssng) { + update_ng(phi_mu_i, lambda_mu_i, omega_i, nm, c0, c1, s, psi_i, prior_psi_mean, accept); + if (adaptive_mh) { + stats(accept); + if (i % 100 == 0) { + batch += 1.0; + min_vec(1) = std::pow(batch, -0.5); + if (stats.mean() > 0.44) { + s_prop = log(s) + arma::min(min_vec); + if (s_prop < M){ + s = std::exp(s_prop); + } + } else { + s_prop = log(s) - arma::min(min_vec); + if (s_prop > -M){ + s = std::exp(s_prop); + } } + stats.reset(); } - stats.reset(); } + inv_prior_psi_Omega = arma::diagmat(1/omega_i); + inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; } - inv_prior_psi_Omega = arma::diagmat(1/omega_i); - inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; + X = create_X_noint(Z_i, n_lags); posterior_psi_csv(psi_i, mu_mat, Pi_i, D_mat, Sigma_chol_inv, exp_sqrt_f, inv_prior_psi_Omega, mZ + mu_mat, X, inv_prior_psi_Omega_mean, dt, n_determ, n_vars, n_lags); @@ -498,7 +318,7 @@ void mcmc_ssng_csv(const arma::mat & y_in_p, if (verbose) { p.increment(); } - if ((i+1) % n_thin == 0) { + if (((i+1) % n_thin == 0) && i >= n_burnin) { if (n_fcst > 0) { Z_fcst_i.head_cols(n_lags) = Z_i_demean.tail_rows(n_lags).t(); for (arma::uword h = 0; h < n_fcst; ++h) { @@ -508,19 +328,21 @@ void mcmc_ssng_csv(const arma::mat & y_in_p, x = 
create_X_t_noint(Z_fcst_i.cols(0+h, n_lags-1+h).t()); Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; } - Z_fcst.slice(i/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); + Z_fcst.slice((i-n_burnin)/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); } - Z.slice(i/n_thin) = Z_i; - Sigma.slice(i/n_thin) = Sigma_i; - Pi.slice(i/n_thin) = Pi_i; - psi.row(i/n_thin) = psi_i.t(); - f.row(i/n_thin) = f_i.t(); - phi(i/n_thin) = phi_i; - sigma(i/n_thin) = sigma_i; - phi_mu(i/n_thin) = phi_mu_i; - lambda_mu(i/n_thin) = lambda_mu_i; - omega.row(i/n_thin) = omega_i.t(); + Z.slice((i-n_burnin)/n_thin) = Z_i; + Sigma.slice((i-n_burnin)/n_thin) = Sigma_i; + Pi.slice((i-n_burnin)/n_thin) = Pi_i; + psi.row((i-n_burnin)/n_thin) = psi_i.t(); + f.row((i-n_burnin)/n_thin) = f_i.t(); + phi((i-n_burnin)/n_thin) = phi_i; + sigma((i-n_burnin)/n_thin) = sigma_i; + if (ssng) { + phi_mu((i-n_burnin)/n_thin) = phi_mu_i; + lambda_mu((i-n_burnin)/n_thin) = lambda_mu_i; + omega.row((i-n_burnin)/n_thin) = omega_i.t(); + } } } } diff --git a/src/mcmc_diffuse.cpp b/src/mcmc_diffuse.cpp new file mode 100644 index 0000000..b102455 --- /dev/null +++ b/src/mcmc_diffuse.cpp @@ -0,0 +1,317 @@ +#include "mfbvar.h" +#include "minn_utils.h" +#include "ss_utils.h" +#include "update_ng.h" +#include "update_dl.h" +// [[Rcpp::export]] +void mcmc_minn_diffuse(const arma::mat & y_in_p, + arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, + arma::mat & aux, arma::vec & global, arma::mat & local, + arma::vec & slice, + const arma::mat& Lambda_comp, arma::mat prior_Pi_Omega, + arma::vec prior_Pi_mean_vec, + const arma::mat& Z_1, + arma::uword n_reps, arma::uword n_burnin, + arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, + arma::uword n_T, arma::uword n_fcst, + arma::uword n_thin, bool verbose, + const double a, bool gig) { + bool single_freq; + if (n_q == 0 || n_q == n_vars) { + single_freq = true; + } else { + single_freq = false; + } + + + Progress p(n_reps + 
n_burnin, verbose); + arma::vec Pi_vec = arma::vec(Pi.begin(), n_vars*(n_vars*n_lags+1)); + arma::mat Pi_i = Pi.slice(0); //arma::mat(Pi_vec.begin(), n_vars, n_vars*n_lags + 1, false, true); + arma::mat Sigma_i = Sigma.slice(0); + arma::mat y_i = y_in_p; + arma::vec errors = arma::vec(n_vars); + arma::mat X, post_Pi_Omega_inv, L, b, u1, u2, u4, resid, x; + arma::mat u3 = arma::vec(n_vars*(n_vars*n_lags + 1)); + arma::mat post_S, Sigma_chol, Sigma_inv; + arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); + arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); + Z_i.rows(0, n_lags - 1) = Z_1; + + if (single_freq) { + Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; + X = create_X(Z_i, n_lags); + } + + // DL + bool dl = false; + double global_i; + if (a > 0) { + dl = true; + global_i = global(0); + } + arma::vec aux_i = aux.row(0).t(); + arma::vec local_i = local.row(0).t(); + + if (dl) { + prior_Pi_Omega.rows(1, n_vars*n_lags) = arma::reshape(aux_i % arma::pow(global_i * local_i, 2.0), n_vars*n_lags, n_vars); + } + + Sigma_chol = arma::chol(Sigma_i, "lower"); + Sigma_inv = arma::inv_sympd(Sigma_i); + arma::vec prior_Pi_Omega_vec_inv = 1.0 / arma::vectorise(prior_Pi_Omega); + arma::vec Omega_Pi = prior_Pi_mean_vec % prior_Pi_Omega_vec_inv; + + for (arma::uword i = 0; i < n_reps + n_burnin; ++i) { + if (!single_freq) { + y_i = simsm_adaptive_cv(y_in_p, Pi_i, Sigma_chol, Lambda_comp, Z_1, n_q, T_b); + Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; + X = create_X(Z_i, n_lags); + } + + // Pi + post_Pi_Omega_inv = arma::kron(Sigma_inv, X.t() * X); + post_Pi_Omega_inv.diag() += prior_Pi_Omega_vec_inv; + L = arma::chol(post_Pi_Omega_inv, "lower"); + b = arma::vectorise(X.t() * y_i * Sigma_inv) + Omega_Pi; + u1 = arma::solve(arma::trimatl(L), b); + u2 = arma::solve(arma::trimatu(L.t()), u1); + u3.imbue(norm_rand); + u4 = arma::solve(arma::trimatu(L.t()), u3); + Pi_vec = u2 + u4; + Pi_i = arma::trans(arma::reshape(Pi_vec, n_vars*n_lags+1, n_vars)); + resid = 
y_i - X * Pi_i.t(); // Pi_vec and Pi_i use the same memory + // Sigma + post_S = resid.t() * resid; + Sigma_i = rinvwish(n_T, post_S); + Sigma_chol = arma::chol(Sigma_i, "lower"); + Sigma_inv = arma::inv_sympd(Sigma_i); + + if (dl) { + update_dl(prior_Pi_Omega, aux_i, local_i, global_i, Pi_i.t(), n_vars, n_lags, a, slice, gig, true); + prior_Pi_Omega_vec_inv = 1.0 / arma::vectorise(prior_Pi_Omega); + Omega_Pi = prior_Pi_mean_vec % prior_Pi_Omega_vec_inv; + } + + if (((i+1) % n_thin == 0) && (i >= n_burnin)) { + if (n_fcst > 0) { + + Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t(); + for (arma::uword h = 0; h < n_fcst; ++h) { + errors.imbue(norm_rand); + x = create_X_t(Z_fcst_i.cols(0+h, n_lags-1+h).t()); + Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; + } + Z_fcst.slice((i-n_burnin)/n_thin) = Z_fcst_i.t(); + } + + Z.slice((i-n_burnin)/n_thin) = Z_i; + Sigma.slice((i-n_burnin)/n_thin) = Sigma_i; + Pi.slice((i-n_burnin)/n_thin) = Pi_i; + } + if (verbose) { + p.increment(); + } + } + +} + + + +// [[Rcpp::export]] +void mcmc_ssng_diffuse(const arma::mat & y_in_p, + arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, + arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, + arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, + const arma::mat& Omega_Pi, + const arma::mat & D_mat, const arma::mat & dt, const arma::mat & d1, + const arma::mat & d_fcst_lags, const arma::vec& prior_psi_mean, + double c0, double c1, double s, + bool check_roots, const arma::mat& Z_1, arma::uword n_reps, arma::uword n_burnin, + arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, + arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, + arma::uword n_thin, bool verbose, bool ssng) { + bool single_freq; + if (n_q == 0 || n_q == n_vars) { + single_freq = true; + } else { + single_freq = false; + } + + Progress p(n_reps + n_burnin, verbose); + + arma::vec Pi_vec = arma::vec(Pi.begin(), 
n_vars*(n_vars*n_lags)); + arma::mat Pi_i = Pi.slice(0); + arma::mat Sigma_i = Sigma.slice(0); + arma::vec psi_i = psi.row(0).t(); + arma::mat y_i = y_in_p; + arma::mat X, post_Pi_Omega_inv, L, b, u1, u2, u4, resid, x; + arma::mat post_S, mu_mat, mZ, mZ1, mX, Sigma_chol, Sigma_inv; + arma::mat u3 = arma::vec(n_vars*(n_vars*n_lags)); + arma::mat my = arma::mat(arma::size(y_in_p), arma::fill::zeros); + + arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); + arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); + arma::mat Z_i_demean = Z_i; + Z_i.rows(0, n_lags - 1) = Z_1; + + arma::mat Pi_i0 = arma::mat(n_vars, n_vars*n_lags+1, arma::fill::zeros); + arma::mat Pi_comp = arma::mat(n_vars*n_lags, n_vars*n_lags, arma::fill::zeros); + Pi_comp.submat(n_vars, 0, n_vars*n_lags - 1, n_vars*(n_lags-1) - 1) = arma::eye(n_vars*(n_lags-1), n_vars*(n_lags-1)); + + arma::mat Psi_i = arma::mat(psi_i.begin(), n_vars, n_determ, false, true); + mu_mat = dt * Psi_i.t(); + arma::uword n_Lambda = Lambda_comp.n_cols/Lambda_comp.n_rows; + arma::mat mu_long = arma::mat(n_Lambda+n_T, n_vars, arma::fill::zeros); + arma::rowvec Lambda_single = arma::rowvec(n_Lambda, arma::fill::zeros); + for (arma::uword i = 0; i < n_Lambda; ++i) { + Lambda_single(i) = Lambda_comp.at(0, i*n_q); + } + + Sigma_chol = arma::chol(Sigma_i, "lower"); + Sigma_inv = arma::inv_sympd(Sigma_i); + arma::vec prior_Pi_Omega_vec_inv = 1.0 / arma::vectorise(prior_Pi_Omega); + + // if single freq, we don't need to update + if (single_freq) { + Z_i.rows(n_lags, n_T + n_lags - 1) = y_in_p; + } + + // NG stuff + arma::uword nm = n_vars*n_determ; + double lambda_mu_i = lambda_mu(0); + double phi_mu_i = phi_mu(0); + arma::vec omega_i = omega.row(0).t(); + arma::mat inv_prior_psi_Omega = arma::diagmat(1.0/omega_i); + arma::vec inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; + double M, batch = 1.0; + arma::running_stat stats; + double accept = 0.0; + bool adaptive_mh = false; + double s_prop; + if (s 
< 0) { + M = std::abs(s); + s = 1.0; + adaptive_mh = true; + } + arma::vec min_vec(2); + min_vec(0) = 0.01; + + for (arma::uword i = 0; i < n_reps + n_burnin; ++i) { + if (!single_freq) { + update_demean(my, mu_long, y_in_p, mu_mat, d1, Psi_i, Lambda_single, n_vars, + n_q, n_Lambda, n_T); + } else { + // Even if single freq, mZ needs to be updated + mZ = y_in_p - mu_mat; + } + + mZ1 = Z_1 - d1 * Psi_i.t(); + Pi_i0.cols(1, n_vars*n_lags) = Pi_i; + + if (!single_freq) { + mZ = simsm_adaptive_cv(my, Pi_i0, Sigma_chol, Lambda_comp, mZ1, n_q, T_b); + Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; + } + + Z_i_demean.rows(0, n_lags - 1) = mZ1; + Z_i_demean.rows(n_lags, n_T + n_lags - 1) = mZ; + + mX = create_X_noint(Z_i_demean, n_lags); + // Pi + post_Pi_Omega_inv = arma::kron(Sigma_inv, mX.t() * mX); + post_Pi_Omega_inv.diag() += prior_Pi_Omega_vec_inv; + L = arma::chol(post_Pi_Omega_inv, "lower"); + b = arma::vectorise(mX.t() * mZ * Sigma_inv + Omega_Pi); + u1 = arma::solve(arma::trimatl(L), b); + u2 = arma::solve(arma::trimatu(L.t()), u1); + + bool stationarity_check = false; + int num_try = 0, iter = 0; + double root = 1000; + while (stationarity_check == false) { + iter += 1; + u3.imbue(norm_rand); + u4 = arma::solve(arma::trimatu(L.t()), u3); + Pi_vec = u2 + u4; + Pi_i = arma::trans(arma::reshape(Pi_vec, n_vars*n_lags, n_vars)); + if (check_roots) { + Pi_comp.rows(0, n_vars-1) = Pi_i; + root = max_eig_cpp(Pi_comp); + } else { + root = 0.0; + } + if (root < 1.0) { + stationarity_check = true; + num_try = iter; + } + if (iter == 1000) { + Rcpp::stop("Attemped to draw stationary Pi 1,000 times."); + } + } + + resid = mZ - mX * Pi_i.t(); + // Sigma + post_S = resid.t() * resid; + Sigma_i = rinvwish(n_T, post_S); + Sigma_chol = arma::chol(Sigma_i, "lower"); + Sigma_inv = arma::inv_sympd(Sigma_i); + + if (ssng) {update_ng(phi_mu_i, lambda_mu_i, omega_i, nm, c0, c1, s, psi_i, prior_psi_mean, accept); + if (adaptive_mh) { + stats(accept); + if (i % 100 == 0) { + batch += 
1.0; + min_vec(1) = std::pow(batch, -0.5); + if (stats.mean() > 0.44) { + s_prop = log(s) + arma::min(min_vec); + if (s_prop < M){ + s = std::exp(s_prop); + } + } else { + s_prop = log(s) - arma::min(min_vec); + if (s_prop > -M){ + s = std::exp(s_prop); + } + } + stats.reset(); + } + } + + inv_prior_psi_Omega = arma::diagmat(1/omega_i); + inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; + } + + X = create_X_noint(Z_i, n_lags); + posterior_psi_iw(psi_i, mu_mat, Pi_i, D_mat, Sigma_i, inv_prior_psi_Omega, mZ + mu_mat, X, inv_prior_psi_Omega_mean, dt, n_determ, n_vars, n_lags); + + arma::vec errors = arma::vec(n_vars); + if (((i+1) % n_thin == 0) && (i >= n_burnin)) { + if (n_fcst > 0) { + Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t() - mu_mat.tail_rows(n_lags).t(); + for (arma::uword h = 0; h < n_fcst; ++h) { + errors.imbue(norm_rand); + x = create_X_t_noint(Z_fcst_i.cols(0+h, n_lags-1+h).t()); + Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; + } + Z_fcst.slice((i-n_burnin)/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); + } + + Z.slice((i-n_burnin)/n_thin) = Z_i; + Sigma.slice((i-n_burnin)/n_thin) = Sigma_i; + Pi.slice((i-n_burnin)/n_thin) = Pi_i; + psi.row((i-n_burnin)/n_thin) = psi_i.t(); + if (ssng) { + phi_mu((i-n_burnin)/n_thin) = phi_mu_i; + lambda_mu((i-n_burnin)/n_thin) = lambda_mu_i; + omega.row((i-n_burnin)/n_thin) = omega_i.t(); + } + + } + if (verbose) { + p.increment(); + } + } + +} + + diff --git a/src/mcmc_fsv.cpp b/src/mcmc_fsv.cpp new file mode 100644 index 0000000..4df39cc --- /dev/null +++ b/src/mcmc_fsv.cpp @@ -0,0 +1,452 @@ +#include "mfbvar.h" +#include "minn_utils.h" +#include "ss_utils.h" +#include "update_fsv.h" +#include "mvn.h" +#include "mvn_par.h" +#include "update_ng.h" +#include "update_dl.h" +// [[Rcpp::export]] +void mcmc_minn_fsv(const arma::mat & y_in_p, + arma::cube& Pi, arma::cube& Z, arma::cube& Z_fcst, + arma::mat& mu, arma::mat& phi, arma::mat& sigma, + arma::cube& f, arma::cube& facload, arma::cube& 
h, + arma::mat & aux, arma::vec & global, arma::mat & local, + arma::vec & slice, + const arma::mat& Lambda_comp, arma::mat prior_Pi_Omega, + const arma::vec& prior_Pi_AR1, const arma::mat& Z_1, + double bmu, double Bmu, double a0idi, double b0idi, double a0fac, double b0fac, + const Rcpp::NumericVector & Bsigma, double B011inv, double B022inv, + const Rcpp::NumericVector & priorh0, const arma::imat & armarestr, + const arma::mat & armatau2, // armatau2 is the matrix with prior variance of factor loadings + arma::uword n_fac, arma::uword n_reps, arma::uword n_burnin, + arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, + arma::uword n_T, arma::uword n_fcst, arma::uword n_thin, bool verbose, + const double a, bool gig) { + bool single_freq; + if (n_q == 0 || n_q == n_vars) { + single_freq = true; + } else { + single_freq = false; + } + + Progress p(n_reps+n_burnin, verbose); + arma::mat Pi_i = Pi.slice(0); + arma::mat X; + arma::mat y_i = y_in_p; + arma::mat x; + arma::vec vol_pred; + + + // fsv + Rcpp::NumericMatrix curpara = Rcpp::NumericMatrix(3, n_vars + n_fac); + arma::mat curpara_arma(curpara.begin(), curpara.nrow(), curpara.ncol(), false); + curpara_arma.fill(0.0); + curpara_arma.row(0).cols(0, n_vars - 1) = mu.col(0).t(); + curpara_arma.row(1) = phi.col(0).t(); + curpara_arma.row(2) = sigma.col(0).t(); + + arma::vec mu_i = mu.col(0); + arma::vec phi_i = phi.col(0); + arma::vec sigma_i = sigma.col(0); + + arma::mat armaf = f.slice(0); + arma::mat armafacload = facload.slice(0); + arma::mat armah = h.slice(0); + arma::mat cc_i = armaf.t() * armafacload.t(); + + arma::vec armah0 = arma::vec(n_vars + n_fac); + + arma::mat Sig_i, y_hat, latent_nofac, h_j, X_j, y_j; + arma::vec error_pred; + arma::vec errors_sv = arma::vec(n_vars + n_fac); + arma::vec errors_var = arma::vec(n_vars + n_fac); + + arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); + arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); + 
Z_i.rows(0, n_lags - 1) = Z_1; + + arma::mat eps; + bool rue = true; + if (((n_vars*n_lags) > 1.1 * n_T) & (arma::range(prior_Pi_AR1) < 1e-12)) { + rue = false; + eps = arma::mat(n_T+n_vars*n_lags+1, n_vars); + } else { + eps = arma::mat(n_vars*n_lags+1, n_vars); + } + + if (single_freq) { + Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; + X = create_X(Z_i, n_lags); + } + + // DL + bool dl = false; + double global_i; + if (a > 0) { + dl = true; + global_i = global(0); + } + arma::vec aux_i = aux.row(0).t(); + arma::vec local_i = local.row(0).t(); + + if (dl) { + prior_Pi_Omega.rows(1, n_vars*n_lags) = arma::reshape(aux_i % arma::pow(global_i * local_i, 2.0), n_vars*n_lags, n_vars); + } + + arma::mat curpara_old, armafacload_old, armaf_old; + + for (arma::uword i = 0; i < n_reps + n_burnin; ++i) { + if (!single_freq) { + Sig_i = arma::exp(0.5 * armah.head_cols(n_vars)); + y_i = simsm_adaptive_univariate(y_in_p, Pi_i, Sig_i, Lambda_comp, Z_1, n_q, T_b, cc_i); + Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; + X = create_X(Z_i, n_lags); + } + + y_hat = y_i - X * Pi_i.t(); + + curpara_old = curpara_arma; + armafacload_old = armafacload; + armaf_old = armaf; + + update_fsv(armafacload, armaf, armah, armah0, curpara, armatau2, y_hat.t(), + bmu, Bmu, a0idi, b0idi, a0fac, b0fac, Bsigma, B011inv, B022inv, + priorh0, armarestr); + + if ((i+1) % n_thin == 0 && i >= n_burnin) { + mu_i = curpara_old.row(0).t(); + phi_i = curpara_old.row(1).t(); + sigma_i = curpara_old.row(2).t(); + if (n_fcst > 0) { + vol_pred = armah.tail_rows(1).t(); + Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t(); + for (arma::uword h = 0; h < n_fcst; ++h) { + errors_sv.imbue(norm_rand); + errors_var.imbue(norm_rand); + vol_pred = mu_i + phi_i % (vol_pred - mu_i) + sigma_i % errors_sv; // Twice because we first need it for the volatility, then for the VAR + error_pred = arma::exp(0.5 * vol_pred) % errors_var; + x = create_X_t(Z_fcst_i.cols(0+h, n_lags-1+h).t()); + Z_fcst_i.col(n_lags + h) = Pi_i * x + 
armafacload_old * error_pred.tail_rows(n_fac) + error_pred.head_rows(n_vars); + } + Z_fcst.slice((i-n_burnin)/n_thin) = Z_fcst_i.t(); + } + + Z.slice((i-n_burnin)/n_thin) = Z_i; + Pi.slice((i-n_burnin)/n_thin) = Pi_i; + f.slice((i-n_burnin)/n_thin) = armaf_old; + facload.slice((i-n_burnin)/n_thin) = armafacload_old; + h.slice((i-n_burnin)/n_thin) = armah; + mu.col((i-n_burnin)/n_thin) = mu_i.head(n_vars); + phi.col((i-n_burnin)/n_thin) = phi_i; + sigma.col((i-n_burnin)/n_thin) = sigma_i; + if (dl) { + global((i-n_burnin)/n_thin) = global_i; + aux.row((i-n_burnin)/n_thin) = aux_i.t(); + local.row((i-n_burnin)/n_thin) = local_i.t(); + } + } + + + cc_i = armaf.t() * armafacload.t(); // Common component + latent_nofac = y_i - cc_i; + + eps.imbue(norm_rand); + arma::mat output(n_vars*n_lags+1, n_vars); + if (rue) { + Pi_parallel_rue Pi_parallel_i(output, latent_nofac, X, prior_Pi_Omega, eps, + armah, prior_Pi_AR1, n_T, n_vars, n_lags); + RcppParallel::parallelFor(0, n_vars, Pi_parallel_i); + } else { + Pi_parallel_bcm Pi_parallel_i(output, latent_nofac, X, prior_Pi_Omega, eps, + armah, n_T, n_vars, n_lags); + RcppParallel::parallelFor(0, n_vars, Pi_parallel_i); + } + + Pi_i = output.t(); + + if (dl) { + update_dl(prior_Pi_Omega, aux_i, local_i, global_i, Pi_i.t(), n_vars, n_lags, a, slice, gig, true); + } + + if (verbose) { + p.increment(); + } + + + } + +} + +// [[Rcpp::export]] +void mcmc_ssng_fsv(const arma::mat & y_in_p, + arma::cube& Pi, arma::mat& psi, arma::vec& phi_mu, + arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, arma::cube& Z_fcst, + arma::mat& mu, arma::mat& phi, arma::mat& sigma, + arma::cube& f, arma::cube& facload, arma::cube& h, + const arma::mat& Lambda_comp, arma::mat prior_Pi_Omega, + const arma::vec& prior_Pi_AR1, + const arma::mat & D_mat, const arma::mat & dt, const arma::mat & d1, + const arma::mat & d_fcst_lags, const arma::vec& prior_psi_mean, + double c0, double c1, double s, bool check_roots, + const arma::mat& Z_1, + double bmu, 
double Bmu, double a0idi, double b0idi, double a0fac, double b0fac, + const Rcpp::NumericVector & Bsigma, double B011inv, double B022inv, + const Rcpp::NumericVector & priorh0, const arma::imat & armarestr, + const arma::mat & armatau2, // armatau2 is the matrix with prior variance of factor loadings + arma::uword n_fac, arma::uword n_reps, arma::uword n_burnin, + arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, + arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, arma::uword n_thin, + bool verbose, bool ssng) { + bool single_freq; + if (n_q == 0 || n_q == n_vars) { + single_freq = true; + } else { + single_freq = false; + } + + Progress p(n_reps+n_burnin, verbose); + + arma::mat Pi_i = Pi.slice(0); + arma::vec psi_i = psi.row(0).t(); + arma::mat X; + arma::mat y_i = y_in_p; + arma::mat x; + arma::vec vol_pred; + + + // fsv + Rcpp::NumericMatrix curpara = Rcpp::NumericMatrix(3, n_vars + n_fac); + arma::mat curpara_arma(curpara.begin(), curpara.nrow(), curpara.ncol(), false); + curpara_arma.fill(0.0); + curpara_arma.row(0).cols(0, n_vars - 1) = mu.col(0).t(); + curpara_arma.row(1) = phi.col(0).t(); + curpara_arma.row(2) = sigma.col(0).t(); + + arma::vec mu_i = mu.col(0); + arma::vec phi_i = phi.col(0); + arma::vec sigma_i = sigma.col(0); + + arma::mat armaf = f.slice(0); + arma::mat armafacload = facload.slice(0); + arma::mat armah = h.slice(0); + arma::mat cc_i = armaf.t() * armafacload.t(); + + arma::vec armah0 = arma::vec(n_vars + n_fac); + + arma::mat Sig_i, y_hat, latent_nofac, h_j, X_j, y_j; + arma::vec error_pred; + arma::vec errors_sv = arma::vec(n_vars + n_fac); + arma::vec errors_var = arma::vec(n_vars + n_fac); + + arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); + arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); + Z_i.rows(0, n_lags - 1) = Z_1; + + arma::mat eps; + bool rue = true; + if (((n_vars*n_lags) > 1.1 * n_T) & (arma::range(prior_Pi_AR1) < 1e-12)) { + rue = false; + eps = 
arma::mat(n_T+n_vars*n_lags, n_vars); + } else { + eps = arma::mat(n_vars*n_lags, n_vars); + } + + + + + // ss + + arma::mat mu_mat, mZ, mZ1, mX; + arma::mat my = arma::mat(arma::size(y_in_p), arma::fill::zeros); + arma::mat Z_i_demean = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); + arma::mat Pi_i0 = arma::mat(n_vars, n_vars*n_lags+1, arma::fill::zeros); + arma::mat Pi_comp = arma::mat(n_vars*n_lags, n_vars*n_lags, arma::fill::zeros); + Pi_comp.submat(n_vars, 0, n_vars*n_lags - 1, n_vars*(n_lags-1) - 1) = arma::eye(n_vars*(n_lags-1), n_vars*(n_lags-1)); + + arma::mat Psi_i = arma::mat(psi_i.begin(), n_vars, n_determ, false, true); + mu_mat = dt * Psi_i.t(); + arma::uword n_Lambda = Lambda_comp.n_cols/Lambda_comp.n_rows; + arma::mat mu_long = arma::mat(n_Lambda+n_T, n_vars, arma::fill::zeros); + arma::rowvec Lambda_single = arma::rowvec(n_Lambda, arma::fill::zeros); + for (arma::uword i = 0; i < n_Lambda; ++i) { + Lambda_single(i) = Lambda_comp.at(0, i*n_q); + } + arma::mat idivar = arma::mat(armah.begin_col(0), armah.n_rows, n_vars, false); + + // ssng + arma::uword nm = n_vars*n_determ; + double lambda_mu_i, phi_mu_i, accept, s_prop, M, batch; + bool adaptive_mh; + if (ssng) { + lambda_mu_i = lambda_mu(0); + phi_mu_i = phi_mu(0); + accept = 0.0; + batch = 1.0; + + adaptive_mh = false; + + if (s < 0) { + M = std::abs(s); + s = 1.0; + adaptive_mh = true; + } + } + + arma::vec omega_i = omega.row(0).t(); + arma::mat inv_prior_psi_Omega = arma::diagmat(1/omega_i); + arma::vec inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; + arma::running_stat stats; + + arma::vec min_vec(2); + min_vec(0) = 0.01; + + if (single_freq) { + Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; + } + + arma::mat curpara_old, armafacload_old, armaf_old; + for (arma::uword i = 0; i < n_reps + n_burnin; ++i) { + if (!single_freq) { + update_demean(my, mu_long, y_in_p, mu_mat, d1, Psi_i, Lambda_single, n_vars, + n_q, n_Lambda, n_T); + } else { + // Even if single freq, mZ needs to 
be updated + mZ = y_in_p - mu_mat; + } + + mZ1 = Z_1 - d1 * Psi_i.t(); + Pi_i0.cols(1, n_vars*n_lags) = Pi_i; + + if (!single_freq) { + Sig_i = arma::exp(0.5 * armah.head_cols(n_vars)); + mZ = simsm_adaptive_univariate(my, Pi_i0, Sig_i, Lambda_comp, mZ1, n_q, T_b, cc_i); + Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; + } + Z_i_demean.rows(0, n_lags - 1) = mZ1; + Z_i_demean.rows(n_lags, n_T + n_lags - 1) = mZ; + + mX = create_X_noint(Z_i_demean, n_lags); + + y_hat = mZ - mX * Pi_i.t(); + + curpara_old = curpara_arma; + armafacload_old = armafacload; + armaf_old = armaf; + update_fsv(armafacload, armaf, armah, armah0, curpara, armatau2, y_hat.t(), + bmu, Bmu, a0idi, b0idi, a0fac, b0fac, Bsigma, B011inv, B022inv, + priorh0, armarestr); + + if ((i+1) % n_thin == 0 && i>= n_burnin) { + mu_i = curpara_old.row(0).t(); + phi_i = curpara_old.row(1).t(); + sigma_i = curpara_old.row(2).t(); // sigma, not sigma2 + if (n_fcst > 0) { + vol_pred = armah.tail_rows(1).t(); + Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t() - mu_mat.tail_rows(n_lags).t(); + for (arma::uword h = 0; h < n_fcst; ++h) { + errors_sv.imbue(norm_rand); + errors_var.imbue(norm_rand); + vol_pred = mu_i + phi_i % (vol_pred - mu_i) + sigma_i % errors_sv; // Twice because we first need it for the volatility, then for the VAR + error_pred = arma::exp(0.5 * vol_pred) % errors_var; + x = create_X_t_noint(Z_fcst_i.cols(0+h, n_lags-1+h).t()); + Z_fcst_i.col(n_lags + h) = Pi_i * x + armafacload_old * error_pred.tail_rows(n_fac) + error_pred.head_rows(n_vars); + } + Z_fcst.slice((i-n_burnin)/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); + } + + + Z.slice((i-n_burnin)/n_thin) = Z_i; + Pi.slice((i-n_burnin)/n_thin) = Pi_i; + psi.row((i-n_burnin)/n_thin) = psi_i.t(); + + f.slice((i-n_burnin)/n_thin) = armaf_old; + facload.slice((i-n_burnin)/n_thin) = armafacload_old; + h.slice((i-n_burnin)/n_thin) = armah; + + + mu.col((i-n_burnin)/n_thin) = mu_i.head(n_vars); + phi.col((i-n_burnin)/n_thin) = phi_i; + 
sigma.col((i-n_burnin)/n_thin) = sigma_i; + + if (ssng) { + phi_mu((i-n_burnin)/n_thin) = phi_mu_i; + lambda_mu((i-n_burnin)/n_thin) = lambda_mu_i; + omega.row((i-n_burnin)/n_thin) = omega_i.t(); + } + } + + + cc_i = armaf.t() * armafacload.t(); // Common component + latent_nofac = mZ - cc_i; + bool stationarity_check = false; + int num_try = 0, iter = 0; + double root = 1000; + while (stationarity_check == false) { + iter += 1; + eps.imbue(norm_rand); + arma::mat output(n_vars*n_lags, n_vars); + if (rue) { + Pi_parallel_rue Pi_parallel_i(output, latent_nofac, mX, prior_Pi_Omega, eps, + armah, prior_Pi_AR1, n_T, n_vars, n_lags); + RcppParallel::parallelFor(0, n_vars, Pi_parallel_i); + } else { + Pi_parallel_bcm Pi_parallel_i(output, latent_nofac, mX, prior_Pi_Omega, eps, + armah, n_T, n_vars, n_lags); + RcppParallel::parallelFor(0, n_vars, Pi_parallel_i); + } + Pi_i = output.t(); + if (check_roots) { + Pi_comp.rows(0, n_vars-1) = Pi_i; + root = max_eig_cpp(Pi_comp); + } else { + root = 0.0; + } + if (root < 1.0) { + stationarity_check = true; + num_try = iter; + } + if (iter == 1000) { + Rcpp::stop("Attemped to draw stationary Pi 1,000 times."); + } + } + if (ssng) { + update_ng(phi_mu_i, lambda_mu_i, omega_i, nm, c0, c1, s, psi_i, prior_psi_mean, accept); + if (adaptive_mh) { + stats(accept); + if (i % 100 == 0) { + batch += 1.0; + min_vec(1) = std::pow(batch, -0.5); + if (stats.mean() > 0.44) { + s_prop = log(s) + arma::min(min_vec); + if (s_prop < M){ + s = std::exp(s_prop); + } + } else { + s_prop = log(s) - arma::min(min_vec); + if (s_prop > -M){ + s = std::exp(s_prop); + } + } + stats.reset(); + } + } + + inv_prior_psi_Omega = arma::diagmat(1/omega_i); + inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; + } + X = create_X_noint(Z_i, n_lags); + posterior_psi_fsv(psi_i, mu_mat, Pi_i, D_mat, arma::exp(idivar), + inv_prior_psi_Omega, Z_i.rows(n_lags, n_T + n_lags - 1), X, + armafacload, armaf, inv_prior_psi_Omega_mean, + dt, n_determ, n_vars, n_lags); + + + if 
(verbose) { + p.increment(); + } + } + +} + + diff --git a/src/mcmc_iw.cpp b/src/mcmc_iw.cpp new file mode 100644 index 0000000..236cc04 --- /dev/null +++ b/src/mcmc_iw.cpp @@ -0,0 +1,280 @@ +#include "mfbvar.h" +#include "minn_utils.h" +#include "ss_utils.h" +#include "update_ng.h" +// [[Rcpp::export]] +void mcmc_minn_iw(const arma::mat & y_in_p, + arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, + const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, + const arma::mat& inv_prior_Pi_Omega, + const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, + const arma::mat & prior_S, const arma::mat& Z_1, + arma::uword n_reps, arma::uword n_burnin, + arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, + arma::uword n_T, arma::uword n_fcst, + arma::uword n_thin, bool verbose, int prior_nu) { + bool single_freq; + if (n_q == 0 || n_q == n_vars) { + single_freq = true; + } else { + single_freq = false; + } + + + Progress p(n_reps+n_burnin, verbose); + arma::mat Pi_i = Pi.slice(0); + arma::mat Sigma_i = Sigma.slice(0); + arma::mat y_i = y_in_p; + arma::vec errors = arma::vec(n_vars); + arma::mat X, XX, XX_inv, Pi_sample, post_Pi_Omega, post_Pi; + arma::mat S, Pi_diff, post_S, Sigma_chol, x; + arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); + arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); + Z_i.rows(0, n_lags - 1) = Z_1; + int post_nu = n_T + n_vars + prior_nu; + + if (single_freq) { + Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; + X = create_X(Z_i, n_lags); + XX = X.t() * X; + XX_inv = arma::inv_sympd(XX); + Pi_sample = XX_inv * (X.t() * y_i); + post_Pi_Omega = arma::inv_sympd(inv_prior_Pi_Omega + XX); + post_Pi = post_Pi_Omega * (Omega_Pi + X.t() * y_i); + S = arma::trans((y_i - X * Pi_sample)) * (y_i - X * Pi_sample); + Pi_diff = prior_Pi_mean - Pi_sample; + post_S = prior_S + S + Pi_diff.t() * arma::inv_sympd(prior_Pi_Omega + XX_inv) * Pi_diff; + } + + Sigma_chol = 
arma::chol(Sigma_i, "lower"); + + for (arma::uword i = 0; i < n_reps + n_burnin; ++i) { + if (!single_freq) { + y_i = simsm_adaptive_cv(y_in_p, Pi_i, Sigma_chol, Lambda_comp, Z_1, n_q, T_b); + Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; + X = create_X(Z_i, n_lags); + XX = X.t() * X; + XX_inv = arma::inv_sympd(XX); + Pi_sample = XX_inv * (X.t() * y_i); + post_Pi_Omega = arma::inv_sympd(inv_prior_Pi_Omega + XX); + post_Pi = post_Pi_Omega * (Omega_Pi + X.t() * y_i); + S = arma::trans((y_i - X * Pi_sample)) * (y_i - X * Pi_sample); + Pi_diff = prior_Pi_mean - Pi_sample; + post_S = prior_S + S + Pi_diff.t() * arma::inv_sympd(prior_Pi_Omega + XX_inv) * Pi_diff; + } + Sigma_i = rinvwish(post_nu, post_S); + Sigma_chol = arma::chol(Sigma_i, "lower"); + Pi_i = rmatn(post_Pi.t(), post_Pi_Omega, Sigma_i); + + if ((i+1) % n_thin == 0 && i >= n_burnin) { + if (n_fcst > 0) { + + Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t(); + for (arma::uword h = 0; h < n_fcst; ++h) { + errors.imbue(norm_rand); + x = create_X_t(Z_fcst_i.cols(0+h, n_lags-1+h).t()); + Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; + } + Z_fcst.slice((i-n_burnin)/n_thin) = Z_fcst_i.t(); + } + + Z.slice((i-n_burnin)/n_thin) = Z_i; + Sigma.slice((i-n_burnin)/n_thin) = Sigma_i; + Pi.slice((i-n_burnin)/n_thin) = Pi_i; + } + if (verbose) { + p.increment(); + } + } + +} + +// [[Rcpp::export]] +void mcmc_ssng_iw(const arma::mat & y_in_p, + arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, + arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, + arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, + const arma::mat& inv_prior_Pi_Omega, + const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, + const arma::mat & prior_S, + const arma::mat & D_mat, const arma::mat & dt, const arma::mat & d1, + const arma::mat & d_fcst_lags, const arma::vec& prior_psi_mean, + double c0, double c1, double s, + bool check_roots, const arma::mat& Z_1, arma::uword n_reps, 
arma::uword n_burnin, + arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, + arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, + arma::uword n_thin, bool verbose, bool ssng) { + bool single_freq; + if (n_q == 0 || n_q == n_vars) { + single_freq = true; + } else { + single_freq = false; + } + + Progress p(n_reps+n_burnin, verbose); + + arma::mat Pi_i = Pi.slice(0); + arma::mat Sigma_i = Sigma.slice(0); + arma::vec psi_i = psi.row(0).t(); + arma::mat y_i, X, XX, XX_inv, Pi_sample, post_Pi_Omega, post_Pi; + arma::mat S, Pi_diff, post_S, x, mu_mat, mZ, mZ1, mX; + arma::mat my = arma::mat(arma::size(y_in_p), arma::fill::zeros); + + arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); + arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); + arma::mat Z_i_demean = Z_i; + Z_i.rows(0, n_lags - 1) = Z_1; + + arma::mat Pi_i0 = arma::mat(n_vars, n_vars*n_lags+1, arma::fill::zeros); + arma::mat Pi_comp = arma::mat(n_vars*n_lags, n_vars*n_lags, arma::fill::zeros); + Pi_comp.submat(n_vars, 0, n_vars*n_lags - 1, n_vars*(n_lags-1) - 1) = arma::eye(n_vars*(n_lags-1), n_vars*(n_lags-1)); + + arma::mat Psi_i = arma::mat(psi_i.begin(), n_vars, n_determ, false, true); + mu_mat = dt * Psi_i.t(); + arma::uword n_Lambda = Lambda_comp.n_cols/Lambda_comp.n_rows; + arma::mat mu_long = arma::mat(n_Lambda+n_T, n_vars, arma::fill::zeros); + arma::rowvec Lambda_single = arma::rowvec(n_Lambda, arma::fill::zeros); + for (arma::uword i = 0; i < n_Lambda; ++i) { + Lambda_single(i) = Lambda_comp.at(0, i*n_q); + } + + int post_nu = n_T + n_vars + 2; + arma::mat Sigma_chol = arma::chol(Sigma_i, "lower"); + + arma::uword nm = n_vars*n_determ; + double lambda_mu_i = lambda_mu(0); + double phi_mu_i = phi_mu(0); + arma::vec omega_i = omega.row(0).t(); + arma::mat inv_prior_psi_Omega = arma::diagmat(1.0/omega_i); + arma::vec inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; + double M, batch = 1.0; + arma::running_stat stats; + double accept = 
0.0; + bool adaptive_mh = false; + double s_prop; + if (s < 0) { + M = std::abs(s); + s = 1.0; + adaptive_mh = true; + } + arma::vec min_vec(2); + min_vec(0) = 0.01; + + // if single freq, we don't need to update + if (single_freq) { + Z_i.rows(n_lags, n_T + n_lags - 1) = y_in_p; + } + for (arma::uword i = 0; i < n_reps + n_burnin; ++i) { + + if (!single_freq) { + update_demean(my, mu_long, y_in_p, mu_mat, d1, Psi_i, Lambda_single, n_vars, + n_q, n_Lambda, n_T); + } else { + // Even if single freq, mZ needs to be updated + mZ = y_in_p - mu_mat; + } + + mZ1 = Z_1 - d1 * Psi_i.t(); + Pi_i0.cols(1, n_vars*n_lags) = Pi_i; + + if (!single_freq) { + mZ = simsm_adaptive_cv(my, Pi_i0, Sigma_chol, Lambda_comp, mZ1, n_q, T_b); + Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; + } + + Z_i_demean.rows(0, n_lags - 1) = mZ1; + Z_i_demean.rows(n_lags, n_T + n_lags - 1) = mZ; + + mX = create_X_noint(Z_i_demean, n_lags); + XX = mX.t() * mX; + XX_inv = arma::inv_sympd(XX); + Pi_sample = XX_inv * (mX.t() * mZ); + + post_Pi_Omega = arma::inv_sympd(inv_prior_Pi_Omega + XX); + post_Pi = post_Pi_Omega * (Omega_Pi + mX.t() * mZ); + S = arma::trans((mZ - mX * Pi_sample)) * (mZ - mX * Pi_sample); + Pi_diff = prior_Pi_mean - Pi_sample; + post_S = prior_S + S + Pi_diff.t() * arma::inv_sympd(prior_Pi_Omega + XX_inv) * Pi_diff; + Sigma_i = rinvwish(post_nu, arma::symmatu(post_S)); //Fixed in 9.400.3 + Sigma_chol = arma::chol(Sigma_i, "lower"); + bool stationarity_check = false; + int num_try = 0, iter = 0; + double root = 1000; + while (stationarity_check == false) { + iter += 1; + Pi_i = rmatn(post_Pi.t(), post_Pi_Omega, Sigma_i); + if (check_roots) { + Pi_comp.rows(0, n_vars-1) = Pi_i; + root = max_eig_cpp(Pi_comp); + } else { + root = 0.0; + } + if (root < 1.0) { + stationarity_check = true; + num_try = iter; + } + if (iter == 1000) { + Rcpp::stop("Attemped to draw stationary Pi 1,000 times."); + } + } + + if (ssng) { + update_ng(phi_mu_i, lambda_mu_i, omega_i, nm, c0, c1, s, psi_i, 
prior_psi_mean, accept); + if (adaptive_mh) { + stats(accept); + if (i % 100 == 0) { + batch += 1.0; + min_vec(1) = std::pow(batch, -0.5); + if (stats.mean() > 0.44) { + s_prop = log(s) + arma::min(min_vec); + if (s_prop < M){ + s = std::exp(s_prop); + } + } else { + s_prop = log(s) - arma::min(min_vec); + if (s_prop > -M){ + s = std::exp(s_prop); + } + } + stats.reset(); + } + } + + inv_prior_psi_Omega = arma::diagmat(1.0/omega_i); + inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; + } + + + X = create_X_noint(Z_i, n_lags); + + posterior_psi_iw(psi_i, mu_mat, Pi_i, D_mat, Sigma_i, inv_prior_psi_Omega, mZ + mu_mat, X, inv_prior_psi_Omega_mean, dt, n_determ, n_vars, n_lags); + arma::vec errors = arma::vec(n_vars); + if ((i+1) % n_thin == 0 && i>=n_burnin) { + if (n_fcst > 0) { + Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t() - mu_mat.tail_rows(n_lags).t(); + for (arma::uword h = 0; h < n_fcst; ++h) { + + errors.imbue(norm_rand); + x = create_X_t_noint(Z_fcst_i.cols(0+h, n_lags-1+h).t()); + Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; + } + Z_fcst.slice((i-n_burnin)/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); + } + Z.slice((i-n_burnin)/n_thin) = Z_i; + Sigma.slice((i-n_burnin)/n_thin) = Sigma_i; + Pi.slice((i-n_burnin)/n_thin) = Pi_i; + psi.row((i-n_burnin)/n_thin) = psi_i.t(); + if (ssng) { + phi_mu((i-n_burnin)/n_thin) = phi_mu_i; + lambda_mu((i-n_burnin)/n_thin) = lambda_mu_i; + omega.row((i-n_burnin)/n_thin) = omega_i.t(); + } + + } + if (verbose) { + p.increment(); + } + } + +} + diff --git a/src/mcmc_minn_diffuse.cpp b/src/mcmc_minn_diffuse.cpp deleted file mode 100644 index 2d8d29e..0000000 --- a/src/mcmc_minn_diffuse.cpp +++ /dev/null @@ -1,91 +0,0 @@ -#include "mfbvar.h" -#include "minn_utils.h" -// [[Rcpp::export]] -void mcmc_minn_diffuse(const arma::mat & y_in_p, - arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, - const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, - const arma::mat& 
Omega_Pi, - const arma::mat& Z_1, - arma::uword n_reps, - arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, - arma::uword n_T, arma::uword n_fcst, - arma::uword n_thin, bool verbose) { - bool single_freq; - if (n_q == 0 || n_q == n_vars) { - single_freq = true; - } else { - single_freq = false; - } - - - Progress p(n_reps, verbose); - arma::vec Pi_vec = arma::vec(Pi.begin(), n_vars*(n_vars*n_lags+1)); - arma::mat Pi_i = Pi.slice(0); //arma::mat(Pi_vec.begin(), n_vars, n_vars*n_lags + 1, false, true); - arma::mat Sigma_i = Sigma.slice(0); - arma::mat y_i = y_in_p; - arma::vec errors = arma::vec(n_vars); - arma::mat X, post_Pi_Omega_inv, L, b, u1, u2, u4, resid, x; - arma::mat u3 = arma::vec(n_vars*(n_vars*n_lags + 1)); - arma::mat post_S, Sigma_chol, Sigma_inv; - arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); - arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); - Z_i.rows(0, n_lags - 1) = Z_1; - - if (single_freq) { - Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; - X = create_X(Z_i, n_lags); - } - - Sigma_chol = arma::chol(Sigma_i, "lower"); - Sigma_inv = arma::inv_sympd(Sigma_i); - arma::vec prior_Pi_Omega_vec_inv = 1.0 / arma::vectorise(prior_Pi_Omega); - - for (arma::uword i = 0; i < n_reps; ++i) { - if (!single_freq) { - y_i = simsm_adaptive_cv(y_in_p, Pi_i, Sigma_chol, Lambda_comp, Z_1, n_q, T_b); - Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; - X = create_X(Z_i, n_lags); - } - - // Pi - post_Pi_Omega_inv = arma::kron(Sigma_inv, X.t() * X); - post_Pi_Omega_inv.diag() += prior_Pi_Omega_vec_inv; - L = arma::chol(post_Pi_Omega_inv, "lower"); - b = arma::vectorise(X.t() * y_i * Sigma_inv + Omega_Pi); - u1 = arma::solve(arma::trimatl(L), b); - u2 = arma::solve(arma::trimatu(L.t()), u1); - u3.imbue(norm_rand); - u4 = arma::solve(arma::trimatu(L.t()), u3); - Pi_vec = u2 + u4; - Pi_i = arma::trans(arma::reshape(Pi_vec, n_vars*n_lags+1, n_vars)); - resid = y_i - X * Pi_i.t(); // Pi_vec and Pi_i use the same memory - 
// Sigma - post_S = resid.t() * resid; - Sigma_i = rinvwish(n_T, post_S); - Sigma_chol = arma::chol(Sigma_i, "lower"); - Sigma_inv = arma::inv_sympd(Sigma_i); - - if ((i+1) % n_thin == 0) { - if (n_fcst > 0) { - - Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t(); - for (arma::uword h = 0; h < n_fcst; ++h) { - errors.imbue(norm_rand); - x = create_X_t(Z_fcst_i.cols(0+h, n_lags-1+h).t()); - Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; - } - Z_fcst.slice(i/n_thin) = Z_fcst_i.t(); - } - - Z.slice(i/n_thin) = Z_i; - Sigma.slice(i/n_thin) = Sigma_i; - Pi.slice(i/n_thin) = Pi_i; - } - if (verbose) { - p.increment(); - } - } - -} - - diff --git a/src/mcmc_minn_iw.cpp b/src/mcmc_minn_iw.cpp deleted file mode 100644 index dc60f1f..0000000 --- a/src/mcmc_minn_iw.cpp +++ /dev/null @@ -1,90 +0,0 @@ -#include "mfbvar.h" -#include "minn_utils.h" -// [[Rcpp::export]] -void mcmc_minn_iw(const arma::mat & y_in_p, - arma::cube& Pi, arma::cube& Sigma, arma::cube& Z, arma::cube& Z_fcst, - const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, - const arma::mat& inv_prior_Pi_Omega, - const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, - const arma::mat & prior_S, const arma::mat& Z_1, - arma::uword n_reps, - arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, - arma::uword n_T, arma::uword n_fcst, - arma::uword n_thin, bool verbose, int prior_nu) { - bool single_freq; - if (n_q == 0 || n_q == n_vars) { - single_freq = true; - } else { - single_freq = false; - } - - - Progress p(n_reps, verbose); - arma::mat Pi_i = Pi.slice(0); - arma::mat Sigma_i = Sigma.slice(0); - arma::mat y_i = y_in_p; - arma::vec errors = arma::vec(n_vars); - arma::mat X, XX, XX_inv, Pi_sample, post_Pi_Omega, post_Pi; - arma::mat S, Pi_diff, post_S, Sigma_chol, x; - arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); - arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); - Z_i.rows(0, n_lags - 1) = Z_1; - int post_nu = n_T + 
n_vars + prior_nu; - - if (single_freq) { - Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; - X = create_X(Z_i, n_lags); - XX = X.t() * X; - XX_inv = arma::inv_sympd(XX); - Pi_sample = XX_inv * (X.t() * y_i); - post_Pi_Omega = arma::inv_sympd(inv_prior_Pi_Omega + XX); - post_Pi = post_Pi_Omega * (Omega_Pi + X.t() * y_i); - S = arma::trans((y_i - X * Pi_sample)) * (y_i - X * Pi_sample); - Pi_diff = prior_Pi_mean - Pi_sample; - post_S = prior_S + S + Pi_diff.t() * arma::inv_sympd(prior_Pi_Omega + XX_inv) * Pi_diff; - } - - Sigma_chol = arma::chol(Sigma_i, "lower"); - - for (arma::uword i = 0; i < n_reps; ++i) { - if (!single_freq) { - y_i = simsm_adaptive_cv(y_in_p, Pi_i, Sigma_chol, Lambda_comp, Z_1, n_q, T_b); - Z_i.rows(n_lags, n_T + n_lags - 1) = y_i; - X = create_X(Z_i, n_lags); - XX = X.t() * X; - XX_inv = arma::inv_sympd(XX); - Pi_sample = XX_inv * (X.t() * y_i); - post_Pi_Omega = arma::inv_sympd(inv_prior_Pi_Omega + XX); - post_Pi = post_Pi_Omega * (Omega_Pi + X.t() * y_i); - S = arma::trans((y_i - X * Pi_sample)) * (y_i - X * Pi_sample); - Pi_diff = prior_Pi_mean - Pi_sample; - post_S = prior_S + S + Pi_diff.t() * arma::inv_sympd(prior_Pi_Omega + XX_inv) * Pi_diff; - } - Sigma_i = rinvwish(post_nu, post_S); - Sigma_chol = arma::chol(Sigma_i, "lower"); - Pi_i = rmatn(post_Pi.t(), post_Pi_Omega, Sigma_i); - - if ((i+1) % n_thin == 0) { - if (n_fcst > 0) { - - Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t(); - for (arma::uword h = 0; h < n_fcst; ++h) { - errors.imbue(norm_rand); - x = create_X_t(Z_fcst_i.cols(0+h, n_lags-1+h).t()); - Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; - } - Z_fcst.slice(i/n_thin) = Z_fcst_i.t(); - } - - Z.slice(i/n_thin) = Z_i; - Sigma.slice(i/n_thin) = Sigma_i; - Pi.slice(i/n_thin) = Pi_i; - } - if (verbose) { - p.increment(); - } - } - -} - - diff --git a/src/mcmc_ss_diffuse.cpp b/src/mcmc_ss_diffuse.cpp deleted file mode 100644 index 3279160..0000000 --- a/src/mcmc_ss_diffuse.cpp +++ /dev/null @@ -1,145 +0,0 @@ 
-#include "mfbvar.h" -#include "ss_utils.h" -// [[Rcpp::export]] -void mcmc_ss_diffuse(const arma::mat & y_in_p, - arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::cube& Z, - arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, - const arma::mat& Omega_Pi, - const arma::mat & D_mat, const arma::mat & dt, const arma::mat & d1, - const arma::mat & d_fcst_lags, const arma::mat& inv_prior_psi_Omega, const arma::mat& inv_prior_psi_Omega_mean, - bool check_roots, const arma::mat& Z_1, arma::uword n_reps, - arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, - arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, - arma::uword n_thin, bool verbose) { - bool single_freq; - if (n_q == 0 || n_q == n_vars) { - single_freq = true; - } else { - single_freq = false; - } - - Progress p(n_reps, verbose); - - arma::vec Pi_vec = arma::vec(Pi.begin(), n_vars*(n_vars*n_lags)); - arma::mat Pi_i = Pi.slice(0); - arma::mat Sigma_i = Sigma.slice(0); - arma::vec psi_i = psi.row(0).t(); - arma::mat y_i = y_in_p; - arma::mat X, post_Pi_Omega_inv, L, b, u1, u2, u4, resid, x; - arma::mat post_S, mu_mat, mZ, mZ1, mX, Sigma_chol, Sigma_inv; - arma::mat u3 = arma::vec(n_vars*(n_vars*n_lags)); - arma::mat my = arma::mat(arma::size(y_in_p), arma::fill::zeros); - - arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); - arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); - arma::mat Z_i_demean = Z_i; - Z_i.rows(0, n_lags - 1) = Z_1; - - arma::mat Pi_i0 = arma::mat(n_vars, n_vars*n_lags+1, arma::fill::zeros); - arma::mat Pi_comp = arma::mat(n_vars*n_lags, n_vars*n_lags, arma::fill::zeros); - Pi_comp.submat(n_vars, 0, n_vars*n_lags - 1, n_vars*(n_lags-1) - 1) = arma::eye(n_vars*(n_lags-1), n_vars*(n_lags-1)); - - arma::mat Psi_i = arma::mat(psi_i.begin(), n_vars, n_determ, false, true); - mu_mat = dt * Psi_i.t(); - arma::uword n_Lambda = Lambda_comp.n_cols/Lambda_comp.n_rows; - arma::mat mu_long = 
arma::mat(n_Lambda+n_T, n_vars, arma::fill::zeros); - arma::rowvec Lambda_single = arma::rowvec(n_Lambda, arma::fill::zeros); - for (arma::uword i = 0; i < n_Lambda; ++i) { - Lambda_single(i) = Lambda_comp.at(0, i*n_q); - } - - Sigma_chol = arma::chol(Sigma_i, "lower"); - Sigma_inv = arma::inv_sympd(Sigma_i); - arma::vec prior_Pi_Omega_vec_inv = 1.0 / arma::vectorise(prior_Pi_Omega); - - // if single freq, we don't need to update - if (single_freq) { - Z_i.rows(n_lags, n_T + n_lags - 1) = y_in_p; - } - - for (arma::uword i = 0; i < n_reps; ++i) { - - if (!single_freq) { - update_demean(my, mu_long, y_in_p, mu_mat, d1, Psi_i, Lambda_single, n_vars, - n_q, n_Lambda, n_T); - } else { - // Even if single freq, mZ needs to be updated - mZ = y_in_p - mu_mat; - } - - mZ1 = Z_1 - d1 * Psi_i.t(); - Pi_i0.cols(1, n_vars*n_lags) = Pi_i; - - if (!single_freq) { - mZ = simsm_adaptive_cv(my, Pi_i0, Sigma_chol, Lambda_comp, mZ1, n_q, T_b); - Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; - } - - Z_i_demean.rows(0, n_lags - 1) = mZ1; - Z_i_demean.rows(n_lags, n_T + n_lags - 1) = mZ; - - mX = create_X_noint(Z_i_demean, n_lags); - // Pi - post_Pi_Omega_inv = arma::kron(Sigma_inv, mX.t() * mX); - post_Pi_Omega_inv.diag() += prior_Pi_Omega_vec_inv; - L = arma::chol(post_Pi_Omega_inv, "lower"); - b = arma::vectorise(mX.t() * mZ * Sigma_inv + Omega_Pi); - u1 = arma::solve(arma::trimatl(L), b); - u2 = arma::solve(arma::trimatu(L.t()), u1); - - bool stationarity_check = false; - int num_try = 0, iter = 0; - double root = 1000; - while (stationarity_check == false) { - iter += 1; - u3.imbue(norm_rand); - u4 = arma::solve(arma::trimatu(L.t()), u3); - Pi_vec = u2 + u4; - Pi_i = arma::trans(arma::reshape(Pi_vec, n_vars*n_lags, n_vars)); - if (check_roots) { - Pi_comp.rows(0, n_vars-1) = Pi_i; - root = max_eig_cpp(Pi_comp); - } else { - root = 0.0; - } - if (root < 1.0) { - stationarity_check = true; - num_try = iter; - } - if (iter == 1000) { - Rcpp::stop("Attemped to draw stationary Pi 
1,000 times."); - } - } - - resid = mZ - mX * Pi_i.t(); - // Sigma - post_S = resid.t() * resid; - Sigma_i = rinvwish(n_T, post_S); - Sigma_chol = arma::chol(Sigma_i, "lower"); - Sigma_inv = arma::inv_sympd(Sigma_i); - - X = create_X_noint(Z_i, n_lags); - posterior_psi_iw(psi_i, mu_mat, Pi_i, D_mat, Sigma_i, inv_prior_psi_Omega, mZ + mu_mat, X, inv_prior_psi_Omega_mean, dt, n_determ, n_vars, n_lags); - - arma::vec errors = arma::vec(n_vars); - if ((i+1) % n_thin == 0) { - if (n_fcst > 0) { - Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t() - mu_mat.tail_rows(n_lags).t(); - for (arma::uword h = 0; h < n_fcst; ++h) { - errors.imbue(norm_rand); - x = create_X_t_noint(Z_fcst_i.cols(0+h, n_lags-1+h).t()); - Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; - } - Z_fcst.slice(i/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); - } - Z.slice(i/n_thin) = Z_i; - Sigma.slice(i/n_thin) = Sigma_i; - Pi.slice(i/n_thin) = Pi_i; - psi.row(i/n_thin) = psi_i.t(); - } - if (verbose) { - p.increment(); - } - } - -} diff --git a/src/mcmc_ss_iw.cpp b/src/mcmc_ss_iw.cpp deleted file mode 100644 index e428218..0000000 --- a/src/mcmc_ss_iw.cpp +++ /dev/null @@ -1,137 +0,0 @@ -#include "mfbvar.h" -#include "ss_utils.h" -// [[Rcpp::export]] -void mcmc_ss_iw(const arma::mat & y_in_p, - arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::cube& Z, - arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, - const arma::mat& inv_prior_Pi_Omega, - const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, - const arma::mat & prior_S, - const arma::mat & D_mat, const arma::mat & dt, const arma::mat & d1, - const arma::mat & d_fcst_lags, const arma::mat& inv_prior_psi_Omega, const arma::mat& inv_prior_psi_Omega_mean, - bool check_roots, const arma::mat& Z_1, arma::uword n_reps, - arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, - arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, - arma::uword n_thin, bool verbose) { - 
bool single_freq; - if (n_q == 0 || n_q == n_vars) { - single_freq = true; - } else { - single_freq = false; - } - - Progress p(n_reps, verbose); - - arma::mat Pi_i = Pi.slice(0); - arma::mat Sigma_i = Sigma.slice(0); - arma::vec psi_i = psi.row(0).t(); - arma::mat y_i = y_in_p; - arma::mat X, XX, XX_inv, Pi_sample, post_Pi_Omega, post_Pi; - arma::mat S, Pi_diff, post_S, x, mu_mat, mZ, mZ1, mX; - arma::mat my = arma::mat(arma::size(y_in_p), arma::fill::zeros); - - arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); - arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); - arma::mat Z_i_demean = Z_i; - Z_i.rows(0, n_lags - 1) = Z_1; - - arma::mat Pi_i0 = arma::mat(n_vars, n_vars*n_lags+1, arma::fill::zeros); - arma::mat Pi_comp = arma::mat(n_vars*n_lags, n_vars*n_lags, arma::fill::zeros); - Pi_comp.submat(n_vars, 0, n_vars*n_lags - 1, n_vars*(n_lags-1) - 1) = arma::eye(n_vars*(n_lags-1), n_vars*(n_lags-1)); - - arma::mat Psi_i = arma::mat(psi_i.begin(), n_vars, n_determ, false, true); - mu_mat = dt * Psi_i.t(); - arma::uword n_Lambda = Lambda_comp.n_cols/Lambda_comp.n_rows; - arma::mat mu_long = arma::mat(n_Lambda+n_T, n_vars, arma::fill::zeros); - arma::rowvec Lambda_single = arma::rowvec(n_Lambda, arma::fill::zeros); - for (arma::uword i = 0; i < n_Lambda; ++i) { - Lambda_single(i) = Lambda_comp.at(0, i*n_q); - } - - int post_nu = n_T + n_vars + 2; - arma::mat Sigma_chol = arma::chol(Sigma_i, "lower"); - - // if single freq, we don't need to update - if (single_freq) { - Z_i.rows(n_lags, n_T + n_lags - 1) = y_in_p; - } - - for (arma::uword i = 0; i < n_reps; ++i) { - if (!single_freq) { - update_demean(my, mu_long, y_in_p, mu_mat, d1, Psi_i, Lambda_single, n_vars, - n_q, n_Lambda, n_T); - } else { - // Even if single freq, mZ needs to be updated - mZ = y_in_p - mu_mat; - } - - mZ1 = Z_1 - d1 * Psi_i.t(); - Pi_i0.cols(1, n_vars*n_lags) = Pi_i; - - if (!single_freq) { - mZ = simsm_adaptive_cv(my, Pi_i0, Sigma_chol, Lambda_comp, mZ1, n_q, 
T_b); - Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; - } - - Z_i_demean.rows(0, n_lags - 1) = mZ1; - Z_i_demean.rows(n_lags, n_T + n_lags - 1) = mZ; - - mX = create_X_noint(Z_i_demean, n_lags); - XX = mX.t() * mX; - XX_inv = arma::inv_sympd(XX); - Pi_sample = XX_inv * (mX.t() * mZ); - post_Pi_Omega = arma::inv_sympd(inv_prior_Pi_Omega + XX); - post_Pi = post_Pi_Omega * (Omega_Pi + mX.t() * mZ); - S = arma::trans((mZ - mX * Pi_sample)) * (mZ - mX * Pi_sample); - Pi_diff = prior_Pi_mean - Pi_sample; - post_S = prior_S + S + Pi_diff.t() * arma::inv_sympd(prior_Pi_Omega + XX_inv) * Pi_diff; - - Sigma_i = rinvwish(post_nu, post_S); - - Sigma_chol = arma::chol(Sigma_i, "lower"); - bool stationarity_check = false; - int num_try = 0, iter = 0; - double root = 1000; - while (stationarity_check == false) { - iter += 1; - Pi_i = rmatn(post_Pi.t(), post_Pi_Omega, Sigma_i); - if (check_roots) { - Pi_comp.rows(0, n_vars-1) = Pi_i; - root = max_eig_cpp(Pi_comp); - } else { - root = 0.0; - } - if (root < 1.0) { - stationarity_check = true; - num_try = iter; - } - if (iter == 1000) { - Rcpp::stop("Attemped to draw stationary Pi 1,000 times."); - } - } - - X = create_X_noint(Z_i, n_lags); - posterior_psi_iw(psi_i, mu_mat, Pi_i, D_mat, Sigma_i, inv_prior_psi_Omega, mZ + mu_mat, X, inv_prior_psi_Omega_mean, dt, n_determ, n_vars, n_lags); - - arma::vec errors = arma::vec(n_vars); - if ((i+1) % n_thin == 0) { - if (n_fcst > 0) { - Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t() - mu_mat.tail_rows(n_lags).t(); - for (arma::uword h = 0; h < n_fcst; ++h) { - errors.imbue(norm_rand); - x = create_X_t_noint(Z_fcst_i.cols(0+h, n_lags-1+h).t()); - Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; - } - Z_fcst.slice(i/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); - } - Z.slice(i/n_thin) = Z_i; - Sigma.slice(i/n_thin) = Sigma_i; - Pi.slice(i/n_thin) = Pi_i; - psi.row(i/n_thin) = psi_i.t(); - } - if (verbose) { - p.increment(); - } - } - -} diff --git 
a/src/mcmc_ssng_diffuse.cpp b/src/mcmc_ssng_diffuse.cpp deleted file mode 100644 index 47fe6e2..0000000 --- a/src/mcmc_ssng_diffuse.cpp +++ /dev/null @@ -1,195 +0,0 @@ -#include "mfbvar.h" -#include "ss_utils.h" -#include "update_ng.h" -// [[Rcpp::export]] -void mcmc_ssng_diffuse(const arma::mat & y_in_p, - arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, - arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, - arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, - const arma::mat& Omega_Pi, - const arma::mat & D_mat, const arma::mat & dt, const arma::mat & d1, - const arma::mat & d_fcst_lags, const arma::vec& prior_psi_mean, - double c0, double c1, double s, - bool check_roots, const arma::mat& Z_1, arma::uword n_reps, - arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, - arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, - arma::uword n_thin, bool verbose) { - bool single_freq; - if (n_q == 0 || n_q == n_vars) { - single_freq = true; - } else { - single_freq = false; - } - - Progress p(n_reps, verbose); - - arma::vec Pi_vec = arma::vec(Pi.begin(), n_vars*(n_vars*n_lags)); - arma::mat Pi_i = Pi.slice(0); - arma::mat Sigma_i = Sigma.slice(0); - arma::vec psi_i = psi.row(0).t(); - arma::mat y_i = y_in_p; - arma::mat X, post_Pi_Omega_inv, L, b, u1, u2, u4, resid, x; - arma::mat post_S, mu_mat, mZ, mZ1, mX, Sigma_chol, Sigma_inv; - arma::mat u3 = arma::vec(n_vars*(n_vars*n_lags)); - arma::mat my = arma::mat(arma::size(y_in_p), arma::fill::zeros); - - arma::mat Z_i = arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); - arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); - arma::mat Z_i_demean = Z_i; - Z_i.rows(0, n_lags - 1) = Z_1; - - arma::mat Pi_i0 = arma::mat(n_vars, n_vars*n_lags+1, arma::fill::zeros); - arma::mat Pi_comp = arma::mat(n_vars*n_lags, n_vars*n_lags, arma::fill::zeros); - Pi_comp.submat(n_vars, 0, n_vars*n_lags - 1, n_vars*(n_lags-1) - 1) = 
arma::eye(n_vars*(n_lags-1), n_vars*(n_lags-1)); - - arma::mat Psi_i = arma::mat(psi_i.begin(), n_vars, n_determ, false, true); - mu_mat = dt * Psi_i.t(); - arma::uword n_Lambda = Lambda_comp.n_cols/Lambda_comp.n_rows; - arma::mat mu_long = arma::mat(n_Lambda+n_T, n_vars, arma::fill::zeros); - arma::rowvec Lambda_single = arma::rowvec(n_Lambda, arma::fill::zeros); - for (arma::uword i = 0; i < n_Lambda; ++i) { - Lambda_single(i) = Lambda_comp.at(0, i*n_q); - } - - Sigma_chol = arma::chol(Sigma_i, "lower"); - Sigma_inv = arma::inv_sympd(Sigma_i); - arma::vec prior_Pi_Omega_vec_inv = 1.0 / arma::vectorise(prior_Pi_Omega); - - // if single freq, we don't need to update - if (single_freq) { - Z_i.rows(n_lags, n_T + n_lags - 1) = y_in_p; - } - - // NG stuff - arma::uword nm = n_vars*n_determ; - double lambda_mu_i = lambda_mu(0); - double phi_mu_i = phi_mu(0); - arma::vec omega_i = omega.row(0).t(); - arma::mat inv_prior_psi_Omega = arma::diagmat(omega_i); - arma::vec inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; - double M, batch = 1.0; - arma::running_stat stats; - double accept = 0.0; - bool adaptive_mh = false; - double s_prop; - if (s < 0) { - M = std::abs(s); - s = 1.0; - adaptive_mh = true; - } - arma::vec min_vec(2); - min_vec(0) = 0.01; - - for (arma::uword i = 0; i < n_reps; ++i) { - - if (!single_freq) { - update_demean(my, mu_long, y_in_p, mu_mat, d1, Psi_i, Lambda_single, n_vars, - n_q, n_Lambda, n_T); - } else { - // Even if single freq, mZ needs to be updated - mZ = y_in_p - mu_mat; - } - - mZ1 = Z_1 - d1 * Psi_i.t(); - Pi_i0.cols(1, n_vars*n_lags) = Pi_i; - - if (!single_freq) { - mZ = simsm_adaptive_cv(my, Pi_i0, Sigma_chol, Lambda_comp, mZ1, n_q, T_b); - Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; - } - - Z_i_demean.rows(0, n_lags - 1) = mZ1; - Z_i_demean.rows(n_lags, n_T + n_lags - 1) = mZ; - - mX = create_X_noint(Z_i_demean, n_lags); - // Pi - post_Pi_Omega_inv = arma::kron(Sigma_inv, mX.t() * mX); - post_Pi_Omega_inv.diag() += 
prior_Pi_Omega_vec_inv; - L = arma::chol(post_Pi_Omega_inv, "lower"); - b = arma::vectorise(mX.t() * mZ * Sigma_inv + Omega_Pi); - u1 = arma::solve(arma::trimatl(L), b); - u2 = arma::solve(arma::trimatu(L.t()), u1); - - bool stationarity_check = false; - int num_try = 0, iter = 0; - double root = 1000; - while (stationarity_check == false) { - iter += 1; - u3.imbue(norm_rand); - u4 = arma::solve(arma::trimatu(L.t()), u3); - Pi_vec = u2 + u4; - Pi_i = arma::trans(arma::reshape(Pi_vec, n_vars*n_lags, n_vars)); - if (check_roots) { - Pi_comp.rows(0, n_vars-1) = Pi_i; - root = max_eig_cpp(Pi_comp); - } else { - root = 0.0; - } - if (root < 1.0) { - stationarity_check = true; - num_try = iter; - } - if (iter == 1000) { - Rcpp::stop("Attemped to draw stationary Pi 1,000 times."); - } - } - - resid = mZ - mX * Pi_i.t(); - // Sigma - post_S = resid.t() * resid; - Sigma_i = rinvwish(n_T, post_S); - Sigma_chol = arma::chol(Sigma_i, "lower"); - Sigma_inv = arma::inv_sympd(Sigma_i); - - update_ng(phi_mu_i, lambda_mu_i, omega_i, nm, c0, c1, s, psi_i, prior_psi_mean, accept); - if (adaptive_mh) { - stats(accept); - if (i % 100 == 0) { - batch += 1.0; - min_vec(1) = std::pow(batch, -0.5); - if (stats.mean() > 0.44) { - s_prop = log(s) + arma::min(min_vec); - if (s_prop < M){ - s = std::exp(s_prop); - } - } else { - s_prop = log(s) - arma::min(min_vec); - if (s_prop > -M){ - s = std::exp(s_prop); - } - } - stats.reset(); - } - } - - inv_prior_psi_Omega = arma::diagmat(1/omega_i); - inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; - - X = create_X_noint(Z_i, n_lags); - posterior_psi_iw(psi_i, mu_mat, Pi_i, D_mat, Sigma_i, inv_prior_psi_Omega, mZ + mu_mat, X, inv_prior_psi_Omega_mean, dt, n_determ, n_vars, n_lags); - - arma::vec errors = arma::vec(n_vars); - if ((i+1) % n_thin == 0) { - if (n_fcst > 0) { - Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t() - mu_mat.tail_rows(n_lags).t(); - for (arma::uword h = 0; h < n_fcst; ++h) { - errors.imbue(norm_rand); - x = 
create_X_t_noint(Z_fcst_i.cols(0+h, n_lags-1+h).t()); - Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; - } - Z_fcst.slice(i/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); - } - Z.slice(i/n_thin) = Z_i; - Sigma.slice(i/n_thin) = Sigma_i; - Pi.slice(i/n_thin) = Pi_i; - psi.row(i/n_thin) = psi_i.t(); - phi_mu(i/n_thin) = phi_mu_i; - lambda_mu(i/n_thin) = lambda_mu_i; - omega.row(i/n_thin) = omega_i.t(); - } - if (verbose) { - p.increment(); - } - } - -} diff --git a/src/mcmc_ssng_iw.cpp b/src/mcmc_ssng_iw.cpp deleted file mode 100644 index 79c520b..0000000 --- a/src/mcmc_ssng_iw.cpp +++ /dev/null @@ -1,189 +0,0 @@ -#include "mfbvar.h" -#include "ss_utils.h" -#include "update_ng.h" -// [[Rcpp::export]] -void mcmc_ssng_iw(const arma::mat & y_in_p, - arma::cube& Pi, arma::cube& Sigma, arma::mat& psi, arma::vec& phi_mu, - arma::vec& lambda_mu, arma::mat& omega, arma::cube& Z, - arma::cube& Z_fcst, const arma::mat& Lambda_comp, const arma::mat& prior_Pi_Omega, - const arma::mat& inv_prior_Pi_Omega, - const arma::mat& Omega_Pi, const arma::mat& prior_Pi_mean, - const arma::mat & prior_S, - const arma::mat & D_mat, const arma::mat & dt, const arma::mat & d1, - const arma::mat & d_fcst_lags, const arma::vec& prior_psi_mean, - double c0, double c1, double s, - bool check_roots, const arma::mat& Z_1, arma::uword n_reps, - arma::uword n_q, arma::uword T_b, arma::uword n_lags, arma::uword n_vars, - arma::uword n_T, arma::uword n_fcst, arma::uword n_determ, - arma::uword n_thin, bool verbose) { - bool single_freq; - if (n_q == 0 || n_q == n_vars) { - single_freq = true; - } else { - single_freq = false; - } - - Progress p(n_reps, verbose); - - arma::mat Pi_i = Pi.slice(0); - arma::mat Sigma_i = Sigma.slice(0); - arma::vec psi_i = psi.row(0).t(); - arma::mat y_i, X, XX, XX_inv, Pi_sample, post_Pi_Omega, post_Pi; - arma::mat S, Pi_diff, post_S, x, mu_mat, mZ, mZ1, mX; - arma::mat my = arma::mat(arma::size(y_in_p), arma::fill::zeros); - - arma::mat Z_i = 
arma::mat(n_lags + y_in_p.n_rows, n_vars, arma::fill::zeros); - arma::mat Z_fcst_i = arma::mat(n_vars, n_lags + n_fcst); - arma::mat Z_i_demean = Z_i; - Z_i.rows(0, n_lags - 1) = Z_1; - - arma::mat Pi_i0 = arma::mat(n_vars, n_vars*n_lags+1, arma::fill::zeros); - arma::mat Pi_comp = arma::mat(n_vars*n_lags, n_vars*n_lags, arma::fill::zeros); - Pi_comp.submat(n_vars, 0, n_vars*n_lags - 1, n_vars*(n_lags-1) - 1) = arma::eye(n_vars*(n_lags-1), n_vars*(n_lags-1)); - - arma::mat Psi_i = arma::mat(psi_i.begin(), n_vars, n_determ, false, true); - mu_mat = dt * Psi_i.t(); - arma::uword n_Lambda = Lambda_comp.n_cols/Lambda_comp.n_rows; - arma::mat mu_long = arma::mat(n_Lambda+n_T, n_vars, arma::fill::zeros); - arma::rowvec Lambda_single = arma::rowvec(n_Lambda, arma::fill::zeros); - for (arma::uword i = 0; i < n_Lambda; ++i) { - Lambda_single(i) = Lambda_comp.at(0, i*n_q); - } - - int post_nu = n_T + n_vars + 2; - arma::mat Sigma_chol = arma::chol(Sigma_i, "lower"); - - arma::uword nm = n_vars*n_determ; - double lambda_mu_i = lambda_mu(0); - double phi_mu_i = phi_mu(0); - arma::vec omega_i = omega.row(0).t(); - arma::mat inv_prior_psi_Omega = arma::diagmat(omega_i); - arma::vec inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; - double M, batch = 1.0; - arma::running_stat stats; - double accept = 0.0; - bool adaptive_mh = false; - double s_prop; - if (s < 0) { - M = std::abs(s); - s = 1.0; - adaptive_mh = true; - } - arma::vec min_vec(2); - min_vec(0) = 0.01; - - // if single freq, we don't need to update - if (single_freq) { - Z_i.rows(n_lags, n_T + n_lags - 1) = y_in_p; - } - for (arma::uword i = 0; i < n_reps; ++i) { - - if (!single_freq) { - my.cols(0, n_vars - n_q - 1) = y_in_p.cols(0, n_vars - n_q - 1) - mu_mat.cols(0, n_vars - n_q - 1); - mu_long.rows(0, n_Lambda-1) = d1.tail_rows(n_Lambda) * Psi_i.t(); - mu_long.rows(n_Lambda, n_T+n_Lambda-1) = mu_mat; - for (arma::uword j = 0; j < n_T; ++j) { - my.row(j).cols(n_vars - n_q - 1, n_vars - 1) = 
y_in_p.row(j).cols(n_vars - n_q - 1, n_vars - 1) - Lambda_single * mu_long.rows(j, j+n_Lambda-1).cols(n_vars - n_q - 1, n_vars - 1);// Needs fixing - } - } else { - // Even if single freq, mZ needs to be updated - mZ = y_in_p - mu_mat; - } - - mZ1 = Z_1 - d1 * Psi_i.t(); - Pi_i0.cols(1, n_vars*n_lags) = Pi_i; - - if (!single_freq) { - mZ = simsm_adaptive_cv(my, Pi_i0, Sigma_chol, Lambda_comp, mZ1, n_q, T_b); - Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; - } - - Z_i_demean.rows(n_lags, n_T + n_lags - 1) = mZ; - Z_i.rows(n_lags, n_T + n_lags - 1) = mZ + mu_mat; - - mX = create_X_noint(Z_i_demean, n_lags); - XX = mX.t() * mX; - XX_inv = arma::inv_sympd(XX); - Pi_sample = XX_inv * (mX.t() * mZ); - - post_Pi_Omega = arma::inv_sympd(inv_prior_Pi_Omega + XX); - post_Pi = post_Pi_Omega * (Omega_Pi + mX.t() * mZ); - S = arma::trans((mZ - mX * Pi_sample)) * (mZ - mX * Pi_sample); - Pi_diff = prior_Pi_mean - Pi_sample; - post_S = prior_S + S + Pi_diff.t() * arma::inv_sympd(prior_Pi_Omega + XX_inv) * Pi_diff; - Sigma_i = rinvwish(post_nu, arma::symmatu(post_S)); //Fixed in 9.400.3 - Sigma_chol = arma::chol(Sigma_i, "lower"); - bool stationarity_check = false; - int num_try = 0, iter = 0; - double root = 1000; - while (stationarity_check == false) { - iter += 1; - Pi_i = rmatn(post_Pi.t(), post_Pi_Omega, Sigma_i); - if (check_roots) { - Pi_comp.rows(0, n_vars-1) = Pi_i; - root = max_eig_cpp(Pi_comp); - } else { - root = 0.0; - } - if (root < 1.0) { - stationarity_check = true; - num_try = iter; - } - if (iter == 1000) { - Rcpp::stop("Attemped to draw stationary Pi 1,000 times."); - } - } - - update_ng(phi_mu_i, lambda_mu_i, omega_i, nm, c0, c1, s, psi_i, prior_psi_mean, accept); - if (adaptive_mh) { - stats(accept); - if (i % 100 == 0) { - batch += 1.0; - min_vec(1) = std::pow(batch, -0.5); - if (stats.mean() > 0.44) { - s_prop = log(s) + arma::min(min_vec); - if (s_prop < M){ - s = std::exp(s_prop); - } - } else { - s_prop = log(s) - arma::min(min_vec); - if (s_prop > 
-M){ - s = std::exp(s_prop); - } - } - stats.reset(); - } - } - - inv_prior_psi_Omega = arma::diagmat(1/omega_i); - inv_prior_psi_Omega_mean = prior_psi_mean / omega_i; - - X = create_X_noint(Z_i, n_lags); - - posterior_psi_iw(psi_i, mu_mat, Pi_i, D_mat, Sigma_i, inv_prior_psi_Omega, mZ + mu_mat, X, inv_prior_psi_Omega_mean, dt, n_determ, n_vars, n_lags); - arma::vec errors = arma::vec(n_vars); - if ((i+1) % n_thin == 0) { - if (n_fcst > 0) { - Z_fcst_i.head_cols(n_lags) = Z_i.tail_rows(n_lags).t() - mu_mat.tail_rows(n_lags).t(); - for (arma::uword h = 0; h < n_fcst; ++h) { - - errors.imbue(norm_rand); - x = create_X_t_noint(Z_fcst_i.cols(0+h, n_lags-1+h).t()); - Z_fcst_i.col(n_lags + h) = Pi_i * x + Sigma_chol * errors; - } - Z_fcst.slice(i/n_thin) = Z_fcst_i.t() + d_fcst_lags * Psi_i.t(); - } - Z.slice(i/n_thin) = Z_i; - Sigma.slice(i/n_thin) = Sigma_i; - Pi.slice(i/n_thin) = Pi_i; - psi.row(i/n_thin) = psi_i.t(); - phi_mu(i/n_thin) = phi_mu_i; - lambda_mu(i/n_thin) = lambda_mu_i; - omega.row(i/n_thin) = omega_i.t(); - } - if (verbose) { - p.increment(); - } - } - -} diff --git a/src/mvn_par.cpp b/src/mvn_par.cpp new file mode 100644 index 0000000..eaa127d --- /dev/null +++ b/src/mvn_par.cpp @@ -0,0 +1,51 @@ +#include +#include +#include "mvn.h" +#include "mvn_par.h" + +Pi_parallel_rue::Pi_parallel_rue(arma::mat & output, + const arma::mat & y, + const arma::mat & X, + const arma::mat & d, + const arma::mat & eps, + const arma::mat & volatility, + const arma::mat & prior_AR1, + const arma::uword T, + const arma::uword n, + const arma::uword p) : output(output), y(y), X(X), d(d), eps(eps), volatility(volatility), prior_AR1(prior_AR1), T(T), n(n), p(p) {}; + +void Pi_parallel_rue::operator()(std::size_t begin, std::size_t end) { + for (std::size_t i = begin; i < end; i++) + { + arma::vec h_j = arma::exp(-0.5 * volatility.col(i)); + arma::mat X_j = X.each_col() % h_j; + arma::vec y_j = y.col(i) % h_j; + arma::vec eps_i = eps.unsafe_col(i); + arma::vec d_i = 
d.unsafe_col(i); + output.col(i) = mvn_rue_eps(X_j, d_i, y_j, eps_i, prior_AR1(i), i); + } +} + +Pi_parallel_bcm::Pi_parallel_bcm(arma::mat & output, + const arma::mat & y, + const arma::mat & X, + const arma::mat & d, + const arma::mat & eps, + const arma::mat & volatility, + const arma::uword T, + const arma::uword n, + const arma::uword p) : output(output), y(y), X(X), d(d), eps(eps), volatility(volatility), T(T), n(n), p(p) {}; + +void Pi_parallel_bcm::operator()(std::size_t begin, std::size_t end) { + for (std::size_t i = begin; i < end; i++) + { + arma::vec h_j = arma::exp(-0.5 * volatility.col(i)); + arma::mat X_j = X.each_col() % h_j; + arma::vec y_j = y.col(i) % h_j; + arma::vec eps_i = eps.unsafe_col(i); + arma::vec d_i = d.unsafe_col(i); + output.col(i) = mvn_bcm_eps(X_j, d_i, y_j, eps_i); + } +} + + diff --git a/src/plot_funs.cpp b/src/plot_funs.cpp index 0b80ce3..3baeaae 100644 --- a/src/plot_funs.cpp +++ b/src/plot_funs.cpp @@ -10,8 +10,9 @@ void variances_fsv(arma::cube & variances, const arma::cube & latent, const arma fac_i = latent.slice(i).row(tt).cols(n_vars, n_vars+n_fac-1); idi_i = latent.slice(i).row(tt); idi_i = idi_i.cols(variables_num-1); - variance_i = facload_i * arma::diagmat(arma::exp(fac_i)) * facload_i.t() + arma::exp(idi_i); - variances.slice(i).row(tt) = arma::sqrt(variance_i.diag()); + variance_i = facload_i * arma::diagmat(arma::exp(fac_i)) * facload_i.t(); + variance_i.diag() += arma::exp(idi_i); + variances.slice(i).row(tt) = arma::sqrt(variance_i.diag().t()); } } } diff --git a/src/posteriors.cpp b/src/posteriors.cpp index 9ec6714..f4f2dc7 100644 --- a/src/posteriors.cpp +++ b/src/posteriors.cpp @@ -96,7 +96,6 @@ void posterior_psi_fsv(arma::vec & psi_i, arma::mat & mu_mat, arma::mat U = build_U_cpp(Pi_i, n_determ, n_vars, n_lags); arma::mat post_psi_Omega = posterior_psi_Omega_fsv(U, D_mat, idivar, inv_prior_psi_Omega); arma::mat Y_tilde = Z_i - X * Pi_i.t() - arma::trans(startfacload * startfac); - arma::mat post_psi = 
posterior_psi_mean_fsv(U, D_mat, idivar, inv_prior_psi_Omega_mean, post_psi_Omega, Y_tilde); psi_i = rmultn(post_psi, post_psi_Omega); diff --git a/src/progutils_fsv.cpp b/src/progutils_fsv.cpp new file mode 100644 index 0000000..643b025 --- /dev/null +++ b/src/progutils_fsv.cpp @@ -0,0 +1,80 @@ +#include "progutils_fsv.h" +// Copyright of original code: Gregor Kastner (factorstochvol package) +// Copyright of modified code: Sebastian Ankargren (mfbvar package) +// The following code is a derivative work of the code +// developed by Gregor Kastner for the factorstochvol package, which +// is licensed GPL>=2. This code is therefore licensed under +// the terms of the GNU Public License, version 3. + +double logdnormquot(double x, double y, double mu, double sigma) { + return ((y-mu)*(y-mu) - (x-mu)*(x-mu)) / (2*sigma*sigma); +} + +double logspecialquot(double x, double y, double alpha, double beta, double c) { + return (alpha/c) * (x - y) - beta * (exp(x/c) - exp(y/c)); +} + + +void store(const Rcpp::NumericMatrix &curfacload, Rcpp::NumericVector &facload, + const Rcpp::NumericMatrix &curf, Rcpp::NumericVector &f, + const Rcpp::NumericMatrix &curh, Rcpp::NumericVector &h, + const Rcpp::NumericVector &curh0, Rcpp::NumericMatrix &h0, + const Rcpp::NumericMatrix &curpara, Rcpp::NumericVector ¶, + const Rcpp::NumericVector &curlambda2, Rcpp::NumericMatrix &lambda2, + const Rcpp::NumericMatrix &curtau2, Rcpp::NumericVector &tau2, + const Rcpp::NumericVector &curmixprob, Rcpp::NumericVector &mixprob, + const Rcpp::IntegerMatrix &curmixind, Rcpp::IntegerVector &mixind, + const bool auxstore, const int thintime, const int where) { + + std::copy(curfacload.begin(), curfacload.end(), facload.begin() + where * curfacload.length()); + std::copy(curpara.begin(), curpara.end(), para.begin() + where * curpara.length()); + + if (thintime == 1) { // store everything + + std::copy(curf.begin(), curf.end(), f.begin() + where * curf.length()); + std::copy(curh.begin(), curh.end(), 
h.begin() + where * curh.length()); + + } else if (thintime == -1) { // store only t = T + + for (int i = 0; i < curf.nrow(); i++) { + f(where*curf.nrow() + i) = curf(i, curf.ncol()-1); + } + + for (int i = 0; i < curh.ncol(); i++) { + h(where*curh.ncol() + i) = curh(curh.nrow()-1, i); + } + + } else if (thintime > 1) { // store every thintimeth point in time + + int tmp = curf.ncol()/thintime; + int tmpp = where * curf.nrow() * tmp; + + for (int j = 0; j < tmp; ++j) { + int tmppp = j*thintime; + int tmpppp = tmpp + j*curf.nrow(); + + for (int i = 0; i < curf.nrow(); ++i) { + f(tmpppp + i) = curf(i, tmppp); + } + } + + tmpp = where * curh.ncol() * tmp; + + for (int i = 0; i < curh.ncol(); ++i) { + int tmpppp = tmpp + i*tmp; + + for (int j = 0; j < tmp; ++j) { + h(tmpppp + j) = curh(j*thintime, i); + } + } + } + + std::copy(curh0.begin(), curh0.end(), h0.begin() + where * curh0.length()); + + if (auxstore) { // store mixture probabilities, mixture indicators, shrinkage hyperparas, h0 + std::copy(curmixprob.begin(), curmixprob.end(), mixprob.begin() + where * curmixprob.length()); + std::copy(curmixind.begin(), curmixind.end(), mixind.begin() + where * curmixind.length()); + std::copy(curlambda2.begin(), curlambda2.end(), lambda2.begin() + where * curlambda2.length()); + std::copy(curtau2.begin(), curtau2.end(), tau2.begin() + where * curtau2.length()); + } +} diff --git a/src/progutils_fsv.h b/src/progutils_fsv.h new file mode 100644 index 0000000..9776cbc --- /dev/null +++ b/src/progutils_fsv.h @@ -0,0 +1,20 @@ +#ifndef _PROGUTILS_H +#define _PROGUTILS_H + +#include + +double logdnormquot(double x, double y, double mu, double sigma); +double logspecialquot(double x, double y, double alpha, double beta, double c); + +void store(const Rcpp::NumericMatrix &curfacload, Rcpp::NumericVector &facload, + const Rcpp::NumericMatrix &curf, Rcpp::NumericVector &f, + const Rcpp::NumericMatrix &curh, Rcpp::NumericVector &h, + const Rcpp::NumericVector &curh0, Rcpp::NumericMatrix 
&h0, + const Rcpp::NumericMatrix &curpara, Rcpp::NumericVector ¶, + const Rcpp::NumericVector &curlambda2, Rcpp::NumericMatrix &lambda2, + const Rcpp::NumericMatrix &curtau2, Rcpp::NumericVector &tau2, + const Rcpp::NumericVector &curmixprob, Rcpp::NumericVector &mixprob, + const Rcpp::IntegerMatrix &curmixind, Rcpp::IntegerVector &mixind, + const bool auxstore, const int thintime, const int where); + +#endif diff --git a/src/rgig.cpp b/src/rgig.cpp index 4e35a59..37af332 100644 --- a/src/rgig.cpp +++ b/src/rgig.cpp @@ -14,3 +14,18 @@ double do_rgig1(double lambda, double chi, double psi) { if (!fun) fun = (SEXP(*)(int, double, double, double)) R_GetCCallable("GIGrvg", "do_rgig"); return Rcpp::as(fun(1, lambda, chi, psi)); } + +// [[Rcpp::export]] +double rig(double mu, double lambda){ + double z = R::rnorm(0,1); + double y = z*z; + double x = mu+0.5*mu*mu*y/lambda - 0.5*(mu/lambda)*sqrt(4*mu*lambda*y+mu*mu*y*y); + double u=R::runif(0,1); + double out; + if(u <= mu/(mu+x)){ + out = x; + } else { + out = mu*mu/x; + } + return out; +} diff --git a/src/rnd_numbers.cpp b/src/rnd_numbers.cpp index 1a6ac9b..401430c 100644 --- a/src/rnd_numbers.cpp +++ b/src/rnd_numbers.cpp @@ -2,6 +2,7 @@ //' @rdname dnorminvwish //' @keywords internal +//' @noRd // [[Rcpp::export]] arma::mat rmatn(const arma::mat & M, const arma::mat & Q, const arma::mat & P){ /*------------------------------------------------------- @@ -19,6 +20,7 @@ arma::mat rmatn(const arma::mat & M, const arma::mat & Q, const arma::mat & P){ //' @rdname dnorminvwish //' @keywords internal +//' @noRd // [[Rcpp::export]] arma::mat rinvwish(int v, const arma::mat & S){ int p = S.n_rows; @@ -42,6 +44,7 @@ arma::mat rinvwish(int v, const arma::mat & S){ //' @rdname dmultn //' @keywords internal +//' @noRd // [[Rcpp::export]] arma::vec rmultn(const arma::vec & m, const arma::mat & Sigma){ /*------------------------------------------------------- diff --git a/src/smoothing.cpp b/src/smoothing.cpp index 6abd904..c7580fd 
100644 --- a/src/smoothing.cpp +++ b/src/smoothing.cpp @@ -20,6 +20,7 @@ //' @templateVar P0 TRUE //' @template man_template //' @keywords internal +//' @noRd //' @return For \code{loglike}: //' \item{}{An \code{n_T}-long vector of the log-likelihoods. \code{exp(sum(loglike(...)))} is the likelihood.} // [[Rcpp::export]] @@ -27,7 +28,7 @@ arma::mat loglike( arma::mat Y, arma::mat Lambda, arma::mat Pi_comp, /* This function computes the smoothed state vector */ /****************************************************/ /* Initialize matrices and cubes */ - arma::mat QQ = Q_comp * Q_comp.t(); + arma::mat QQ = arma::symmatu(Q_comp * Q_comp.t()); arma::mat mv(n_T, n_vars); mv.fill(NA_REAL); arma::mat me(n_T, n_vars); @@ -52,7 +53,7 @@ arma::mat loglike( arma::mat Y, arma::mat Lambda, arma::mat Pi_comp, /* Fill some temporary variables */ arma::mat h1 = Pi_comp * z0; - arma::mat P1 = Pi_comp * P0 * Pi_comp.t() + QQ; + arma::mat P1 = arma::symmatu(Pi_comp * P0 * Pi_comp.t() + QQ); arma::mat mH = Lambda.rows(obs_vars); arma::mat vz = mz.cols(obs_vars); @@ -60,7 +61,7 @@ arma::mat loglike( arma::mat Y, arma::mat Lambda, arma::mat Pi_comp, vv.cols(obs_vars) = vz - trans(mH * h1); mv.row(0) = vv; - arma::mat aS = mH * P1 * mH.t(); + arma::mat aS = arma::symmatu(mH * P1 * mH.t()); arma::mat mIS = IS.slice(0); mIS(obs_vars, obs_vars) = inv_sympd(aS); IS.slice(0) = mIS; @@ -70,7 +71,7 @@ arma::mat loglike( arma::mat Y, arma::mat Lambda, arma::mat Pi_comp, aK.slice(0) = mK; arma::mat h2 = h1 + mK.cols(obs_vars) * trans(vv.cols(obs_vars)); - arma::mat P2 = (identity_mat - mK.cols(obs_vars) * mH) * P1; + arma::mat P2 = arma::symmatu((identity_mat - mK.cols(obs_vars) * mH) * P1); double log_det_val; double log_det_sign; @@ -80,7 +81,7 @@ arma::mat loglike( arma::mat Y, arma::mat Lambda, arma::mat Pi_comp, obs_vars = find_finite(mz); h1 = Pi_comp * h2; - P1 = Pi_comp * P2 * Pi_comp.t() + QQ; + P1 = arma::symmatu(Pi_comp * P2 * Pi_comp.t() + QQ); mH = Lambda.rows(obs_vars); vz = 
mz.cols(obs_vars); @@ -89,7 +90,7 @@ arma::mat loglike( arma::mat Y, arma::mat Lambda, arma::mat Pi_comp, vv.cols(obs_vars) = vz - trans(mH * h1); mv.row(i) = vv; - aS = mH * P1 * mH.t(); + aS = arma::symmatu(mH * P1 * mH.t()); mIS = IS.slice(i); mIS(obs_vars, obs_vars) = inv_sympd(aS); IS.slice(i) = mIS; @@ -99,9 +100,9 @@ arma::mat loglike( arma::mat Y, arma::mat Lambda, arma::mat Pi_comp, aK.slice(i) = mK; h2 = h1 + mK.cols(obs_vars) * trans(vv.cols(obs_vars)); - P2 = (identity_mat - mK.cols(obs_vars) * mH) * P1; + P2 = arma::symmatu((identity_mat - mK.cols(obs_vars) * mH) * P1); log_det(log_det_val, log_det_sign, aS); - logl.row(i) = -0.5* obs_vars.n_elem * log(2*M_PI) - (log_det_val + vv.cols(obs_vars) * mIS(obs_vars, obs_vars) * trans(vv.cols(obs_vars)))*0.5; + logl.row(i) = -0.5* obs_vars.n_elem * std::log(2*M_PI) - (log_det_val + vv.cols(obs_vars) * mIS(obs_vars, obs_vars) * trans(vv.cols(obs_vars)))*0.5; } /* The return is the smoothed state vector */ diff --git a/src/ss_utils.h b/src/ss_utils.h index 33ed8eb..7c6d6ee 100644 --- a/src/ss_utils.h +++ b/src/ss_utils.h @@ -7,4 +7,27 @@ void update_demean(arma::mat & my, arma::mat & mu_long, const arma::mat & y_in_p, const arma::mat & mu_mat, const arma::mat & d1, const arma::mat & Psi_i, const arma::mat & Lambda_single, arma::uword n_vars, arma::uword n_q, arma::uword n_Lambda, arma::uword n_T); + +void posterior_psi_iw(arma::vec & psi_i, arma::mat & mu_mat, + const arma::mat & Pi_i, const arma::mat & D_mat, + const arma::mat & Sigma_i, const arma::mat & inv_prior_psi_Omega, + const arma::mat & Z_i, const arma::mat & X, + const arma::mat & inv_prior_psi_Omega_mean, const arma::mat & dt, + int n_determ, int n_vars, int n_lags); +void posterior_psi_csv(arma::vec & psi_i, arma::mat & mu_mat, + const arma::mat & Pi_i, const arma::mat & D_mat, + const arma::mat & Sigma_chol_inv, const arma::mat & exp_sqrt_f, + const arma::mat & inv_prior_psi_Omega, + const arma::mat & Z_i, const arma::mat & X, + const arma::mat & 
inv_prior_psi_Omega_mean, const arma::mat & dt, + int n_determ, int n_vars, int n_lags); +void posterior_psi_fsv(arma::vec & psi_i, arma::mat & mu_mat, + const arma::mat & Pi_i, const arma::mat & D_mat, + const arma::mat & idivar, const arma::mat & inv_prior_psi_Omega, + const arma::mat & Z_i, const arma::mat & X, + const arma::mat & startfacload, const arma::mat & startfac, + const arma::mat & inv_prior_psi_Omega_mean, const arma::mat & dt, + int n_determ, int n_vars, int n_lags); + +double max_eig_cpp(const arma::mat & A); #endif diff --git a/src/update_csv.cpp b/src/update_csv.cpp index a955122..29598fa 100644 --- a/src/update_csv.cpp +++ b/src/update_csv.cpp @@ -21,6 +21,18 @@ void update_csv( const double phi_meaninvvar, const double prior_sigma2, const double prior_df) { + // data: data matrix + // phi: AR(1) parameter + // sigma: standard deviation of log-volatility innovation + // h: vector of log volatilities + // h0: log volatility initial value + // mixprob: mixture probabilities for Kim, Shephard, Chib (1998) algorithm + // r: mixture indicators for KSC (1998) algorithm + // priorlatent0: prior variance for initial value of log volatility + // phi_invvar: inverse of prior variance for AR(1) parameter + // phi_meaninvvar: prior mean of AR(1) parameter times phi_invvar + // prior_sigma2: prior mean of variance of innovation + // prior_df: prior degrees of freedom for variance of innovation int T = data.n_rows; int n = data.n_cols; diff --git a/src/update_dl.cpp b/src/update_dl.cpp new file mode 100644 index 0000000..fad966d --- /dev/null +++ b/src/update_dl.cpp @@ -0,0 +1,62 @@ +#include "mfbvar.h" +void update_dl(arma::mat & prior_Pi_Omega, arma::vec & aux, + arma::vec & local, double & global, const arma::mat & Pi_i, + arma::uword n_vars, arma::uword n_lags, const double a, + arma::vec & slice, bool gig = true, bool intercept = true) { + + arma::vec Pi_vec; + if (intercept) { + Pi_vec = arma::vectorise(Pi_i.rows(1, n_vars*n_lags)); + } else { + Pi_vec = 
arma::vectorise(Pi_i); + } + + + arma::uword K = Pi_vec.n_elem; + + for (arma::uword i = 0; i < K; ++i) { + aux[i] = 1.0/rig(global * local[i] / fabs(Pi_vec[i]), 1.0); + } + arma::vec Pi_local = arma::abs(Pi_vec) / local; + + global = do_rgig1(K*(a-1.0), 2.0 * arma::accu(Pi_local), 1.0); + + + if (gig) { + for (arma::uword i = 0; i < K; ++i) { + local[i] = do_rgig1((a-1.0), 2.0 * fabs(Pi_vec[i]), 1.0); + } + } else { + arma::vec u1 = arma::vec(K); + std::generate(u1.begin(), u1.end(), ::unif_rand); + u1 %= arma::exp(-0.5 / slice); + arma::vec lb = 0.5/(arma::log(1/u1)); + double Flb; + arma::vec u2 = arma::vec(K); + for (arma::uword i = 0; i < K; ++i) { + Flb = R::pgamma(lb[i], 1-a, 1/fabs(Pi_vec[i]), true, false); + u2[i] = R::runif(Flb, 1.0); + } + arma::uvec u3 = arma::find(u2 > 1-(1e-16)); + if (u3.n_elem > 0) { + u2.elem(u3).fill(1-(1e-16)); + } + for (arma::uword i = 0; i < K; ++i) { + slice[i] = R::qgamma(u2[i], 1-a, 1/fabs(Pi_vec[i]), true, false); + } + local = 1/slice; + } + + local = local / arma::accu(local); + + arma::uvec local_idx = arma::find(local < 1e-20); + local.elem(local_idx).fill(1e-20); + + if (intercept) { + prior_Pi_Omega.rows(1, n_vars*n_lags) = arma::reshape(aux % arma::pow(global * local, 2.0), n_vars*n_lags, n_vars); + } else { + prior_Pi_Omega = arma::reshape(aux % arma::pow(global * local, 2.0), n_vars*n_lags, n_vars); + } + + +} diff --git a/src/update_fsv.cpp b/src/update_fsv.cpp new file mode 100644 index 0000000..bb939a4 --- /dev/null +++ b/src/update_fsv.cpp @@ -0,0 +1,459 @@ +#include "mfbvar.h" +#include // decl'd and def'd in "stochvol" (univariate SV-update) +#include "progutils_fsv.h" +#include "auxmix.h" + +// Copyright of original code: Gregor Kastner (factorstochvol package) +// Copyright of modified code: Sebastian Ankargren (mfbvar package) +// The following code is a derivative work of the code +// developed by Gregor Kastner for the factorstochvol package, which +// is licensed GPL>=2. 
This code is therefore licensed under +// the terms of the GNU Public License, version 3. + + +// curfacload changed to armafacload +void update_fsv(arma::mat & armafacload, arma::mat & armaf, arma::mat & armah, + arma::vec & armah0, + Rcpp::NumericMatrix & curpara, + const arma::mat & armatau2, + const arma::mat & armay, + const double bmu, const double Bmu, const double a0idi, const double b0idi, + const double a0fac, const double b0fac, const Rcpp::NumericVector & Bsigma, + const double B011inv, const double B022inv, + const Rcpp::NumericVector & priorh0, const arma::imat & armarestr) { + // The function uses a subset of what is used in factorstochvol, see its + // documentation for what each variable represents + + bool Gammaprior = true; + bool truncnormal = false; + double MHcontrol = -1.0; + int MHsteps = 2; + int parameterization = 3; + + const int interweaving = 4; + const bool signswitch = false; + const bool samplefac = true; + int nlambda = 0; + const double c0 = 2.5; + const Rcpp::NumericVector C0 = 1.5*Bsigma; + + using namespace Rcpp; + + const int m = armay.n_rows; // number of time series + const int T = armay.n_cols; // length of time series + const int r = armafacload.n_cols; // number of latent factors + const int mpr = m + r; + + arma::irowvec nonzerospercol = arma::sum(armarestr, 0); + arma::icolvec nonzerosperrow = arma::sum(armarestr, 1); + + // restriction on factor loadings matrix: + for (int i = 0; i < m; i++) { + for (int j = 0; j < r; j++) { + if (armarestr(i, j) == 0) armafacload(i,j) = 0.; + } + } + /* + * Needs to be done in R first + // restriction on factor loadings matrix: + for (int i = 0; i < m; i++) { + for (int j = 0; j < r; j++) { + if (armarestr(i,j) == 0) armatau2(i,j) = 0.; + } + } + */ + // pre-calculation of a posterior parameter + double cT = 0; + if (Gammaprior) { + if (MHsteps == 2 || MHsteps == 3) cT = T/2.0; // we want IG(-.5,0) as proposal + else if (MHsteps == 1) cT = (T-1)/2.0; // we want IG(-.5,0) as proposal + 
} else { + if (MHsteps == 2) cT = c0 + (T+1)/2.0; // pre-calculation outside the loop + } + + int tmpcounter = 0; + arma::uvec diagindices(m); + for (int k = 0; k < m; k++) { + for (int l = k; l < m; l++) { + if (k == l) diagindices(k) = tmpcounter; + tmpcounter++; + } + } + + //convention: "arma"-prefixed variables denote Armadillo proxy objects + arma::mat armafacloadt = arma::trans(armafacload); + arma::uvec armafacloadtunrestrictedelements = arma::find(armarestr.t() != 0); + arma::vec armafacloadtmp = arma::zeros(armafacloadtunrestrictedelements.size()); + arma::vec armafacload2inter(r, arma::fill::zeros); + arma::mat armahtilde(armah.n_rows, m); + + + + + + //current shrinkage latents lambda^2 + arma::vec armalambda2(nlambda); + + // temporary stroage for hopen in interweaving + arma::vec hopen(T); + + // NOTE: (Almost) all storage of MCMC draws is done in NumericVectors + // because no 'array' structure is available at this point in time. + + // facload holds the factor loadings: + NumericVector facload(m * r); + facload.attr("dim") = Dimension(m, r, 1); + + //current mixture indicator draws + IntegerMatrix curmixind(T, mpr); + NumericVector curmixprob(10 * T * mpr); + + // h holds the latent log-volatilities, but not h0! + NumericVector h(T); + h.attr("dim") = Dimension(1, T, 1); + + // f holds the latent factor draws + NumericVector f(T); + f.attr("dim") = Dimension(T, 1, 1); + + + // mixind holds the mixture indicators for the auxiliary mixture sampling + IntegerVector mixind(T * mpr); + + // mixprob holds the mixture probabilities for the auxmix + NumericVector mixprob(10 * T * mpr); + //mixprob.attr("dim") = Dimension(10, T, mpr, 1); no 4-dim possible? 
+ + // para holds the parameter draws (mu, phi, sigma) + NumericVector para(3 * mpr * (1)); + para.attr("dim") = Dimension(3, mpr, 1) ; + + // curynorm will hold log((y - facload %*% f)^2) in STEP 1 + arma::mat armaynorm(m, T); + + // curynorm2 will hold log(f^2) in STEP 1 + arma::mat armafnorm(r, T, arma::fill::zeros); + arma::mat armaXt(r, T); + arma::mat armaXt2(r, m); + arma::colvec armaytilde(T); + arma::colvec armaytilde2(m); + arma::mat armaSigma(r, r); + arma::mat armaR(r, r); + arma::mat armaRinv(r, r); + arma::mat armaSigma2(r, r); + arma::mat armaR2(r, r); + arma::mat armaR2inv(r, r); + arma::colvec armamean(r); + arma::colvec armamean2(r); + arma::colvec armadraw(r); + arma::colvec armadraw2(r*T); + + // we always use the centered parameterization as baseline + // (for compatibility reasons with stochvol) + const bool centered_baseline = true; + + // RNGScope scope; + // variables are declared afterwards + + + + + // temporary variables for the updated stochvol code + arma::mat curpara_arma(curpara.begin(), curpara.nrow(), curpara.ncol(), false); + arma::mat curmixprob_arma(curmixprob.begin(), 10*T, mpr, false); + arma::imat curmixind_arma(curmixind.begin(), curmixind.nrow(), curmixind.ncol(), false); + + // "linearized residuals" + // NOTE: "log", "square" are component-wise functions, '*' denotes matrix multiplication + double offset = 0.00001; + if (r > 0) { + armaynorm = log(square(armay - armafacload * armaf)); + } else { + armaynorm = log(square(armay) + offset); + } + armafnorm = log(square(armaf)); + + + + armahtilde = exp(-armah(arma::span::all, arma::span(0,m-1))/2.); + + for (arma::uword ii = 0; ii < armay.n_cols; ii++) { + Rcpp::NumericVector tmp(5); + tmp[0] = armay(0, ii); + tmp[1] = armafacload[0]; + tmp[2] = armaf[ii]; + tmp[3] = arma::as_scalar(armafacload.row(0) * armaf.col(ii)); + tmp[4] = armaynorm(0, ii); + + } + // STEP 1: + // update indicators, latent volatilities, and SV-parameters + + + + // STEP 1 for "linearized residuals" + 
+ + for (int j = 0; j < m; j++) { + + double curh0j = armah0(j); + arma::vec curpara_j = curpara_arma.unsafe_col(j); + arma::vec curh_j = armah.unsafe_col(j); + arma::vec armaynorm_j = armaynorm.row(j).t(); + arma::vec curmixprob_j = curmixprob_arma.unsafe_col(j); + arma::ivec curmixind_j = curmixind_arma.unsafe_col(j); + double priorh0_j = priorh0(j); + double C0_j = C0(j); + double Bsigma_j = Bsigma(j); + + if (j < 1) { + + } + stochvol::update_sv(armaynorm_j, curpara_j, curh_j, curh0j, curmixprob_j, curmixind_j, + centered_baseline, C0_j, cT, Bsigma_j, a0idi, b0idi, bmu, Bmu, B011inv, B022inv, Gammaprior, + truncnormal, MHcontrol, MHsteps, parameterization, false, priorh0_j); + + armah0(j) = curh0j; + } + + + + // STEP 1 for factors + for (int j = m; j < mpr; j++) { + double curh0j = armah0(j); + arma::vec curpara_j = curpara_arma.unsafe_col(j); + arma::vec curh_j = armah.unsafe_col(j); + arma::vec curmixprob_j = curmixprob_arma.unsafe_col(j); + arma::ivec curmixind_j = curmixind_arma.unsafe_col(j); + stochvol::update_sv(armafnorm.row(j-m).t(), curpara_j, curh_j, curh0j, curmixprob_j, curmixind_j, + centered_baseline, C0(j), cT, Bsigma(j), a0fac, b0fac, bmu, Bmu, B011inv, B022inv, Gammaprior, + truncnormal, MHcontrol, MHsteps, parameterization, true, priorh0(j)); + armah0(j) = curh0j; + } + + // intermediate step: calculate transformation of curh + + + armahtilde = exp(-armah(arma::span::all, arma::span(0,m-1))/2.); + + + // STEP 2: + // update factor loadings: m independent r-variate regressions + // with T observations (for unrestricted case) + + + if (r > 0) { + + int oldpos = 0; + for (int j = 0; j < m; j++) { + + // TODO: some things outside + + + // transposed design matrix Xt is filled "manually" + int activecols = 0; + for (int l = 0; l < r; l++) { + for (int k = 0; k < T; k++) { + armaXt(activecols, k) = armaf(l, k) * armahtilde(k, j); + } + activecols++; + } + + armaytilde = armay.row(j).t() % armahtilde.col(j); + + + // Now draw from the multivariate 
normal distribution + // armaSigma is first used as temporary variable: + armaSigma.submat(0,0,activecols-1,activecols-1) = armaXt.rows(0,activecols-1) * armaXt.rows(0,activecols-1).t(); + + // add precisions to diagonal: + armaSigma.submat(0,0,activecols-1,activecols-1).diag() += 1/arma::nonzeros(armatau2.row(j)); + // Find Cholesky factor of posterior precision matrix + try { + armaR.submat(0, 0, activecols-1, activecols-1) = arma::chol(armaSigma.submat(0,0,activecols-1,activecols-1)); + } catch (...) { + ::Rf_error("Error: Couldn't Cholesky-decompose posterior loadings precision in row %i", j+1); + } + + + // TODO: Check whether Armadillo automatically exploits the fact that R2 is upper triangular for inversion + // (Partial) Answer: Seems to be OK for native R but solve(trimatu(R), I) is faster with OpenBLAS + try { + // armaRinv.submat(0,0,activecols-1,activecols-1) = arma::inv(arma::trimatu(armaR.submat(0,0,activecols-1,activecols-1))); + armaRinv.submat(0,0,activecols-1,activecols-1) = + arma::solve(arma::trimatu(armaR.submat(0,0,activecols-1,activecols-1)), + arma::eye(activecols, activecols)); + } catch (...) { + ::Rf_error("Error: Couldn't invert Cholesky factor of posterior loadings precision in row %i", j+1); + } + + // calculate posterior covariance armaSigma: + armaSigma.submat(0, 0, activecols-1, activecols-1) = + armaRinv.submat(0, 0, activecols-1, activecols-1) * + armaRinv.submat(0, 0, activecols-1, activecols-1).t(); + + // calculate posterior mean: + armamean.head(activecols) = armaSigma.submat(0, 0, activecols-1, activecols-1) * + armaXt.submat(0, 0, activecols-1, T-1) * + armaytilde; + + // draw from the r-variate normal distribution + armadraw = rnorm(r); + + try { + armafacloadtmp(arma::span(oldpos, oldpos + activecols - 1)) = armamean.head(activecols) + armaRinv.submat(0,0,activecols-1,activecols-1) * armadraw.head(activecols); + } catch(...) 
{ + ::Rf_error("Error: Couldn't sample row %i of factor loadings", j+1); + } + + // Rprintf("\n%i to %i: ", oldpos, oldpos+activecols-1); + //for (int is = oldpos; is < oldpos+activecols; is++) Rprintf("%f ", armafacloadtmp(is)); + //Rprintf("\n\n"); + oldpos = oldpos + activecols; + + } + armafacloadt(armafacloadtunrestrictedelements) = armafacloadtmp; + armafacload = arma::trans(armafacloadt); + + //Rprintf("\n\n"); + //for (int is = 0; is < m; is++) Rprintf("%f %f\n", curfacload(is, 0), curfacload(is, 1)); + // STEP 2+: "Deep" Interweaving + + + for (int j = 0; j < r; j++) { + + int userow = j; + if (interweaving == 4) { // find largest absolute element in column to interweave + userow = 0; + for (int k = 1; k < m; k++) if (std::fabs(armafacload(k, j)) > std::fabs(armafacload(userow, j))) userow = k; + } + + + //Rprintf("%i and %i\n", j, userow); + + double phi = curpara(1,m+j); + double sigma = curpara(2,m+j); + double mu_old = log(armafacload(userow,j) * armafacload(userow,j)); + hopen = armah.col(m+j) + mu_old; + double h0open = armah0(m+j) + mu_old; + double logacceptrate; + double mu_prop; + + if (priorh0(m+j) < 0.) 
{ // old prior for h0 (stationary distribution, depends on phi), as in JCGS submission Feb 2016 + double tmph = hopen(0) - phi*h0open; + for (int k = 1; k < T; k++) tmph += hopen(k) - phi*hopen(k-1); + + double gamma_old = (1 - phi) * mu_old; + double gamma_prop = as(rnorm(1, tmph/(T+B011inv), sigma/std::sqrt(T+B011inv))); + mu_prop = gamma_prop/(1-phi); + + logacceptrate = logdnormquot(mu_prop, mu_old, h0open, sigma/std::sqrt(1-phi*phi)); + logacceptrate += logspecialquot(gamma_prop, gamma_old, .5, 1/(2.*armatau2(userow,j)), 1-phi); + logacceptrate += logdnormquot(gamma_old, gamma_prop, 0., sigma*std::sqrt(1/B011inv)); + + } else { // new prior does not depend on phi + double tmph = hopen(0); + for (int k = 1; k < (T-1); k++) tmph += hopen(k); + + double tmp4prop = T*priorh0(m+j)*(1-phi)*(1-phi) + 1; + double prop_mean = (priorh0(m+j) * (1-phi) * (hopen(T-1) + (1-phi)*tmph - phi*h0open) + h0open) / tmp4prop; + double prop_sd = (sqrt(priorh0(m+j)) * sigma) / std::sqrt(tmp4prop); + + mu_prop = as(rnorm(1, prop_mean, prop_sd)); + logacceptrate = .5 * ((mu_prop - mu_old) - (std::exp(mu_prop) - std::exp(mu_old)) / armatau2(userow,j)); + } + + // NEW, same for both priors: + arma::vec relevantload = armafacload.col(j); + arma::vec relevanttau2 = armatau2.col(j); + + // use all except interwoven element (restricted loadings are assumed to be zero!) + double mysum = accu(square(nonzeros(relevantload))/nonzeros(relevanttau2)) - + (relevantload(userow)*relevantload(userow))/relevanttau2(userow); + + logacceptrate += .5 * ((nonzerospercol(j)-1)*(mu_prop - mu_old) - + mysum / (armafacload(userow,j)*armafacload(userow,j)) * (exp(mu_prop) - exp(mu_old))); + + + // Rprintf("ACCEPT? 
"); + + //ACCEPT/REJECT + if (log(as(runif(1))) < logacceptrate) { + // Rprintf("ACC col %i el %02i - ", j+1, userow+1); + armah.col(m+j) = hopen - mu_prop; + armah0(m+j) = h0open - mu_prop; + + double tmp = std::exp(mu_prop/2)/armafacload(userow,j); + armafacload.col(j) *= tmp; + armaf.row(j) *= 1/tmp; + // } else { + // Rprintf("REJ col %i el %02i - ", j+1, userow+1); + } + } + // STEP 3: + // update the factors (T independent r-variate regressions with m observations) + + + if (samplefac) { + armadraw2 = rnorm(r*T); + for (int j = 0; j < T; j++) { + + // transposed design matrix Xt2 (r x m) is filled "manually" + for (int k = 0; k < m; k++) { + for (int l = 0; l < r; l++) { + armaXt2(l, k) = armafacload(k, l) * armahtilde(j,k); + } + } + + armaytilde2 = armay.col(j) % armahtilde.row(j).t(); + + // Now draw form the multivariate normal distribution + + // armaSigma2 is first used as temporary variable (to hold the precision): + armaSigma2 = armaXt2 * armaXt2.t(); + + // add precisions to diagonal: + armaSigma2.diag() += exp(-armah(j, arma::span(m, mpr-1))); + + // find Cholesky factor of posterior precision + try { + armaR2 = arma::chol(armaSigma2); + } catch (...) { + ::Rf_error("Error: Couldn't Cholesky-decompose posterior factor precision at time %i of %i", j+1, T); + } + + try { + // armaR2inv = arma::inv(R2); # This is a little bit faster for very small matrices but a lot slower for large ones... + // armaR2inv = arma::inv(arma::trimatu(armaR2)); # This is OK on Native R but not so nice in OpenBLAS + armaR2inv = arma::solve(arma::trimatu(armaR2), arma::eye(r, r)); + } catch (...) 
{ + ::Rf_error("Error: Couldn't invert Cholesky factor of posterior factor precision at time %i of %i",j+1, T); + } + + // calculate posterior covariance matrix armaSigma2: + armaSigma2 = armaR2inv * armaR2inv.t(); + + // calculate posterior mean armamean2: + armamean2 = armaSigma2 * armaXt2 * armaytilde2; + + // draw from the r-variate normal distribution + try { + armaf.col(j) = armamean2 + (armaR2inv * armadraw2.subvec(j*r, (j+1)*r - 1)); + } catch(...) { + ::Rf_error("Error: Couldn't sample factors at time %i of %i", j+1, T); + } + } + } + } + + + // SIGN SWITCH: + if (signswitch) { + for (int j = 0; j < r; j++) { + if (as(runif(1)) > .5) { + armafacload.col(j) *= -1; + armaf.row(j) *= -1; + } + } + } +} diff --git a/src/update_ng.cpp b/src/update_ng.cpp index be2f562..a0a94f3 100644 --- a/src/update_ng.cpp +++ b/src/update_ng.cpp @@ -13,6 +13,16 @@ double posterior_phi_mu(const double lambda, const double phi_mu, const arma::ve void update_ng(double & phi_mu, double & lambda_mu, arma::vec & omega, arma::uword nm, const double c0, const double c1, double s, const arma::vec & psi_i, const arma::vec & prior_psi_mean, double & accept) { + // phi_mu: the shrinkage parameter phi_mu + // lambda_mu: the shrinkage parameter lambda_mu + // omega: the idiosyncratic shrinkage parameters omega + // nm: n_vars * n_determ (number of parameters) + // c0: hyperparameter c0 + // c1: hyperparameter c1 + // s: scale of proposal + // psi_i: the steady-state parameters + // prior_psi_mean: the prior means of psi_i + // accept: indicator for whether the proposal is accepted or not // Update omega double gig_lambda = phi_mu-0.5; @@ -26,9 +36,8 @@ void update_ng(double & phi_mu, double & lambda_mu, arma::vec & omega, arma::uwo //omega(i) = do_rgig1(gig_lambda, gig_chi, gig_psi(i)); omega(i) = do_rgig1(gig_lambda, gig_chi(i), gig_psi); } - // Update lambda - lambda_mu = R::rgamma((double)nm * phi_mu + c0, 1.0/(0.5 * phi_mu * arma::accu(omega) + c1)); // Check parametrization + 
lambda_mu = R::rgamma((double)nm * phi_mu + c0, 1.0/(0.5 * phi_mu * arma::accu(omega) + c1)); // Update phi double phi_mu_proposal = phi_mu * std::exp(R::rnorm(0.0, s)); diff --git a/tests/testthat/test_mcmc.R b/tests/testthat/test_mcmc.R index 0074f11..ea2bc79 100644 --- a/tests/testthat/test_mcmc.R +++ b/tests/testthat/test_mcmc.R @@ -4,7 +4,7 @@ test_that("Mixed", { set.seed(10237) Y <- mfbvar::mf_sweden prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 1000, n_reps = 1000) + n_lags = 4, n_burnin = 10, n_reps = 10) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -18,29 +18,33 @@ test_that("Mixed", { prior_psi_Omega = prior_psi_Omega, n_fcst = 4, n_fac = 1) testthat::skip_on_cran() - set.seed(100) - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "iw") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "iw") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "iw") - - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "diffuse") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "diffuse") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "diffuse") - - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "csv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "csv") - - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "fsv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "fsv") + set.seed(10) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "iw"), NA) + 
expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "iw"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "iw")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "diffuse"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "diffuse") + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "csv"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "csv")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "fsv"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "fsv") }) test_that("Quarterly", { set.seed(10237) Y <- mfbvar::mf_sweden prior_obj <- set_prior(Y = Y[seq(2, nrow(Y), by = 3), ], freq = rep("q", 5), - n_lags = 4, n_burnin = 100, n_reps = 100) + n_lags = 4, n_burnin = 10, n_reps = 10) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -54,25 +58,33 @@ test_that("Quarterly", { prior_psi_Omega = prior_psi_Omega, n_fcst = 4, n_fac = 1) testthat::skip_on_cran() - set.seed(100) - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "iw") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "iw") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = 
"iw") - - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "csv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "csv") - - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "fsv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "fsv") + set.seed(10) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "iw"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "iw")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "csv"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "csv")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "fsv"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "fsv") + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "diffuse"), NA) + #mod <- 
estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "diffuse") }) test_that("Monthly", { set.seed(10237) Y <- mfbvar::mf_sweden prior_obj <- set_prior(Y = Y[, -5], freq = rep("m", 4), - n_lags = 4, n_burnin = 100, n_reps = 100) + n_lags = 4, n_burnin = 10, n_reps = 10) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -85,16 +97,150 @@ test_that("Monthly", { prior_psi_Omega = prior_psi_Omega, n_fcst = 4, n_fac = 1) testthat::skip_on_cran() - set.seed(100) - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "iw") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "iw") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "iw") - - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "csv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "csv") - - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "fsv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv") - mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "fsv") + set.seed(10) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "iw"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "iw")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "csv"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", 
variance = "csv")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "fsv"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "fsv") + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "diffuse"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "diffuse") +}) + +test_that("Block exogenous 1", { + set.seed(10237) + Y <- mfbvar::mf_sweden + prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), + n_lags = 4, n_burnin = 10, n_reps = 10, + block_exo = 2:3) + + prior_intervals <- matrix(c( 6, 7, + 0.1, 0.2, + 0, 0.5, + -0.5, 0.5, + 0.4, 0.6), ncol = 2, byrow = TRUE) + psi_moments <- interval_to_moments(prior_intervals) + prior_psi_mean <- psi_moments$prior_psi_mean + prior_psi_Omega <- psi_moments$prior_psi_Omega + prior_obj <- update_prior(prior_obj, d = "intercept", prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, n_fcst = 4, n_fac = 1) + + testthat::skip_on_cran() + set.seed(10) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "iw"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "iw")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance 
= "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "diffuse"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "diffuse") + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "csv"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "csv")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "fsv"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "fsv", a = 1/10) +}) + + +test_that("Block exogenous 2", { + set.seed(10237) + Y <- mfbvar::mf_sweden + prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), + n_lags = 4, n_burnin = 10, n_reps = 10, + block_exo = c("infl", "ip")) + + prior_intervals <- matrix(c( 6, 7, + 0.1, 0.2, + 0, 0.5, + -0.5, 0.5, + 0.4, 0.6), ncol = 2, byrow = TRUE) + psi_moments <- interval_to_moments(prior_intervals) + prior_psi_mean <- psi_moments$prior_psi_mean + prior_psi_Omega <- psi_moments$prior_psi_Omega + prior_obj <- update_prior(prior_obj, d = "intercept", prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, n_fcst = 4, n_fac = 1) + + testthat::skip_on_cran() + set.seed(10) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "iw"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior 
= "dl", variance = "iw")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "diffuse"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "diffuse") + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "csv"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "csv")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "fsv"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "fsv", a = 1/10) +}) + +test_that("Weekly-Monthly MCMC", { + set.seed(10237) + Y <- matrix(rnorm(400), 100, 4) + Y[setdiff(1:100,seq(4, 100, by = 4)), 4] <- NA + + prior_obj <- set_prior(Y = Y, freq = c(rep("w", 3), "m"), + n_lags = 4, n_reps = 10) + + prior_intervals <- matrix(c( + -0.5, 0.5, + -0.5, 0.5, + -0.5, 0.5, + -0.5, 0.5), ncol = 2, byrow = TRUE) + psi_moments <- interval_to_moments(prior_intervals) + prior_psi_mean <- psi_moments$prior_psi_mean + prior_psi_Omega <- psi_moments$prior_psi_Omega + prior_obj <- update_prior(prior_obj, d = "intercept", prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, n_fcst = 4, + n_fac = 1) + + testthat::skip_on_cran() + set.seed(10) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "iw"), NA) + 
expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "iw"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "iw"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "iw")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "diffuse"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "diffuse"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "diffuse") + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "csv"), NA) + #expect_error(mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "csv")) + + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv"), NA) + expect_error(estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ssng", variance = "fsv"), NA) + #mod <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "dl", variance = "fsv") }) diff --git a/tests/testthat/test_mfbvar.R b/tests/testthat/test_mfbvar.R index 19b7c60..da6fe1a 100644 --- a/tests/testthat/test_mfbvar.R +++ b/tests/testthat/test_mfbvar.R @@ -3,8 +3,8 @@ context("Output") test_that("Output correct", { set.seed(10237) Y <- mfbvar::mf_sweden - expect_warning(prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 1000)) + prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), + n_lags = 4, n_burnin = 100, n_reps = 300) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -20,41 +20,12 @@ 
test_that("Output correct", { expect_true(!is.null(prior_obj2$d_fcst)) testthat::skip_on_cran() - mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior_type = "minn", n_fcst = 4, smooth_state = TRUE) - mod_ss <- estimate_mfbvar(prior_obj2, "ss", smooth_state = TRUE) + mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", n_fcst = 4) + mod_ss <- estimate_mfbvar(prior_obj2, "ss") mdd_minn <- mdd(mod_minn, p_trunc = 0.5) - mdd_ss1 <- mdd(mod_ss, method = 1) - mdd_ss2 <- mdd(mod_ss, method = 2, p_trunc = 0.5) - - expect_equal(c(mod_ss$Y[!is.na(Y[,1]), 1]), c(mod_ss$Z[!is.na(Y[,1]), 1, 100])) - expect_equal(c(mod_minn$Y[!is.na(Y[,1]), 1]), c(mod_minn$Z[!is.na(Y[,1]), 1, 100])) - - # saveRDS(mod_minn$Z[,5, 100], file = "tests/testthat/Z_minn.rds") - # saveRDS(mod_minn$Pi[,, 100], file = "tests/testthat/Pi_minn.rds") - # saveRDS(mod_minn$Sigma[,, 100], file = "tests/testthat/Sigma_minn.rds") - # - # saveRDS(mod_ss$Z[,5, 100], file = "tests/testthat/Z_ss.rds") - # saveRDS(mod_ss$Pi[,, 100], file = "tests/testthat/Pi_ss.rds") - # saveRDS(mod_ss$Sigma[,, 100], file = "tests/testthat/Sigma_ss.rds") - # saveRDS(mod_ss$psi[100,], file = "tests/testthat/psi_ss.rds") - # - # saveRDS(mdd_minn, file = "tests/testthat/mdd_minn.rds") - # saveRDS(mdd_ss1, file = "tests/testthat/mdd_ss1.rds") - # saveRDS(mdd_ss2, file = "tests/testthat/mdd_ss2.rds") - - expect_equal_to_reference(mod_minn$Z[,5, 100], "Z_minn.rds") - expect_equal_to_reference(mod_minn$Pi[,, 100], "Pi_minn.rds") - expect_equal_to_reference(mod_minn$Sigma[,, 100], "Sigma_minn.rds") - - expect_equal_to_reference(mod_ss$Z[,5, 100], "Z_ss.rds") - expect_equal_to_reference(mod_ss$Pi[,, 100], "Pi_ss.rds") - expect_equal_to_reference(mod_ss$psi[100, ], "psi_ss.rds") - expect_equal_to_reference(mod_ss$Sigma[,, 100], "Sigma_ss.rds") - - expect_equal_to_reference(mdd_minn, "mdd_minn.rds") - expect_equal_to_reference(mdd_ss1, "mdd_ss1.rds") - expect_equal_to_reference(mdd_ss2, "mdd_ss2.rds") + mdd_ss1 <- 
mdd(mod_ss) + }) context("Prior checks") test_that("Prior checks correct", { @@ -64,28 +35,100 @@ test_that("Prior checks correct", { # If Y is not matrix/df expect_error(prior_obj <- set_prior(Y = "test", freq = c(rep("m", 4), "q"), n_lags = 4, n_burnin = 100, n_reps = 1000)) - # Still a matrix - expect_warning(prior_obj <- set_prior(Y = as.ts(Y), freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 1000)) + # Including d - expect_warning(prior_obj <- set_prior(Y = Y, d = "intercept", freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 1000)) - expect_warning(prior_obj <- set_prior(Y = Y, d = matrix(1, nrow = nrow(Y), 1), freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 1000)) - expect_warning(prior_obj <- set_prior(Y = Y, d = cbind(1, 1:nrow(Y)), freq = c(rep("m", 4), "q"), + expect_error(prior_obj <- set_prior(Y = Y, d = matrix(1, nrow = nrow(Y)-1, 1), freq = c(rep("m", 4), "q"), n_lags = 4, n_burnin = 100, n_reps = 1000)) # freq expect_error(prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "s"), n_lags = 4, n_burnin = 100, n_reps = 1000)) - expect_error(prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4)), + expect_error(prior_obj <- set_prior(Y = Y, freq = list(c(rep("m", 4), "s")), + n_lags = 4, n_burnin = 100, n_reps = 1000)) + + + expect_error(prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), + aggregation = "triangular", + n_lags = 4, n_burnin = 100, n_reps = 1000)) + expect_error(prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), + aggregation = "average", + n_lags = 2, n_burnin = 100, n_reps = 1000)) + # Using update + prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), + n_lags = 4, n_burnin = 100, n_reps = 300) + prior_obj2 <- update_prior(prior_obj, d = "intercept", Y = Y[1:100, ], n_fcst = 4) + expect_is(prior_obj2$d_fcst, "matrix") + expect_is(prior_obj2$d, "matrix") + + prior_obj2 <- update_prior(prior_obj, d = "intercept", Y = Y[1:90, ]) + prior_obj2 <- update_prior(prior_obj2, 
n_fcst = 4) + expect_is(prior_obj2$d_fcst, "matrix") + expect_true(all(dim(prior_obj2$d_fcst) == c(4, 1))) +}) + +test_that("list_to_matrix", { + variables <- c("CPIAUCSL", "UNRATE", "GDPC1") + convert_ts <- function(x, frequency) { + ts(x, + start = c(1980, 1), + frequency = frequency) + } + convert_tsz <- function(x, frequency) { + zoo::zooreg(x, + start = c(1980, 1), + frequency = frequency) + } + out <- list(rnorm(466), rnorm(467), rnorm(155)) + ts_list <- c(lapply(out[1:2], convert_ts, frequency = 12), + lapply(out[3], convert_ts, frequency = 4)) + names(ts_list) <- variables + + tsz_list <- c(lapply(out[1:2], convert_tsz, frequency = 12), + lapply(out[3], convert_tsz, frequency = 4)) + names(tsz_list) <- variables + + ts_list2 <- list(monthly = cbind(CPIAUCSL = ts_list[[1]], UNRATE = ts_list[[2]]), GDPC1 = ts_list[[3]]) + tsz_list2 <- list(monthly = cbind(CPIAUCSL = tsz_list[[1]], UNRATE = tsz_list[[2]]), GDPC1 = tsz_list[[3]]) + + expect_equal(list_to_matrix(tsz_list), list_to_matrix(ts_list)) + expect_equal(list_to_matrix(tsz_list), list_to_matrix(ts_list)) + expect_equal(list_to_matrix(tsz_list), list_to_matrix(ts_list2)) + expect_equal(list_to_matrix(tsz_list), list_to_matrix(tsz_list2)) + + + + +}) + + +test_that("Prior checks correct", { + set.seed(10237) + Y <- mfbvar::mf_sweden + + # If Y is not matrix/df + expect_error(prior_obj <- set_prior(Y = "test", freq = c(rep("m", 4), "q"), + n_lags = 4, n_burnin = 100, n_reps = 1000)) + + # Including d + expect_error(prior_obj <- set_prior(Y = Y, d = matrix(1, nrow = nrow(Y)-1, 1), freq = c(rep("m", 4), "q"), + n_lags = 4, n_burnin = 100, n_reps = 1000)) + + # freq + expect_error(prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "s"), n_lags = 4, n_burnin = 100, n_reps = 1000)) expect_error(prior_obj <- set_prior(Y = Y, freq = list(c(rep("m", 4), "s")), n_lags = 4, n_burnin = 100, n_reps = 1000)) + + expect_error(prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), + aggregation = "triangular", + 
n_lags = 4, n_burnin = 100, n_reps = 1000)) + expect_error(prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), + aggregation = "average", + n_lags = 2, n_burnin = 100, n_reps = 1000)) # Using update - expect_warning(prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 1000)) + prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), + n_lags = 4, n_burnin = 100, n_reps = 300) prior_obj2 <- update_prior(prior_obj, d = "intercept", Y = Y[1:100, ], n_fcst = 4) expect_is(prior_obj2$d_fcst, "matrix") expect_is(prior_obj2$d, "matrix") @@ -94,24 +137,29 @@ test_that("Prior checks correct", { prior_obj2 <- update_prior(prior_obj2, n_fcst = 4) expect_is(prior_obj2$d_fcst, "matrix") expect_true(all(dim(prior_obj2$d_fcst) == c(4, 1))) +}) + +test_that("List as input, no names", { + set.seed(10237) + Y <- mfbvar::mf_sweden + Y_list <- c(lapply(Y[,1:4], function(x) ts(x, frequency = 12, start = c(1996, 8))), + list(gdp = ts(Y[seq(from = 2, to = nrow(Y), by = 3), 5], frequency = 4, start = c(1996, 3)))) + names(Y_list) <- NULL + set.seed(10237) + prior_obj2 <- set_prior(Y = Y_list, n_lags = 4, n_burnin = 10, n_reps = 10) + prior_intervals <- matrix(c( 6, 7, + 0.1, 0.2, + 0, 0.5, + -0.5, 0.5, + 0.4, 0.6), ncol = 2, byrow = TRUE) + psi_moments <- interval_to_moments(prior_intervals) + prior_psi_mean <- psi_moments$prior_psi_mean + prior_psi_Omega <- psi_moments$prior_psi_Omega + prior_obj2 <- update_prior(prior_obj2, d = "intercept", prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, n_fcst = 4) + set.seed(10) + mod_minn2 <- estimate_mfbvar(mfbvar_prior = prior_obj2, prior = "minn", n_fcst = 12) + expect_error(predict(mod_minn2), NA) - expect_warning({ - prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100, - prior_Pi_AR1 = 0) - summary(prior_obj) - }) - expect_warning({ - prior_obj <- set_prior(Y = mf_sweden, freq = c(rep("m", 4), "q"), - n_lags = 4, 
n_burnin = 100, n_reps = 100, - prior_Pi_AR1 = 1:5) - summary(prior_obj) - }) - expect_warning({ - prior_obj <- set_prior(Y = cbind(mf_sweden[,1:4], mf_sweden), freq = c(rep("m", 8), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100, - prior_Pi_AR1 = 1:9) - summary(prior_obj) - }) }) diff --git a/tests/testthat/test_plot.R b/tests/testthat/test_plot.R index 023c1a3..a44ad60 100644 --- a/tests/testthat/test_plot.R +++ b/tests/testthat/test_plot.R @@ -4,7 +4,7 @@ test_that("Forecasts (minn)", { set.seed(10237) Y <- mfbvar::mf_sweden prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100) + n_lags = 4, n_burnin = 10, n_reps = 10) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -18,14 +18,14 @@ test_that("Forecasts (minn)", { prior_psi_Omega = prior_psi_Omega, n_fcst = 4) testthat::skip_on_cran() - set.seed(100) + set.seed(10) mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", n_fcst = 12) expect_error(plot(mod_minn), NA) expect_error(plot(mod_minn, plot_start = "2013-07-31"), NA) rownames(Y) <- as.character(floor_date(as_date(rownames(Y)), unit = "month")) - set.seed(100) + set.seed(10) mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", n_fcst = 12, Y = Y) expect_error(plot(mod_minn), NA) @@ -33,19 +33,17 @@ test_that("Forecasts (minn)", { rownames(Y) <- NULL - set.seed(100) + set.seed(10) mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", n_fcst = 12, Y = Y) expect_error(plot(mod_minn)) - expect_error(plot(mod_minn, fcst_start = "2016-01-01"), NA) - expect_error(plot(mod_minn, fcst_start = "2016-01-01", plot_start = "2013-07-01"), NA) }) test_that("Forecasts (ss)", { set.seed(10237) Y <- mfbvar::mf_sweden prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100) + n_lags = 4, n_burnin = 10, n_reps = 10) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -59,37 +57,35 @@ test_that("Forecasts (ss)", { prior_psi_Omega = 
prior_psi_Omega, n_fcst = 12) testthat::skip_on_cran() - set.seed(100) + set.seed(10) mod_ss <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss") expect_error(plot(mod_ss), NA) expect_error(plot(mod_ss, plot_start = "2013-07-31"), NA) rownames(Y) <- as.character(floor_date(as_date(rownames(Y)), unit = "month")) - set.seed(100) + set.seed(10) mod_ss <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", Y = Y) expect_error(plot(mod_ss), NA) expect_error(plot(mod_ss, plot_start = "2013-07-01"), NA) rownames(Y) <- NULL - set.seed(100) + set.seed(10) mod_ss <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", Y = Y) expect_error(plot(mod_ss)) - expect_error(plot(mod_ss, fcst_start = "2016-01-01"), NA) - expect_error(plot(mod_ss, fcst_start = "2016-01-01", plot_start = "2013-07-01"), NA) }) test_that("Prior", { set.seed(10237) Y <- mfbvar::mf_sweden prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100) + n_lags = 4, n_burnin = 10, n_reps = 10) expect_error(plot(prior_obj), NA) rownames(Y) <- NULL prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100) + n_lags = 4, n_burnin = 10, n_reps = 10) plot(prior_obj) }) @@ -97,7 +93,7 @@ test_that("varplot", { set.seed(10237) Y <- mfbvar::mf_sweden prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100, n_fac = 1) + n_lags = 4, n_burnin = 10, n_reps = 10, n_fac = 1) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -111,17 +107,44 @@ test_that("varplot", { prior_psi_Omega = prior_psi_Omega, n_fcst = 12) testthat::skip_on_cran() - set.seed(100) + set.seed(10) mod_ss <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "fsv") expect_error(varplot(mod_ss, variables = "gdp"), NA) rownames(Y) <- NULL - set.seed(100) + set.seed(10) mod_ss <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", Y = Y, variance = "fsv") expect_error(varplot(mod_ss, variables = 
"gdp"), NA) colnames(Y) <- NULL - set.seed(100) + set.seed(10) mod_ss <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", Y = Y, variance = "fsv") expect_error(varplot(mod_ss, variables = 1), NA) }) + +test_that("Weekly-Monthly plots", { + set.seed(10237) + Y <- matrix(rnorm(400), 100, 4) + Y[setdiff(1:100,seq(4, 100, by = 4)), 4] <- NA + + prior_obj <- set_prior(Y = Y, freq = c(rep("w", 3), "m"), + n_lags = 4, n_reps = 10) + + prior_intervals <- matrix(c( + -0.5, 0.5, + -0.5, 0.5, + -0.5, 0.5, + -0.5, 0.5), ncol = 2, byrow = TRUE) + psi_moments <- interval_to_moments(prior_intervals) + prior_psi_mean <- psi_moments$prior_psi_mean + prior_psi_Omega <- psi_moments$prior_psi_Omega + prior_obj <- update_prior(prior_obj, d = "intercept", prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, n_fcst = 4) + + testthat::skip_on_cran() + set.seed(10) + mod_ss <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "ss", variance = "csv") + expect_error(varplot(mod_ss, variables = 1), NA) + expect_error(plot(mod_ss)) +}) + diff --git a/tests/testthat/test_predict.R b/tests/testthat/test_predict.R index 0c2e97b..b5a9ec2 100644 --- a/tests/testthat/test_predict.R +++ b/tests/testthat/test_predict.R @@ -3,8 +3,9 @@ context("Predict") test_that("Forecasts (mf)", { set.seed(10237) Y <- mfbvar::mf_sweden + rownames(Y) <- as.character(lubridate::floor_date(lubridate::ymd(rownames(Y)), unit = "months")) prior_obj <- set_prior(Y = Y, freq = c(rep("m", 4), "q"), - n_lags = 4, n_burnin = 100, n_reps = 100) + n_lags = 4, n_burnin = 10, n_reps = 10) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -18,23 +19,52 @@ test_that("Forecasts (mf)", { prior_psi_Omega = prior_psi_Omega, n_fcst = 4) testthat::skip_on_cran() - set.seed(100) + set.seed(10) mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", n_fcst = 12) expect_equal(predict(mod_minn) %>% - filter(variable == "gdp") %>% - pull(median), + dplyr::filter(variable == "gdp") %>% + .$median, 
c(median(colMeans(mod_minn$Z_fcst[2:4,5,])), median(colMeans(mod_minn$Z_fcst[5:7,5,])), median(colMeans(mod_minn$Z_fcst[8:10,5,])), median(colMeans(mod_minn$Z_fcst[11:13,5,])), median(colMeans(mod_minn$Z_fcst[14:16,5,])))) + + expect_equal(predict(mod_minn, aggregate_fcst = FALSE) %>% + dplyr::filter(variable == "gdp") %>% + .$median, + c(apply(mod_minn$Z_fcst[-1,5,], 1, median), use.names = FALSE)) + + + Y_list <- c(lapply(Y[,1:4], function(x) ts(x, frequency = 12, start = c(1996, 8))), + list(gdp = ts(Y[seq(from = 2, to = nrow(Y), by = 3), 5], frequency = 4, start = c(1996, 3)))) + + set.seed(10237) + prior_obj2 <- set_prior(Y = Y_list, n_lags = 4, n_burnin = 10, n_reps = 10) + + prior_intervals <- matrix(c( 6, 7, + 0.1, 0.2, + 0, 0.5, + -0.5, 0.5, + 0.4, 0.6), ncol = 2, byrow = TRUE) + psi_moments <- interval_to_moments(prior_intervals) + prior_psi_mean <- psi_moments$prior_psi_mean + prior_psi_Omega <- psi_moments$prior_psi_Omega + prior_obj2 <- update_prior(prior_obj2, d = "intercept", prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, n_fcst = 4) + set.seed(10) + mod_minn2 <- estimate_mfbvar(mfbvar_prior = prior_obj2, prior = "minn", n_fcst = 12) + + expect_equal(predict(mod_minn), predict(mod_minn2)) + }) test_that("Forecasts (monthly)", { set.seed(10237) Y <- mfbvar::mf_sweden + rownames(Y) <- as.character(lubridate::floor_date(lubridate::ymd(rownames(Y)), unit = "months")) prior_obj <- set_prior(Y = Y[, -5], freq = rep("m", 4), - n_lags = 4, n_burnin = 100, n_reps = 100) + n_lags = 4, n_burnin = 10, n_reps = 10) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -47,19 +77,39 @@ test_that("Forecasts (monthly)", { prior_psi_Omega = prior_psi_Omega, n_fcst = 4) testthat::skip_on_cran() - set.seed(100) + set.seed(10) mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", n_fcst = 12) expect_equal(predict(mod_minn) %>% - filter(variable == "eti") %>% + dplyr::filter(variable == "eti") %>% pull(median), - 
as.numeric(apply(mod_minn$Z_fcst[-(1:4),4,], 1, median))) + c(apply(mod_minn$Z_fcst[-(1:4),4,], 1, median), use.names = FALSE)) + + Y_list <- lapply(Y[,1:4], function(x) ts(x, frequency = 12, start = c(1996, 8))) + + set.seed(10237) + prior_obj2 <- set_prior(Y = Y_list, n_lags = 4, n_burnin = 10, n_reps = 10) + + prior_intervals <- matrix(c( 6, 7, + 0.1, 0.2, + 0, 0.5, + -0.5, 0.5), ncol = 2, byrow = TRUE) + psi_moments <- interval_to_moments(prior_intervals) + prior_psi_mean <- psi_moments$prior_psi_mean + prior_psi_Omega <- psi_moments$prior_psi_Omega + prior_obj2 <- update_prior(prior_obj2, d = "intercept", prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, n_fcst = 4) + set.seed(10) + mod_minn2 <- estimate_mfbvar(mfbvar_prior = prior_obj2, prior = "minn", n_fcst = 12) + + expect_equal(predict(mod_minn), predict(mod_minn2)) }) test_that("Forecasts (quarterly)", { set.seed(10237) Y <- mfbvar::mf_sweden + rownames(Y) <- as.character(lubridate::floor_date(lubridate::ymd(rownames(Y)), unit = "months")) prior_obj <- set_prior(Y = Y[seq(2, nrow(Y), by = 3), ], freq = rep("q", 5), - n_lags = 4, n_burnin = 100, n_reps = 100) + n_lags = 4, n_burnin = 10, n_reps = 10) prior_intervals <- matrix(c( 6, 7, 0.1, 0.2, @@ -73,10 +123,60 @@ test_that("Forecasts (quarterly)", { prior_psi_Omega = prior_psi_Omega, n_fcst = 4) testthat::skip_on_cran() - set.seed(100) + set.seed(10) mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", n_fcst = 12) expect_equal(predict(mod_minn) %>% - filter(variable == "eti") %>% + dplyr::filter(variable == "eti") %>% pull(median), as.numeric(apply(mod_minn$Z_fcst[-(1:4),4,], 1, median))) + + Y <- Y[seq(2, nrow(Y), by = 3), ] + Y_list <- lapply(Y, function(x) ts(x, frequency = 4, start = c(1996, 3))) + + set.seed(10237) + prior_obj2 <- set_prior(Y = Y_list, n_lags = 4, n_burnin = 10, n_reps = 10) + + prior_intervals <- matrix(c( 6, 7, + 0.1, 0.2, + 0, 0.5, + -0.5, 0.5, + 1, 3), ncol = 2, byrow = TRUE) + psi_moments 
<- interval_to_moments(prior_intervals) + prior_psi_mean <- psi_moments$prior_psi_mean + prior_psi_Omega <- psi_moments$prior_psi_Omega + prior_obj2 <- update_prior(prior_obj2, d = "intercept", prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, n_fcst = 4) + set.seed(10) + mod_minn2 <- estimate_mfbvar(mfbvar_prior = prior_obj2, prior = "minn", n_fcst = 12) + + expect_equal(predict(mod_minn), predict(mod_minn2)) +}) + +test_that("Forecasts (weekly-monthly)", { + set.seed(10237) + Y <- matrix(rnorm(400), 100, 4) + Y[setdiff(1:100,seq(4, 100, by = 4)), 4] <- NA + + prior_obj <- set_prior(Y = Y, freq = c(rep("w", 3), "m"), + n_lags = 4, n_reps = 10) + + prior_intervals <- matrix(c( + -0.5, 0.5, + -0.5, 0.5, + -0.5, 0.5, + -0.5, 0.5), ncol = 2, byrow = TRUE) + psi_moments <- interval_to_moments(prior_intervals) + prior_psi_mean <- psi_moments$prior_psi_mean + prior_psi_Omega <- psi_moments$prior_psi_Omega + prior_obj <- update_prior(prior_obj, d = "intercept", prior_psi_mean = prior_psi_mean, + prior_psi_Omega = prior_psi_Omega, n_fcst = 4) + + testthat::skip_on_cran() + set.seed(10) + mod_minn <- estimate_mfbvar(mfbvar_prior = prior_obj, prior = "minn", n_fcst = 12) + expect_equal(predict(mod_minn, aggregate_fcst = FALSE) %>% + dplyr::filter(variable == 4) %>% + .$median, + c(apply(mod_minn$Z_fcst[-(1:4),4,], 1, median), use.names = FALSE)) + }) diff --git a/vignettes/.DS_Store b/vignettes/.DS_Store new file mode 100644 index 0000000..d3a62bc Binary files /dev/null and b/vignettes/.DS_Store differ diff --git a/vignettes/alfred_data.RData b/vignettes/alfred_data.RData new file mode 100644 index 0000000..4c4e68a Binary files /dev/null and b/vignettes/alfred_data.RData differ diff --git a/vignettes/figures/ridges-1.pdf b/vignettes/figures/ridges-1.pdf new file mode 100644 index 0000000..8e5511b Binary files /dev/null and b/vignettes/figures/ridges-1.pdf differ diff --git a/vignettes/figures/ss_plots-1.pdf b/vignettes/figures/ss_plots-1.pdf new file mode 
100644 index 0000000..61c9df1 Binary files /dev/null and b/vignettes/figures/ss_plots-1.pdf differ diff --git a/vignettes/figures/ss_plots-2.pdf b/vignettes/figures/ss_plots-2.pdf new file mode 100644 index 0000000..840fd5f Binary files /dev/null and b/vignettes/figures/ss_plots-2.pdf differ diff --git a/vignettes/figures/varplot-1.pdf b/vignettes/figures/varplot-1.pdf new file mode 100644 index 0000000..d18b60e Binary files /dev/null and b/vignettes/figures/varplot-1.pdf differ diff --git a/vignettes/figures/varplot-2.pdf b/vignettes/figures/varplot-2.pdf new file mode 100644 index 0000000..eb382f0 Binary files /dev/null and b/vignettes/figures/varplot-2.pdf differ diff --git a/vignettes/mfbvar_jss.Rnw b/vignettes/mfbvar_jss.Rnw new file mode 100644 index 0000000..b1997a5 --- /dev/null +++ b/vignettes/mfbvar_jss.Rnw @@ -0,0 +1,900 @@ +\documentclass[article,nojss]{jss} + +%% -- LaTeX packages and custom commands --------------------------------------- + +%% recommended packages +\usepackage{thumbpdf,lmodern} +\usepackage{amsmath} +\usepackage{amssymb} +%% another package (only for this demo article) +\usepackage{framed} +\usepackage{mathtools} +\usepackage{subfig} + +\mathtoolsset{showonlyrefs} + +%% new custom commands +\newcommand{\class}[1]{`\code{#1}'} +\newcommand{\fct}[1]{\code{#1()}} + + +\usepackage{rotating} +\usepackage[utf8]{inputenc} +\usepackage{array} + +\newcolumntype{R}{@{\extracolsep{5pt}}c@{\extracolsep{0pt}}}% + +%\VignetteEngine{knitr::knitr} +%\VignetteIndexEntry{Bayesian Mixed-Frequency VARs} +\newcommand{\GG}[1]{} + +%% For Sweave-based articles about R packages: +%% need no \usepackage{Sweave} +<>= +options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE) +run_mod <- FALSE +@ + +<>= +library(knitr) +render_sweave() +local({ + hook_error = knit_hooks$get('warning') + knit_hooks$set(warning = function(x, options) { + x <- gsub("Warning:", "Warning:\n ", x) + hook_error(x, options) + }) +}) +set.seed(100) +@ + + +%% -- 
Article metainformation (author, title, ...) ----------------------------- + +%% - \author{} with primary affiliation +%% - \Plainauthor{} without affiliations +%% - Separate authors by \And or \AND (in \author) or by comma (in \Plainauthor). +%% - \AND starts a new line, \And does not. +\author{Sebastian Ankargren\\Uppsala University, \\ National Institute of Economic Research + \And Yukai Yang\\Uppsala University, \\Stockholm School of Economics} +\Plainauthor{Sebastian Ankargren, Yukai Yang} + +%% - \title{} in title case +%% - \Plaintitle{} without LaTeX markup (if any) +%% - \Shorttitle{} with LaTeX markup (if any), used as running title +\title{Mixed-Frequency Bayesian VAR Models in \proglang{R}: the \pkg{mfbvar} package} +\Plaintitle{Mixed-Frequency Bayesian VAR Models in R: the mfbvar package} +\Shorttitle{Mixed-Frequency Bayesian VAR Models in \proglang{R}} + +%% - \Abstract{} almost as usual +\Abstract{ + Time series are often sampled at different frequencies, which leads to mixed-frequency data. Mixed frequencies are often neglected in applications as high-frequency series are aggregated to lower frequencies. In the \pkg{mfbvar} package, we introduce the possibility to estimate Bayesian vector autoregressive (VAR) models when the set of included time series consists of monthly and quarterly variables. The package implements several common prior distributions as well as stochastic volatility methods. The mixed-frequency nature of the data is handled by assuming that quarterly variables are weighted averages of unobserved monthly observations. We provide a user-friendly interface for model estimation and forecasting. The capabilities of the package are illustrated in an application. +} + + +%% - \Keywords{} with LaTeX markup, at least one required +%% - \Plainkeywords{} without LaTeX markup (if necessary) +%% - Should be comma-separated and in sentence case. 
+\Keywords{vector autoregression, steady-state prior, stochastic volatility, time series, \proglang{R}} +\Plainkeywords{vector autoregression, steady-state prior, stochastic volatility, time series, R} + +%% - \Address{} of at least one author +%% - May contain multiple affiliations for each author +%% (in extra lines, separated by \emph{and}\\). +%% - May contain multiple authors for the same affiliation +%% (in the same first line, separated by comma). +\Address{ + Sebastian Ankargren, Yukai Yang\\ + Department of Statistics\\ + Uppsala University\\ + P.O. Box 513, 751 20 Uppsala\\ + Sweden\\ + E-mail: \email{sebastian.ankargren@statistics.uu.se}\\ + URL: \url{http://ankargren.github.io/}\\ + \emph{and}\\ + Yukai Yang\\ + Center for Data Analytics\\ + Stockholm School of Economics\\ + P.O. Box 6501, 113 83 Stockholm\\ + Sweden\\ + E-mail: \email{yukai.yang@statistics.uu.se}\\ + URL: \url{http://yukai-yang.github.io/}\\ +} +\IfFileExists{upquote.sty}{\usepackage{upquote}}{} +\begin{document} + +%% -- Introduction ------------------------------------------------------------- + +%% - In principle "as usual". +%% - But should typically have some discussion of both _software_ and _methods_. +%% - Use \proglang{}, \pkg{}, and \code{} markup throughout the manuscript. +%% - If such markup is in (sub)section titles, a plain text version has to be +%% added as well. +%% - All software mentioned should be properly \cite-d. +%% - All abbreviations should be introduced. +%% - Unless the expansions of abbreviations are proper names (like "Journal +%% of Statistical Software" above) they should be in sentence case (like +%% "generalized linear models" below). + +\section{Introduction} \label{sec:intro} +Vector autoregressive (VAR) models constitute an important tool for multivariate time series analysis.
They are, in their original form, easy to fit and to use and have hence been used for various types of policy analyses as well as for forecasting purposes. A major obstacle in applied VAR modeling is the curse of dimensionality: the number of parameters grows quadratically in the number of variables, and having several hundred or even thousands of parameters is not uncommon. Thus, VAR models estimated by maximum likelihood are usually associated with bad precision. As a remedy, Bayesian estimation has become widely popular following \cite{Litterman1986} and the so-called Minnesota prior, which regularizes the estimation such that the parameters are shrunk towards a stylized view of macroeconomic time series. In the traditional Minnesota prior, the prior belief is that the time series are independent random walks. The prior puts prior densities more tightly around zero for higher-order lags, thus implying that recent lags should be relatively more important than more distant lags. The Minnesota prior, and variations thereof, has been successful in forecasting; for examples, the reader is referred to \cite{Banbura2010} and \cite{Karlsson2013} and the references therein. For an accessible introduction to VAR modeling in macroeconomics, see \cite{Stock2001}. + +Another prior that has shown promising results with respect to forecasting is the steady-state prior proposed by \cite{Villani2009}. In the Minnesota prior, common practice is to put a loose prior on the intercept in the VAR model. The steady-state prior employs an alternative parametrization of the model in which the unconditional mean (the steady state) is present. Thus, one can put a prior distribution on the steady states, for which there are often beliefs, rather than on the constant term. Numerous applications of Bayesian VARs with this prior exist in the literature, see for instance \cite{Jarocinski2008,Osterholm2010,Clark2011,Ankargren2016}. 
+ +In most applications, researchers use single-frequency aggregated data in order to effortlessly be able to estimate the models. A common situation in macroeconomics is to include both the rate of inflation and GDP growth in a model. The inflation rate is typically published monthly, whereas GDP growth is published quarterly. Thus, a necessary first step in order to use traditional approaches is to aggregate the monthly inflation rate to the quarterly frequency. + +By using newer techniques, there is no need to aggregate to the lowest common frequency. Mixed data sampling (MIDAS) methods allow for various frequencies of the data to co-exist in the model \citep{Ghysels2007}. Moreover, bridge methods can be used to tackle the mixed-frequency problem \citep{Baffigi2004}. \cite{Foroni2013} provided a survey of mixed-frequency methods. + +The approach that the \pkg{mfbvar} package implements is a state-space based approach in which it is assumed that low-frequency variables are observed linear combinations of underlying high-frequency processes. This implies that there is a latent monthly process for GDP growth that is unobserved, and what is observed is a weighted average of said latent process. By assuming such a structure, the model can be estimated by extending the single-frequency Bayesian VAR model estimation techniques with an auxiliary step that draws from the posterior distribution of the latent process. + +The state-space-based mixed-frequency Bayesian VAR was proposed by \cite{Schorfheide2015} using a Minnesota-style normal inverse Wishart prior. In \cite{AnkargrenUnossonYang2019}, a similar model was presented but with a steady-state prior. The \pkg{mfbvar} package implements the mixed-frequency VAR with Minnesota and steady-state priors and stochastic volatility in a user-friendly way. 
+ +The implementation of Bayesian VARs in \proglang{R} is not new; the \pkg{BMR} package \citep{OHara2017} presents the possibility to estimate single-frequency BVARs with either the Minnesota or steady-state prior. The BEAR toolbox \citep{Dieppe2016}, developed at the European Central Bank, provides the same functionality and more for \proglang{MATLAB} users. Moreover, various Bayesian VARs can also be estimated in \proglang{EViews}. However, none of these alternatives provide mixed-frequency estimation. The existing implementations of mixed-frequency estimation closest to ours are the \proglang{MATLAB} code accompanying the paper by \cite{Schorfheide2015} and the \pkg{midasr} package \citep{Ghysels2016b} implementing MIDAS regression in \proglang{R}. + +%% -- Manuscript --------------------------------------------------------------- + +%% - In principle "as usual" again. +%% - When using equations (e.g., {equation}, {eqnarray}, {align}, etc. +%% avoid empty lines before and after the equation (which would signal a new +%% paragraph. +%% - When describing longer chunks of code that are _not_ meant for execution +%% (e.g., a function synopsis or list of arguments), the environment {Code} +%% is recommended. Alternatively, a plain {verbatim} can also be used. +%% (For executed code see the next section.) +\clearpage +\section{Mixed-Frequency Bayesian VAR models} \label{sec:models} +Suppose that the system evolves at the monthly frequency. Let $x_t$ be an $n\times 1$ monthly process. Decompose $x_t=(x_{m, t}^\top, x_{q, t}^\top)^\top$ into $n_m$ monthly variables, and an $n_q$-dimensional latent process for the quarterly observations. By letting $y_t=(y_{m, t}^\top, y_{q, t}^\top)^\top$ denote observations, it is implied that $y_{m, t}=x_{m, t}$ as the monthly part is always observed. For the remaining quarterly variables, we instead observe a weighted average of $x_q$.
There are two common aggregations used in the literature: intra-quarterly averaging and triangular aggregation. The former assumes the relation between observed and latent variables to be +\begin{align} +y_{q, t}=\begin{cases}\frac{1}{3}(x_{q, t}+x_{q, t-1}+x_{q, t-2}), &\quad t\in \{\text{Mar}, \text{Jun}, \text{Sep}, \text{Dec}\}\\ +\varnothing, &\quad \text{otherwise},\end{cases} +\end{align} +and was used by e.g. \cite{Schorfheide2015} for modeling data in log-levels. The second alternative is the triangular weighting scheme employed by \cite{Mariano2003}, where +\begin{align} +y_{q, t}=\begin{cases}\frac{1}{9}(x_{q, t}+2x_{q, t-1}+3x_{q, t-2}+2x_{q,t-3}+x_{q,t-4}), &t\in \left\{\begin{matrix}\text{Mar},& \text{Jun}\\\text{Sep}, &\text{Dec}\end{matrix}\right\}\\ +\varnothing, &\text{otherwise}.\end{cases} +\end{align} +Intra-quarterly averaging is recommended when the dataset consists of series in log-levels, whereas the triangular weighting scheme is appropriate when the data enter the model as growth rates. In practice, the difference is often negligible. + +As the system is assumed to evolve at the monthly frequency, we specify a VAR($p$) model for $x_t$: +\begin{equation} +x_t=\phi+\Phi_1 x_{t-1}+\cdots+\Phi_p x_{t-p}+\epsilon_t, \quad \epsilon_t \sim \operatorname{N}(0, \Sigma).\label{eq:original} +\end{equation} +The VAR($p$) model can be written in companion form, where we let $z_t=(x_t^\top, x_{t-1}^\top, \dots, x_{t-p+1}^\top)^\top$. Thus, we obtain +\begin{equation} +z_t=\pi+\Pi z_{t-1}+u_t, \quad u_t \sim \operatorname{N}(0, \Omega), \label{eq:trans} +\end{equation} +where $\pi$, $\Pi$ and $\Omega$ are the corresponding companion form matrices constructed from $(\phi, \Phi_1, \dots, \Phi_p, \Sigma)$; see \cite{Hamilton1994}. 
+ +It is now possible to specify the observation equation as +\begin{equation} +y_t = M_t\Lambda z_t \label{eq:obs}, +\end{equation} +where $M_t$ is a deterministic selection matrix and $\Lambda$ an aggregation matrix based on the weighting scheme employed. The $M_t$ matrix in Equation (\ref{eq:obs}) yields a time-varying observation vector by selecting rows corresponding to variables which are observed, whereas $\Lambda$ aggregates the underlying latent process. For more details, see \cite{Schorfheide2015} and \cite{AnkargrenUnossonYang2019}. + +The posterior distribution of interest is $p(X, \Theta|Y)$, where $X=(x_1, \dots, x_T)^\top$, $Y=(y_1, \dots, y_T)^\top$ and $\Theta$ collects the parameters of the model. This posterior distribution is intractable, but a Gibbs sampler can be employed in order to numerically approximate the posterior. Thus, estimation can be carried out by Markov Chain Monte Carlo (MCMC) and Gibbs sampling. We alternate between drawing from the conditional posterior of $X$ given the parameters and from the conditional posterior of the parameters given $X$. That is, we alternate between drawing from the conditional distributions +\begin{equation} +\begin{aligned} +p(X| \Theta, Y)\quad \text{ and }\quad p(\Theta|X). +\end{aligned} +\end{equation} + The two equations \eqref{eq:trans} and \eqref{eq:obs} constitute the transition and measurement equations of a state-space model. By conditioning on the parameters and the data, one can make a draw from the conditional posterior $p(X| \Theta, Y)$ by use of a simulation smoother \citep{Durbin2002}. Given $X$, the parameters are conditionally independent of $Y$ and a draw from $p(\Theta|X)$ can be made as in the familiar single-frequency case with the data being $X$. + +The preceding description of the VAR model assumes a constant error covariance matrix. 
In recent years, there has been a growing interest in relaxing this assumption and modeling heteroskedasticity by use of stochastic volatility models. Seminal work include \cite{Primiceri2005} and \cite{Cogley2005}, who used VARs with time-varying parameters and stochastic volatilities and have had large influence ever since. For forecasting, allowing for stochastic volatility often improves the predictive ability, in particular when the forecasting performance is evaluated with respect to density forecasts. Important work in this regard include \cite{Clark2011,DAgostino2013,Clark2015}, whose results demonstrate the usefulness of stochastic volatilities. In the \pkg{mfbvar} package, a time-varying error covariance matrix can be modeled using either the common stochastic volatility model by \cite{Carriero2016} or using the factor stochastic volatility model based on the work by \cite{Kastner2017}. + +In the remainder of this section, we describe the prior distributions available in \pkg{mfbvar} and discuss some aspects of the implementations for sampling from the posterior distribution. + +\subsection{Priors for Regression Parameters} + +\subsubsection{Minnesota-style priors} +The model can be written on matrix form as +\begin{align} +X=W \Gamma + E, +\end{align} +where $W=(W_1, \dots, W_T)^\top$ with $W_t=(x_{t-1}^\top, \dots, x_{t-p}^\top, 1)^\top$, $E=(\epsilon_1, \dots, \epsilon_T)^\top$, and $\Gamma=(\Phi^\top, \phi)^\top$. The Minnesota prior for $\Gamma$ takes one of two forms depending on the specification of the error covariance component. We will use the term ``Minnesota prior'' to refer to the prior for the model with intercept in order to more easily contrast it with an alternative specification discussed in a later section. 
It does not, however, refer to the original Minnesota prior with a fixed diagonal error covariance matrix as used by \cite{Litterman1986}; rather, it should be interpreted as a normal prior for the regression parameters---including intercept---based on the Minnesota prior beliefs. + +\paragraph{Conditional normal prior} +A common prior for VAR models is a joint normal inverse Wishart prior for $(\Gamma, \Sigma)$ in which the prior $\Gamma$ is constructed conditionally on the error covariance $\Sigma$. The conditional prior distribution for $\Gamma$ is in this case a multivariate normal distribution of the form +\begin{align} +\operatorname{vec}(\Gamma)|\Sigma \sim \operatorname{N}(\operatorname{vec}(\underline{\Gamma}), \Sigma\otimes\underline{\Xi}),\label{eq:prior} +\end{align} +where $\underline{\Gamma}, \underline{\Xi}$ are prior parameters specified by the researcher. The \pkg{mfbvar} package follows common practice and specifies the structure of the moments of the prior distribution along the lines of the Minnesota prior beliefs, yielding +\begin{align} +\underline{\Gamma}(\underline{\gamma}) &= \begin{pmatrix}\operatorname{diag}(\underline{\gamma}) & 0_{n\times[(p-1)+1]}\end{pmatrix}^\top\label{eq:gamma}\\ +\xi_{i} &= \begin{cases}\frac{\lambda_1^2}{(l^{\lambda_3}s_r)^2}, &\text{lag $l$ of variable $r, i =(l-1)n+r$}\label{eq:xi}\\ +\lambda_4^2, &i=np+1\end{cases}, +\end{align} +where $\xi_i$ are the diagonal elements of $\underline{\Xi}$, and $s_j^2$ for $j=1, \dots, p$ are obtained as the residual variances from AR(4) regressions. As is customary, the prior means of all regression parameters are set to zero, except the AR(1) parameters ($\underline{\gamma}$). + +\paragraph{Independent normal prior} +The conditional normal prior imposes a symmetry in the prior for $\Gamma$ through the Kronecker structure. An alternative specification is to assume $\Gamma$ to be independent of the error covariance a priori. 
The prior is +\begin{align}\label{eq:independent} +\operatorname{vec}(\Gamma)\sim \operatorname{N}(\operatorname{vec}(\underline{\Gamma}), \underline{\Omega}) +\end{align} +where $\underline{\Omega}$ is the $n(np+1)\times n(np+1)$ diagonal matrix containing the prior variances of $\Phi_{l}^{(i,j)}$, i.e., element $(i, j)$ of $\Phi_l$ that relates lag $l$ of variable $j$ to variable $i$, and the vector of intercepts $\phi$. + +The prior variances are given by +\begin{align} +\VAR(\Phi_l^{(i, j)})=\begin{cases}\frac{\lambda_1^2}{(l^{\lambda_3})^2}, & \text{if } i=j\\ +\frac{\lambda_1^2\lambda_2^2}{(l^{\lambda_3})^2}\frac{s_i^2}{s_j^2}, &\text{otherwise}.\end{cases}\label{eq:uncondprior} +\end{align} +The main difference between \eqref{eq:xi} and \eqref{eq:uncondprior} is that the conditional normal prior in \eqref{eq:xi} enforces the restriction of symmetrical shrinkage of parameters in all equations. This restriction is not imposed in \eqref{eq:uncondprior}. The implication is that in \eqref{eq:xi} $\lambda_2$ is implicitly set to $\lambda_2=1$ (i.e., no penalization of terms corresponding to lags of other variables in a given equation), whereas \eqref{eq:uncondprior} allows cross-variable shrinkage to be enforced. The prior variance of the intercept is as before $\VAR(\phi)=10^4 I_n$. + +\subsubsection{Steady-state prior} +The steady-state prior proposed by \cite{Villani2009} reformulates \eqref{eq:original} to be on the mean-adjusted form \begin{align} +\Phi(L)(x_t-\Psi d_t)=\epsilon_t,\label{eq:meanadj} +\end{align} +where $\Phi(L)=(I_{n}-\Phi_1 L -\cdots - \Phi_p L^p)$ is an invertible lag polynomial. The intercept $\phi$ in \eqref{eq:original} can be replaced by the more general deterministic term $\Phi_0d_t$, where $\Phi_0$ is $n\times m$ and $d_t$ is $m\times 1$. The steady-state parameters $\Psi$ in \eqref{eq:meanadj} relate to $\Phi_0$ through $\Psi=[\Phi(L)]^{-1}\Phi_0$. 
By the reformulation, we obtain parameters $\Psi$ that immediately yield the unconditional mean of $x_t$---the steady state. The rationale is that while it is potentially difficult to express prior beliefs about $\Phi_0$, eliciting prior beliefs about $\Psi$ is often easier. + +\paragraph{Original steady-state prior} + +In the original steady-state prior proposed by \cite{Villani2009}, the prior for $\psi = \operatorname{vec}(\Psi)$ is given by $\psi \sim \operatorname{N}(\underline{\psi}, \underline{\Omega}_{\psi})$. The prior distribution for $\Phi$ in \pkg{mfbvar} is either of the conditional or independent normal priors in \eqref{eq:prior} and \eqref{eq:independent}, respectively. The only modification is that the constant column of $\Gamma$ is excluded. + +\paragraph{Hierarchical steady-state prior} + +\cite{Louzis2019} suggested an extension of the steady-state prior that utilizes a hierarchical specification based on the hierarchical normal-gamma shrinkage prior proposed by \cite{Griffin2010}; see also \cite{Huber2019} for an application of the normal-gamma prior to VAR models. There are two main reasons that justify the hierarchical steady-state prior. First, the normal-gamma prior induces a heavy-tailed unconditional prior for the steady-state parameters, which means that the prior generally pulls the posterior distribution towards the prior means, but with the possibility of larger deviations due to the excess kurtosis of the prior. Second, \cite{Louzis2019} relied on default values for the hyperpriors following the suggestion by \cite{Huber2019}. The consequence is that only prior means must be specified. It is oftentimes easier to have a reasonable prior belief for the means of the steady-state parameters than for the variances, and the issue of specifying the prior is therefore simplified. 
The structure of the prior is +\begin{equation} +\begin{aligned} +\psi_j|\omega_{\psi ,j}&\sim \operatorname{N}(\underline{\psi}_j, \omega_{\psi, j})\\ +\omega_{\psi, j}|\phi_\psi, \lambda_\psi &\sim \operatorname{G}(\phi_\psi, 0.5\phi_\psi\lambda_\psi)\\ +\phi_\psi &\sim \operatorname{Exp}(1)\\ +\lambda_\psi &\sim \operatorname{G}(c_0, c_1)\\ +j&=1, \dots, nm +\end{aligned}\label{eq:hss} +\end{equation} +where $\operatorname{G}(a,b)$ denotes the gamma distribution with shape-rate parametrization, and $\operatorname{Exp}(c)$ denotes the exponential distribution. \cite{AnkargrenUnossonYang2019} employed both types of steady-state priors in mixed-frequency BVARs and found that the hierarchical prior performed well. + +\subsection{Error Covariance Priors} +The \pkg{mfbvar} package includes both homoskedastic and heteroskedastic specifications, which are described next. +\subsubsection{Constant volatility} +Two of the more common priors for $(\Gamma, \Sigma)$ that are used for homoskedastic VAR models are the normal inverse Wishart and normal-diffuse priors. The former combines the conditional normal prior for $\Gamma$ with an inverse Wishart prior for $\Sigma$, and the latter uses the independent normal prior for $\Gamma$ in conjunction with a diffuse Jeffreys' prior for $\Sigma$. For this reason, the two alternatives for a homoskedastic $\Sigma$ in \pkg{mfbvar} are the inverse Wishart and diffuse priors. +\paragraph{Inverse Wishart prior} +The inverse Wishart prior is specified as +\begin{equation} +\begin{aligned} +\Sigma & \sim \operatorname{iW}(\underline{S}, \underline{\nu})\\ +\underline{S}&=(\underline{\nu}-n-1)\operatorname{diag}(s_1^2, \dots, s_n^2)\\ +\underline{\nu}&= n+2 +\end{aligned}\label{eq:iw} +\end{equation} +where $s_i^2$ are $n$ residual variances from auxiliary AR(4) regressions. The degrees of freedom is fixed to $n+2$ to ensure that the prior variance exists. 
The inverse Wishart prior for $\Sigma$ is in \pkg{mfbvar} always used with a conditional normal prior for the regression parameters---with or without a separate steady-state prior---to yield a standard normal inverse Wishart prior. + +\paragraph{Diffuse prior} +The diffuse prior for $\Sigma$ is the Jeffreys' prior given by +\begin{align} +p(\Sigma )\propto |\Sigma|^{-(n+1)/2}. +\end{align} +The \pkg{mfbvar} package uses the diffuse prior only in combination with the independent normal prior for the regression parameters---possibly with an additional prior for the steady-state parameters---so that the standard normal-diffuse prior is obtained. + +\subsubsection{Stochastic volatility} +The \pkg{mfbvar} package also includes two methods that relax the usual assumption of the error covariance matrix being constant over time. These two methods are common stochastic volatility, proposed by \cite{Carriero2016}, and factor stochastic volatility using the methodology developed by \cite{Kastner2017}. +\paragraph{Common stochastic volatility} +The common stochastic volatility specification presented by \cite{Carriero2016} assumes that the covariance structure in the model is constant over time, but adds a factor that enables time-dependent scaling of the error covariance matrix. More specifically, it is assumed that +\begin{align} +\VAR(\epsilon_t|f_t, \Sigma)=f_t\Sigma, +\end{align} +where $f_t$ is a scalar, $\Sigma$ is inverse Wishart as in \eqref{eq:iw}, and +\begin{equation} +\begin{aligned} +\log f_t&=\rho\log f_{t-1}+v_t\\ +v_t&\sim \operatorname{N}(0, \sigma^2)\\ +\rho&\sim \operatorname{N}(\underline{\mu}_\rho, \underline{\Omega}_\rho; |\rho|<1)\\ +\sigma^2&\sim \operatorname{IG}(\underline{d}\cdot\underline{\sigma}^2, \, \underline{d}), +\end{aligned}\label{eq:csv} +\end{equation} +where $\operatorname{N}(a, b; |x| " should be used with "+ " as the +%% continuation prompt. 
+%% - Comments within the code chunks should be avoided - these should be made +%% within the regular LaTeX text. + +\section[The R Package mfbvar]{The \proglang{R} Package \pkg{mfbvar}} + +The workflow promoted by the \pkg{mfbvar} package consists of three main steps. First, an object containing data, hyperparameters and settings is constructed. Second, the prior object is used as input to the main estimation function. Third, the results are processed. The purpose for disentangling the first and second steps is to separate specification from estimation, thereby making the calls in \proglang{R} easier to decipher. + +\subsection{Specification} +\label{sec:spec} +The main function for performing the first step is \code{set_prior}. Its arguments are listed in Table \ref{tab:overview}. The most important arguments are described below. +\begin{itemize} +\item \code{Y}: data input. Should be a list with components containing regularly spaced time series (that inherit from \code{ts} or \code{zooreg}). Names of variables are collected from the names of components that contain single time series. If a component stores multiple time series in a multivariate \code{ts} or \code{zooreg} object, then the names are instead collected from the corresponding column names. Monthly variables can only contain missing values at the end of the sample, and should precede quarterly variables in the list. Matrices in which quarterly variables are padded with \code{NA} and observations stored at the end of each quarter are also accepted, but then the frequency of each variable must be given in the argument \code{freq}. Tibbles and data frames can also be given as input, but require the same use of the \code{freq} argument. Both matrices and data frames should store dates (\code{YYYY-MM-DD}) as row names, or, for data frames, as a separate column. If the data input does not contain mixed frequencies, the \pkg{mfbvar} package provides some, but limited, support if no observations are missing. 
+\item \code{aggregation}: the aggregation scheme, either \code{``average''} for the intra-quarterly average, or \code{``triangular''} for the triangular weighting used by \cite{Mariano2003}. The latter is typically used for modeling growth rates, and the former for log-levels; our experience, however, indicates that results tend to be relatively indifferent to the choice. +\item \code{prior_Pi_AR1}: a numeric vector providing the prior mean for the AR(1) parameters ($\underline{\gamma}$). +\item \code{lambda1, lambda2, lambda3, lambda4}: one-dimensional numeric vectors providing the hyperparameters for overall tightness, cross-variable tightness, lag decay and intercept variance. The defaults are 0.2, 0.5, 1, and $10^4$, respectively. Note that \code{lambda2} is only used in the independent normal specification for $\Gamma$, which is only employed if the diffuse prior or a factor stochastic volatility model is used for $\Sigma$. The prior variance for the intercept, \code{lambda4}, is only used if the steady-state prior is not used. +\item \code{prior_psi_mean, prior_psi_Omega}: numeric vector and matrix giving the prior mean and covariance of the steady-state parameters. If the deterministic component of the model includes more terms than a constant, the prior moments are for $\psi=\operatorname{vec}(\Psi)$ and this order should be respected. The package includes a helper function, \code{interval_to_moments}, for simplifying the specification of priors for the steady states. The function \code{interval_to_moments} takes a matrix of prior $100*(1-\alpha)$ \% intervals and returns the mean vector and covariance matrix needed for \code{set_prior}. 
+\end{itemize} + +\setlength{\tabcolsep}{5pt} +\begin{sidewaystable} +\footnotesize +\centering +\begin{tabular}{rrlccccRcccc} +&&&&\multicolumn{3}{c}{Regression prior (\code{prior})} & \multicolumn{4}{c}{Variance (\code{var})}\\ +\cline{5-7} \cline{8-11} +Category&Argument & Description & Default & \code{``minn''} & \code{``ss''} & \code{``ssng''} & \code{``iw''} & \code{``diffuse''} & \code{``csv''} & \code{``fsv''}\\ +\hline +General&\code{Y} & data & & \checkmark&\checkmark & \checkmark&\checkmark&\checkmark&\checkmark & \checkmark\\ +&\code{aggregation} & aggregation scheme & \code{``average''} & \checkmark&\checkmark & \checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +&\code{prior_Pi_AR1} & $\underline{\gamma}$ in \eqref{eq:gamma} & 0 & \checkmark&\checkmark & \checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +&\code{lambda1} & overall tightness; $\lambda_1$ in \eqref{eq:xi} & 0.2 & \checkmark&\checkmark & \checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +&\code{lambda2} & cross-var shrinkage; $\lambda_2$ in \eqref{eq:xi} & 0.5 & \checkmark&\checkmark&\checkmark &&\checkmark & &\checkmark\\ +&\code{lambda3} & lag decay; $\lambda_3$ in \eqref{eq:xi} & 1 & \checkmark&\checkmark& \checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +&\code{lambda4} & intercept variance; $\lambda_4$ in \eqref{eq:xi} &10,000& \checkmark& & &\checkmark & \checkmark&\checkmark&\checkmark\\ +&\code{block\_exo} & block exogeneity & &\checkmark & \checkmark&\checkmark&&\checkmark & &\checkmark\\ +&\code{n_lags} & number of lags ($p$) & & \checkmark&\checkmark & \checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +&\code{n_fcst} & number of forecasts & 0 &\checkmark&\checkmark&\checkmark&\checkmark &\checkmark & \checkmark&\checkmark\\ +&\code{n\_thin} & thinning frequency & 1&\checkmark&\checkmark&\checkmark&\checkmark &\checkmark & \checkmark&\checkmark\\ +&\code{n_reps} & main draws & & \checkmark&\checkmark&\checkmark&\checkmark&\checkmark & 
\checkmark&\checkmark\\ +&\code{n_burnin} & burn-in draws & \code{n_reps} & \checkmark&\checkmark& \checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +&\code{freq} & frequencies of variables (if \code{Y} is matrix) & yes, if \code{Y} is list &\checkmark&\checkmark & \checkmark&\checkmark&\checkmark & \checkmark&\checkmark \\ +&\code{verbose}&progress bar in console & \code{FALSE}&\checkmark&\checkmark&\checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +\hline +SS & \code{d} & deterministc term && & \checkmark&\checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +prior &\code{d_fcst} & \code{d} for forecasting period && &\checkmark&\checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +&\code{prior_psi_mean} & prior mean, $\underline{\psi}$ & &&\checkmark&\checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +&\code{prior_psi_Omega}& prior covariance, $\underline{\Omega}_\psi$ && & \checkmark&\checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +&\code{check_roots}& keep only stable $\Phi(L)$ & \code{FALSE}&&\checkmark&\checkmark&\checkmark&\checkmark & \checkmark&\checkmark\\ +\hline +Hier.&\code{s} & scale of proposal for $\phi_\psi$ & -1000 (adaptive)&& &\checkmark & \checkmark&\checkmark&\checkmark & \checkmark \\ +SS&\code{prior\_ng} & $(c_0, c_1)$ for $\lambda_\psi$ in \eqref{eq:hss} & 0.01&& &\checkmark & \checkmark&\checkmark&\checkmark & \checkmark\\ +\hline +Common & \code{prior\_phi} & mean and variance of $\phi$ \eqref{eq:csv}&(0.9, 0.1)&\checkmark&\checkmark & \checkmark& & & \checkmark\\ +SV& \code{prior\_sigma2} & df and mean of $\sigma^2$ \eqref{eq:csv}&(0.01, 4)&\checkmark&\checkmark & \checkmark& & & \checkmark\\ +\hline +Factor&\code{n_fac} & number of factors &&\checkmark&\checkmark & \checkmark& & && \checkmark \\ +SV&\code{n\_cores} & number of cores &1&\checkmark&\checkmark & \checkmark& & && \checkmark\\ +&\code{...} & additional fsv args &&\checkmark&\checkmark & \checkmark& & && \checkmark 
+\end{tabular} +\caption{Overview of elements in the prior object. Checkmarks indicate that the argument applies for the prior or variance specification that is given at the top of the respective columns.} +\label{tab:overview} +\end{sidewaystable} + +In order to encourage and enable a pipe-like process of specification, the function \code{update_prior(prior_obj, ...)} can be used to add further specifications to \code{prior_obj}, where \code{prior_obj} is the object returned from \code{set_prior}. The class of the return from \code{set_prior} and \code{update_prior} is \code{mfbvar_prior} for which methods for the generic functions \code{summary}, \code{print} and \code{plot} are implemented (see Section \ref{sec:illustrations}). + +\subsection{Estimation} +With an object containing the specifications in place, the second step of the \pkg{mfbvar} workflow is to estimate the model. The function for doing so is \code{estimate_mfbvar}, with arguments: +\begin{itemize} +\item \code{mfbvar_prior}: an object of class \code{mfbvar_prior} obtained from \code{set_prior}/\code{update_prior} +\item \code{prior}: a string equal to \code{"minn"}, \code{"ss"} or \code{``ssng''} for estimating the model using the Minnesota prior, the steady-state prior, or the hierarchical steady-state prior +\item \code{variance}: a string equal to \code{"iw"}, \code{``diffuse''}, \code{``csv''} or \code{"fsv"} for selecting the inverse Wishart prior, the diffuse prior, the common stochastic volatility model, or the factor stochastic volatility model for the error covariance matrix in the model +\item \code{...}: additional arguments that are passed on to \code{update_prior} for temporarily overriding settings in \code{mfbvar_prior} +\end{itemize} + +\subsection{Processing} +For processing the results, \pkg{mfbvar} provides three functions to simplify this step: +\begin{itemize} +\item \code{predict}: a method for the generic \code{predict} function is implemented. 
The function returns a data frame according to the concept of tidy data \citep{Wickham2014} with the forecasts for the variables. The forecasts of the quarterly variables are returned either as monthly or quarterly forecasts. The data frame includes all post-burn-in draws, or quantiles of the posterior predictive distribution. +\item \code{plot}: the generic \code{plot} function can be used on the estimated object. The plot displays the forecasts and, if applicable, the posterior steady-state intervals. +\item \code{volplot}: a plotting function for displaying the posterior standard deviation of the errors over time. Only applicable if stochastic volatility is used. +\end{itemize} + +As discussed in Section \ref{sec:models}, the package also includes estimators for the marginal data density when the normal inverse Wishart prior is used. Given an object \code{x} obtained from \code{estimate_mfbvar} with \code{variance = "iw"}, the marginal data density is estimated by calling \code{mdd(x)}. If \code{prior = "minn"}, the only argument that can be provided is \code{p_trunc} giving the degree of truncation of the truncated normal distribution. + +\subsection{Use by Other Packages} +The computational burden can be large for mixed-frequency VARs and special care has been paid to the implementation of, in particular, the simulation smoother and the sampling of regression parameters. Because of the modularity of MCMC, these implementations can be leveraged by other packages extending the mixed-frequency VAR further. 
The following functions, implemented in \proglang{C++} via \pkg{RcppArmadillo}, can therefore easily be imported by other packages: +\begin{itemize} +\item \code{simsm_adaptive}, \code{simsm_adaptive_univariate}: the adaptive simulation smoother presented by \cite{Ankargren2019}, and its extension with univariate filtering suggested by \cite{Ankargren2019b} +\item \code{mvn_rue}, \code{mvn_rue_int}, \code{mvn_bcm}: procedures for sampling from multivariate normal posterior distributions using the algorithm by \cite{Rue2001}, the \cite{Rue2001} algorithm including non-zero means for the AR(1) parameters, and the \cite{Bhattacharya2016} algorithm +\item \code{update\_csv}, \code{update\_fsv}, \code{update\_ng}: functions for the sampling steps required for common stochastic volatility, factor stochastic volatility, and the hierarchical steady-state prior +\end{itemize} + +The functions are available as header files in \code{mfbvar/inst/include} and can therefore easily be imported by other packages, see e.g., \citet[chap.~10]{Wickham2015}. + + + +\newpage + +\section{Illustration} \label{sec:illustrations} +To illustrate the basic funcionality of the \pkg{mfbvar} package we here estimate mixed-frequency Bayesian VAR models on US data. The data can be retrieved from the ALFRED database provided by the Federal Reserve Bank of St. Louis through the \pkg{alfred} package \citep{Kleen2018}. The model we will use includes inflation and unemployment, which are published monthly, and GDP growth, which is published quarterly. 
+<>= +library("dplyr") +library("ggplot2") +library("alfred") + +variables <- c("CPIAUCSL", "UNRATE", "GDPC1") +out <- lapply(variables, get_alfred_series, + observation_start = "1980-01-01", + observation_end = "2018-11-01", + realtime_start = "2018-12-10", + realtime_end = "2018-12-10") + +@ + +<>= +library("dplyr") +library("ggplot2") +library("alfred") + +variables <- c("CPIAUCSL", "UNRATE", "GDPC1") +load("alfred_data.RData") + +@ + +The data start in January 1980 and we retrieve the vintage available on December 10, 2018. For correctly identifying monthly and quarterly data, the \pkg{mfbvar} package expects data to be provided in a list. Each component should contain regularly spaced time series that inherit from class \code{ts} \citep{R2019} or \code{zooreg} \citep{Zeileis2005}, see Section \ref{sec:spec}. Multiple time series sampled at the same frequency can either be stored together in a single component, or separately in individual components. + +To transform the data obtained from \pkg{alfred} into a form compatible with \pkg{mfbvar}, we create a helper function to aid in preparing the time series: +<<>>= +alfred_to_ts <- function(x, freq) { + ts(x[, 3], + start = c(1980, 1), + frequency = freq) +} + +mf_list <- mapply(alfred_to_ts, x = out, freq = c(12, 12, 4)) +names(mf_list) <- variables +@ +The list \code{mf\_list} is now of a form that \pkg{mfbvar} understands. However, the steady-state prior requires a stable VAR model, and so we need to transform consumer price index and real GDP into growth rates. To this end, we use the annualized log-difference of the variables. +<<>>= +log_diff <- function(x) { + freq <- frequency(x) + 100 * freq * diff(log(x)) +} + +mf_list[c("CPIAUCSL", "GDPC1")] <- + lapply(mf_list[c("CPIAUCSL", "GDPC1")], log_diff) +@ + +Finally, we trim the beginning of the sample so that the series do not start with missing values. 
+<<>>= +mf_list <- mapply(window, x = mf_list, + start = list(c(1980, 4), c(1980, 4), c(1980, 2))) +@ + +The data object provided to \pkg{mfbvar} is thus a list of three \code{ts} time series of different frequencies and lengths. +<<>>= +str(mf_list, vec.len = 2) +@ + +With the data in place, the workflow of the package next requires the user to first specify an object containing all the prior information, and then calling the main function to estimate the model. +\subsection{Setting the Prior} + +To create an initial, minimal prior we call \code{set_prior()} with the following arguments: +<<>>= +library("mfbvar") +prior <- set_prior(Y = mf_list, n_lags = 4, n_reps = 1000) +@ + +The \code{print} method for the prior displays what model specifications can be used with the provided information. +<<>>= +prior +@ + +As is indicated, we need to provide the deterministic term $d_t$ as well as the prior mean for the parameters in $\psi$ to also enable the possibility of estimating the model using the (hierarchical) steady-state prior. In many applications, $d_t=1$ and so the only deterministic term is the unconditional mean. The prior is commonly specified as independent 95 \% prior probability intervals. The prior intervals used here are given in Table~\ref{tab:ss} and mirror those used by \cite{Louzis2019,AnkargrenUnossonYang2019}. +\begin{table}[!t] +\centering +\begin{tabular}{ccc} +Inflation & Unemployment & GDP\\ +\hline +$(1, 3)$ & $(4, 8)$ & $(1, 3)$ +\end{tabular} +\caption{95 \% prior probability intervals for steady states (unconditional means)} +\label{tab:ss} +\end{table} + +A helper function \code{interval_to_moments()} is included in \pkg{mfbvar} to convert intervals to prior moments $\underline{\psi}$ and $\underline{\Omega}_\psi$. Having obtainted the moments, the prior is updated to include also specifications that enable estimation of the model using the steady-state prior. 
+<<>>= +prior_intervals <- matrix(c(1, 3, + 4, 8, + 1, 3), ncol = 2, byrow = TRUE) +moments <- interval_to_moments(prior_intervals) +prior <- update_prior(prior, + d = "intercept", + prior_psi_mean = moments$prior_psi_mean, + prior_psi_Omega = moments$prior_psi_Omega) +@ +The argument \code{d} should generally be a matrix, but because intercept-only applications are common passing only the string \code{"intercept"} is allowed. The prior steady-state intervals can be visualized by calling \code{plot} on the prior object. The intervals used here are displayed in Figure \ref{fig:ss_plot}. + +<>= +plot(prior) +@ + +We will next make forecasts 24 months ahead and must therefore update the prior to accomodate this request. The argument \code{n_fcst} takes the number of forecasts desired in terms of months, i.e., \code{n_fcst = 24} corresponds to two years. +<<>>= +prior <- update_prior(prior, n_fcst = 24) +@ + +The prior is now fully specified also for the steady-state prior. A summary of the specification can be obtained from the \code{summary} method implemented for the prior object: +<<>>= +summary(prior) +@ + + +\subsection{Estimating the Model} +The prior is prepared and its necessary components have been provided and so estimation is possible by calling the \code{estimate_mfbvar} function. In calling the function, either \code{prior = "minn"}, \code{prior = "ss"} or \code{prior = "ssng"} should be provided to indicate which regression prior to use. The argument \code{variance} determines the form of the error covariance matrix. We first consider the steady-state prior with and without hierachical shrinkage, and with an inverse Wishart prior for the error covariance matrix. 
+ +<>= +if (run_mod) { + mod_ss_iw <- estimate_mfbvar(prior, prior = "ss", variance = "iw") + mod_ssng_iw <- estimate_mfbvar(prior, prior = "ssng", variance = "iw") + mod_ss_csv <- estimate_mfbvar(prior, prior = "ss", variance = "csv") + mod_ss_fsv <- estimate_mfbvar(prior, prior = "ss", variance = "fsv", + n_fac = 1) + + predict_example <- predict(mod_ss_iw, pred_bands = 0.8) + p1 <- plot(mod_ss_iw, plot_start = "2010-01-01", nrow_facet = 3) + p2 <- plot(mod_ssng_iw, plot_start = "2010-01-01", nrow_facet = 3) + pred_df <- bind_rows("Inverse Wishart" = predict(mod_ss_iw, pred_bands = NULL), + "Common stochastic volatility" = predict(mod_ss_csv, pred_bands = NULL), + "Factor stochastic volatility" = predict(mod_ss_fsv, pred_bands = NULL), + .id = "Variance") %>% + filter(variable == "GDPC1") + p3 <- ggplot(pred_df, aes(y = factor(fcst_date), x = fcst, fill = Variance)) + + ggridges::stat_density_ridges(quantile_lines = TRUE, + quantiles = 2, alpha = 0.5) + + labs(x = "US GDP Growth", + y = "Date of Forecast") + + coord_cartesian(xlim = c(-5, 10)) + + theme_minimal() + + scale_fill_brewer(palette = "YlGnBu") + + const_vol <- median(sqrt(mod_ss_iw$Sigma[3, 3, ])) + + p4 <- varplot(mod_ss_fsv, variables = "GDPC1") + + geom_hline(yintercept = const_vol , + color = "red", linetype = "dashed") + + coord_cartesian(ylim = c(0, 20)) + + p5 <- varplot(mod_ss_csv, variables = "GDPC1") + + geom_hline(yintercept = const_vol, + color = "red", linetype = "dashed") + + coord_cartesian(ylim = c(0, 20)) + + mdd_example <- mdd(mod_ss_iw) + + library("parallel") + par_fun <- function(lambda1, prior) { + set.seed(2019) + mod_par <- estimate_mfbvar(prior, prior = "ss", variance = "iw", + lambda1 = lambda1, lambda3 = 1) + mdd(mod_par) + } + + cl <- makeCluster(2) + clusterEvalQ(cl, library("mfbvar")) + lambda1_seq <- seq(0.05, 1, by = 0.05) + result <- parSapply(cl, lambda1_seq, + par_fun, prior = prior) + stopCluster(cl) + max_val <- tibble(lambda1 = lambda1_seq, + mdd = result) %>% + 
filter(mdd == max(mdd)) %>% .$lambda1 + + + save(predict_example, mdd_example, lambda1_seq, result, max_val, + file = "vignettes/vignette_data.RData", + compress = "xz") + ggsave("vignettes/figures/ss_plots-1.pdf", p1, "pdf", width = 5.5, height = 5.5*1.5, units = "in") + ggsave("vignettes/figures/ss_plots-2.pdf", p2, "pdf", width = 5.5, height = 5.5*1.5, units = "in") + ggsave("vignettes/figures/ridges-1.pdf", p3, "pdf", width = 7, height = 7*0.65, units = "in") + ggsave("vignettes/figures/varplot-1.pdf", p4, "pdf", width = 5, height = 5*0.5, units = "in") + ggsave("vignettes/figures/varplot-2.pdf", p5, "pdf", width = 5, height = 5*0.5, units = "in") +} else { + load("vignette_data.RData") +} +@ + +<>= +mod_ss_iw <- estimate_mfbvar(prior, prior = "ss", variance = "iw") +mod_ssng_iw <- estimate_mfbvar(prior, prior = "ssng", variance = "iw") +@ + + +To estimate models with stochastic volatility, only the \code{variance} argument needs to be changed. For the factor stochastic volatility model, the number of factors must also be provided. +<>= +mod_ss_csv <- estimate_mfbvar(prior, prior = "ss", variance = "csv") +mod_ss_fsv <- estimate_mfbvar(prior, prior = "ss", variance = "fsv", + n_fac = 1) +@ +Temporary arguments can be added to the call to \code{estimate_mfbvar}, like \code{n_fac = 1} above. The purpose of first creating a prior object and then estimating the model is illustrated in the preceding code snippet. For two models with different priors but similar settings, we can leverage the same object (i.e., \code{prior}). Estimation of multiple models is thereby simplified as we can reuse the previous settings. + +\subsection{Processing the Results} + +The principle of tidy data \citep{Wickham2014} has been preserved in creating a method for the \code{predict} function. +<>= +predict(mod_ss_iw, pred_bands = 0.8) +@ +<>= +predict_example +@ +The forecasts produced by a mixed-frequency VAR are at the monthly frequency. 
Because the frequency of interest for quarterly variables is the quarterly frequency, forecasts are aggregated by default in the \code{predict} method. To visualize forecasts and posterior intervals for the steady states (if available), \code{plot} can be called directly on the output of \code{estimate_mfbvar}. + +<>= +plot(mod_ss_iw, plot_start = "2010-01-01", nrow_facet = 3) +plot(mod_ssng_iw, plot_start = "2010-01-01", nrow_facet = 3) +@ +\begin{figure}[!htb] +\subfloat[Steady-state prior\label{fig:ss_plots1}]{\includegraphics[width=0.49\linewidth]{figures/ss_plots-1} }\subfloat[Hierarchical steady-state prior\label{fig:ss_plots2}]{\includegraphics[width=0.49\linewidth]{figures/ss_plots-2} }\caption[Forecasts and posterior steady-state intervals]{Forecasts and posterior steady-state intervals}\label{fig:ss_plots} +\end{figure} +Figure \ref{fig:ss_plots} displays the forecasts and posterior steady-state intervals obtained using the steady-state prior with and without hierarchical shrinkage. The posterior intervals are similar, with a narrower interval for unemployment using the hierarchical specification, and with almost indistinguishable differences for inflation and GDP. Because of the similarity between the models, the forecasts from both models in Figure \ref{fig:ss_plots} show that the assessment made by the model is that the economy is in a relatively steady state with a stable rate of growth in the future. Unemployment is expected to rise slowly and return to the steady state in the long run. By default, the plotting method displays the forecasts for the quarterly variables on the quarterly scale, as in the output from \code{predict}. + +In order to see the difference between constant and time-varying error covariance matrices, we next compare the distributions of the GDP forecasts. By letting \code{pred_bands = NULL} be an argument to \code{predict}, we obtain the entire set of forecasts from the model. 
Combining them and plotting the distributions using the \pkg{ggridges} package \citep{Wilke2018} allows us to easily compare the distributions. + +<>= +pred_iw <- predict(mod_ss_iw, pred_bands = NULL) +pred_csv <- predict(mod_ss_csv, pred_bands = NULL) +pred_fsv <- predict(mod_ss_fsv, pred_bands = NULL) +pred_df <- bind_rows("Inverse Wishart" = pred_iw, + "Common stochastic volatility" = pred_csv, + "Factor stochastic volatility" = pred_fsv, + .id = "Variance") %>% + filter(variable == "GDPC1") +ggplot(pred_df, aes(y = factor(fcst_date), x = fcst, fill = Variance)) + + ggridges::stat_density_ridges(quantile_lines = TRUE, + quantiles = 2, alpha = 0.5) + + labs(x = "US GDP Growth", + y = "Date of Forecast") + + coord_cartesian(xlim = c(-5, 10)) + + theme_minimal() + + scale_fill_brewer(palette = "YlGnBu") +@ +\begin{figure}[!htb] +\includegraphics[width=\maxwidth]{figures/ridges-1} \caption[Distributions of forecasts produced using the steady-state prior with constant or time-varying error covariance]{Distributions of forecasts produced using the steady-state prior with constant or time-varying error covariance. The vertical lines represent the medians.}\label{fig:ridges} +\end{figure} +Figure \ref{fig:ridges} shows that stochastic volatility leads to narrower predictive distributions for the current forecast. In terms of the central tendencies, however, the differences are negligible. The resemblance is greater between the predictive distributions obtained from the two models using stochastic volatility than between any of the models with stochastic volatility and the model with constant volatility. Thus, the choice of stochastic volatility may be less important than whether to include it at all. + +The reason for the somewhat wider distribution obtained using constant volatility is that the error covariance matrix that is obtained is a compromise between regimes of high and low volatility. Using stochastic volatility, there is no need to make this compromise. 
To see this point more clearly, the error standard deviations implied by the stochastic volatility models can be plotted using the \code{varplot} function. + +<>= +const_vol <- median(sqrt(mod_ss_iw$Sigma[3, 3, ])) + +varplot(mod_ss_fsv, variables = "GDPC1") + + geom_hline(yintercept = const_vol , + color = "red", linetype = "dashed") + + coord_cartesian(ylim = c(0, 20)) + +varplot(mod_ss_csv, variables = "GDPC1") + + geom_hline(yintercept = const_vol, + color = "red", linetype = "dashed") + + coord_cartesian(ylim = c(0, 20)) +@ +\begin{figure} +\subfloat[Factor stochastic volatility\label{fig:varplot1}]{\includegraphics[width=0.49\linewidth]{figures/varplot-1} }\subfloat[Common stochastic volatility\label{fig:varplot2}]{\includegraphics[width=0.49\linewidth]{figures/varplot-2} }\caption[Standard deviation of the error term in the equation for GDP growth]{Standard deviation of the error term in the equation for GDP growth. The black solid and red dashed lines are the medians from the models with factor stochastic and constant volatility, respectively. The bands are obtained from the 95 \% posterior point-wise intervals.}\label{fig:varplot} +\end{figure} +Figure \ref{fig:varplot} shows that the standard deviation of the GDP error term has varied substantially. Periods of particularly high volatility are evident in the early 1980s, 1990s and 2000s as well as during the recent financial crisis at the end of the 2000s. The estimated volatilities are similar between the two models. They both capture the same peaks with the sole difference being that the model with common stochastic volatility estimated a peak in volatility around 2005 that the model with factor stochastic volatility did not. Volatility has in recent years been lower than usual. The line displaying the standard deviation from the model with constant volatility appears to capture a baseline, which the time-varying volatility is currently deviating somewhat from. 
For this reason, we see that the distribution of the forecasts in Figure \ref{fig:ridges} is narrower under a stochastic volatility specification. + +\subsection{Marginal Data Density} + +A generic function \code{mdd()} is provided that facilitates estimation of the marginal data density if the inverse Wishart prior is used for $\Sigma$. The marginal data density for the model using the steady-state prior with constant volatility is +<>= +mdd(mod_ss_iw) +@ +<>= +mdd_example +@ + +The marginal data density in itself is not particularly informative. One of its roles, however, is that it can be used as a way of selecting hyperparameters. By searching over a grid of values, the pair ($\lambda_1, \lambda_3$) that maximizes the marginal data density can be used. Such a grid search is embarrassingly parallel and can be computed using the \pkg{parallel} package \citep{R2019}. + + +<>= +library("parallel") +par_fun <- function(lambda1, prior) { + set.seed(2019) + mod_par <- estimate_mfbvar(prior, prior = "ss", variance = "iw", + lambda1 = lambda1, lambda3 = 1) + mdd(mod_par) +} + +cl <- makeCluster(4) +clusterEvalQ(cl, library("mfbvar")) +lambda1_seq <- seq(0.05, 1, by = 0.05) +result <- parSapply(cl, lambda1_seq, + par_fun, prior = prior) +stopCluster(cl) +@ + + +We are here fixing the lag decay to $\lambda_3=1$ and consider the grid of values $\{0.05, 0.1, \dots, 0.95, 1.00\}$ for $\lambda_1$. Figure \ref{fig:mdd_plot} displays the results. + +<>= +plot_df <- tibble(lambda1 = lambda1_seq, + mdd = result) +ggplot(plot_df, aes(x = lambda1, y = mdd)) + + geom_line() + + geom_point(data = filter(plot_df, mdd == max(mdd))) + + labs(y = "Marginal data density (log)", + x = bquote(lambda[1])) + + theme_minimal() +@ + +The highest value of the marginal data density is obtained for $\lambda_1=0.4$, indicating a lower degree of shrinkage in the model than what has been used in the previous estimations. 
However, the marginal data density is relatively flat as a function of $\lambda_1$ around 0.2--0.5, thereby demonstrating a certain degree of indifference with respect to $\lambda_1$ in this neighborhood. + + +%% -- Summary/conclusions/discussion ------------------------------------------- +\newpage +\section{Conclusion} \label{sec:summary} +The \pkg{mfbvar} package introduces a user-friendly interface for estimating mixed-frequency vector autoregressions using Bayesian techniques. As such, it fills a void and provides additional functionality compared to existing packages such as \pkg{midasr} \citep{Ghysels2016b} and \pkg{BMR} \citep{OHara2017}. + +We have discussed the models that can be estimated in the \pkg{mfbvar} and the workflow of the package. An application to a small mixed-frequency VAR using monthly inflation and unemployment, and quarterly GDP growth was used to illustrate the functionality of the package. Aside from the features directly visible to the user, the package also provides header files for the key steps of the MCMC algorithm used for estimating the models. These can easily be imported and used by other packages, and thus we hope to make also the use of other, more customized mixed-frequency VARs more frequent. + + +%% -- Optional special unnumbered sections ------------------------------------- + + +\section*{Acknowledgments} + +This work has been supported by Jan Wallanders och Tom Hedelius stiftelse samt Tore Browaldhs stiftelse, grant number P2016-0293:1. + +%% -- Bibliography ------------------------------------------------------------- +%% - References need to be provided in a .bib BibTeX database. +%% - All references should be made with \cite, \citet, \citep, \citealp etc. +%% (and never hard-coded). See the FAQ for details. +%% - JSS-specific markup (\proglang, \pkg, \code) should be used in the .bib. +%% - Titles in the .bib should be in title case. +%% - DOIs should be included where available. 
+ +\bibliography{refs} + + +%% -- Appendix (if any) -------------------------------------------------------- +%% - After the bibliography with page break. +%% - With proper section titles and _not_ just "Appendix". + +%% ----------------------------------------------------------------------------- + + +\end{document} diff --git a/vignettes/refs.bib b/vignettes/refs.bib new file mode 100644 index 0000000..3e041bc --- /dev/null +++ b/vignettes/refs.bib @@ -0,0 +1,759 @@ +@article{Aguilar2000, + abstract = {We discuss the development of dynamic factor models for multivariate financial time series, and the incorporation of stochastic volatility components for latent factor processes. Bayesian inference and computation is developed and explored in a study of the dynamic factor structure of daily spot exchange rates for a selection of international currencies. The models are direct generalizations of univariate stochastic volatility models and represent specific varieties of models recently discussed in the growing multivariate stochastic volatility literature. We discuss model fitting based on retrospective data and sequential analysis for forward filtering and short-term forecasting. Analyses are compared with results from the much simpler method of dynamic variance-matrix discounting that, for over a decade, has been a standard approach in applied financial econometrics. We study these models in analysis, forecasting, and sequential portfolio allocation for a selected set of international exchange-rate-return time series. Our goals are to understand a range of modeling questions arising in using these factor models and to explore empirical performance in portfolio construction relative to discount approaches. 
We report on our experiences and conclude with comments about the practical utility of structured factor models and on future potential model extensions.}, + author = {Omar Aguilar and Mike West}, + journal = {Journal of Business \& Economic Statistics}, + number = {3}, + pages = {338--357}, + publisher = {[American Statistical Association, Taylor \& Francis, Ltd.]}, + title = {Bayesian Dynamic Factor Models and Portfolio Allocation}, + volume = {18}, + year = {2000}, + doi={10.1080/07350015.2000.10524875} +} + @Manual{Allaire2019, + title = {RcppParallel: Parallel Programming Tools for 'Rcpp'}, + author = {JJ Allaire and Romain Francois and Kevin Ushey and Gregory Vandenbrouck and Marcus Geelnard and {Intel}}, + year = {2019}, + note = {R package version 4.4.3}, + url = {https://CRAN.R-project.org/package=RcppParallel}, + } +@article{Ankargren2016, +abstract = {{\textcopyright} 2016 The Author(s)This paper first describes financial variables that have been constructed to correspond to various channels in the transmission mechanism. Next, a Bayesian VAR model for the macroeconomy, with priors on the steady states, is augmented with these financial variables and estimated using Swedish data for 1989–2015. The results support three conclusions. First, the financial system is important and the strength of the results is dependent on identification, with the financial variables accounting for 10–25 {\%} of the forecast error variance of Swedish GDP growth. Second, the suggested model produces an earlier signal regarding the probability of recession, compared to a model without financial variables. Third, the model's forecasts for the deep downturn in 2008 and 2009, conditional on the development of the financial variables, outperform a macro-model that lacks financial variables. Furthermore, this improvement in modelling Swedish GDP growth during the financial crisis does not come at the expense of unconditional predictive power.
Taken together, the results suggest that the proposed model presents an accessible possibility to analyse the macro-financial linkages and the GDP developments, especially during a financial crisis.}, +author = {Ankargren, Sebastian and Bjellerup, M{\aa}rten and Shahnazarian, Hovick}, +doi = {10.1007/s00181-016-1175-4}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Ankargren, Bjellerup, Shahnazarian{\_}2016.pdf:pdf}, +journal = {Empirical Economics}, +keywords = {Bayesian VAR,Business cycle,Credit cycle,Financial indicators,Macroeconomy,Transmission channels}, +pages = {1553--1586}, +publisher = {Springer Berlin Heidelberg}, +title = {{The Importance of the Financial System for the Real Economy}}, +year = {2017}, +volume = {53}, +number = {4} +} +@techreport{Ankargren2018, +author = {Ankargren, Sebastian and Unosson, M{\aa}ns and Yang, Yukai}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Ankargren, Yang, Unosson{\_}2017.pdf:pdf}, +keywords = {forecast-,macroeconometrics,marginal data density,state space models,var}, +pages = {1--30}, +title = {{A Mixed-Frequency Bayesian Vector Autoregression with a Steady-State Prior}}, +year = {2018}, +institution = {Department of Statistics, Uppsala University}, +type = {Working Paper No.}, +number = {2018:2} +} +@ARTICLE{AnkargrenUnossonYang2019, +author = {Ankargren, Sebastian and Unosson, M\r{a}ns and Yang, Yukai}, +file = {:Users/sebastianankargren/Documents/Mendeley Desktop/Ankargren, Unosson, Yang - 2018 - A mixed-frequency Bayesian vector autoregression with a steady-state prior.pdf:pdf}, +title = {A Flexible Mixed-Frequency Vector Autoregression with a Steady-State Prior}, +year = {2019}, +arxivId = {arXiv:1911.09151}, +note = {\href{http://arxiv.org/abs/1911.09151}{arXiv:1911.09151}}, +journal = {Preprint available on arXiv} +} + +@ARTICLE{Ankargren2019, +author = {Ankargren, Sebastian and Jon\'{e}us, Paulina}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley 
Desktop/Ankargren, Yang, Unosson{\_}2017.pdf:pdf}, +keywords = {forecast-,macroeconometrics,marginal data density,state space models,var}, +title = {Simulation Smoothing for Large Mixed-Frequency {VARs}}, +year = {2019}, +arxivId = {arXiv:1907.01075}, +note = {\href{http://arxiv.org/abs/1907.01075}{arXiv:1907.01075}}, +journal = {Preprint available on arXiv} +} +@ARTICLE{Ankargren2019b, +author = {Ankargren, Sebastian and Jon\'{e}us, Paulina}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Ankargren, Yang, Unosson{\_}2017.pdf:pdf}, +keywords = {forecast-,macroeconometrics,marginal data density,state space models,var}, +title = {Estimating Large Mixed-Frequency {Bayesian} {VAR} Models}, +year = {2019}, +arxivId = {arXiv:1912.02231}, +note = {\href{http://arxiv.org/abs/1912.02231}{arXiv:1912.02231}}, +journal = {Preprint available on arXiv} +} + +@article{Baffigi2004, +abstract = {Quantitative information on the current state of the economy is crucial to economic policy-making and to early understanding of the economic situation, but the quarterly national account (NA) data for GDP in the euro area are released with a substantial delay. The aim of the paper is to examine the forecast ability of bridge models (BM) for GDP growth in the euro area. BM 'bridge the gap' between the information content of timely updated indicators and the delayed (but more complete) NA. In this paper, BM are estimated for aggregate GDP and components both area-wide and for the three main countries of the euro area. Their short-term (one- and two-quarter ahead) forecasting performance is assessed with respect to benchmark univariate/multivariate statistical models, and a small structural model. The paper shows that national BM fare better than benchmark models. In addition, euro area GDP and its components are more precisely predicted by aggregating national forecasts. {\textcopyright} 2003 International Institute of Forecasters. Published by Elsevier B.V.
All rights reserved.}, +author = {Baffigi, Alberto and Golinelli, Roberto and Parigi, Giuseppe}, +doi = {10.1016/S0169-2070(03)00067-0}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Baffigi, Golinelli, Parigi{\_}2004.pdf:pdf}, +isbn = {1390512092600}, +journal = {International Journal of Forecasting}, +keywords = {Bridge model,Out-of-sample forecasting accuracy,Short-term GDP forecast for the euro area}, +number = {3}, +pages = {447--460}, +title = {{Bridge models to forecast the euro area GDP}}, +volume = {20}, +year = {2004} +} +@article{Banbura2010, +abstract = {This paper shows that vector auto regression (VAR) with Bayesian shrinkage is an appropriate tool for large dynamic models. We build on the results of De Mol and co-workers (2008) and show that, when the degree of shrinkage is set in relation to the cross-sectional dimension, the forecasting performance of small monetary VARs can be improved by adding additional macroeconomic variables and sectoral information. In addition, we show that large VARs with shrinkage produce credible impulse responses and are suitable for structural analysis.}, +author = {Ba{\'{n}}bura, Marta and Giannone, Domenico and Reichlin, Lucrezia}, +doi = {10.1002/jae.1137}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Ba{\'{n}}bura, Giannone, Reichlin{\_}2010.pdf:pdf}, +journal = {Journal of Applied Econometrics}, +number = {1}, +pages = {71--92}, +title = {{Large Bayesian Vector Auto Regressions}}, +volume = {25}, +year = {2010} +} +@article{Bhattacharya2016, +abstract = {SUMMARY We propose an efficient way to sample from a class of structured multivariate Gaussian distributions. The proposed algorithm only requires matrix multiplications and linear system solutions. Its computational complexity grows linearly with the dimension, unlike existing algorithms that rely on Cholesky factoriza-tions with cubic complexity. 
The algorithm is broadly applicable in settings where Gaussian scale mixture priors are used on high-dimensional parameters. Its effectiveness is illustrated through a high-dimensional regression problem with a horseshoe prior on the regression coefficients. Other potential applications are outlined.}, +archivePrefix = {arXiv}, +arxivId = {1506.04778}, +author = {Bhattacharya, Anirban and Chakraborty, Antik and Mallick, Bani K.}, +doi = {10.1093/biomet/asw042}, +file = {:Users/sebastianankargren/Documents/Mendeley Desktop/Bhattacharya, Chakraborty, Mallick - 2016 - Fast sampling with Gaussian scale mixture priors in high-dimensional regression.pdf:pdf}, +journal = {Biometrika}, +keywords = {Confidence interval,Gaussian scale mixture,Global-local prior,Shrinkage,Sparsity.,bayesian,high dimensional,mcmc}, +mendeley-tags = {bayesian,high dimensional,mcmc}, +number = {4}, +pages = {985--991}, +title = {{Fast sampling with Gaussian scale mixture priors in high-dimensional regression}}, +volume = {103}, +year = {2016} +} +@article{Carriero2015, +abstract = {This paper develops a method for producing current-quarter forecasts of GDP growth with a (possibly large) range of available within-the-quarter monthly observations of economic indicators, such as employment and industrial production, and financial indicators, such as stock prices and interest rates. In light of existing evidence of time variation in the variances of shocks to GDP, we consider versions of the model with both constant variances and stochastic volatility. We also evaluate models with either constant or time-varying regression coefficients. We use Bayesian methods to estimate the model, in order to facilitate providing shrinkage on the (possibly large) set of model parameters and conveniently generate predictive densities. We provide results on the accuracy of nowcasts of real-time GDP growth in the U.S. from 1985 through 2011. 
In terms of point forecasts, our proposal is comparable to alternative econometric methods and survey forecasts. In addition, it provides reliable density forecasts, for which the stochastic volatility specification is quite useful, while parameter time-variation does not seem to matter.}, +author = {Carriero, Andrea and Clark, Todd E. and Marcellino, Massimiliano}, +doi = {10.1111/rssa.12092}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Carriero, Clark, Marcellino{\_}2015(2).pdf:pdf}, +journal = {Journal of the Royal Statistical Society. Series A: Statistics in Society}, +keywords = {Bayesian methods,Forecasting,Mixed frequency models,Prediction}, +number = {4}, +pages = {837--862}, +title = {{Realtime Nowcasting with a Bayesian Mixed Frequency Model with Stochastic Volatility}}, +volume = {178}, +year = {2015} +} +@article{Carriero2016, +author = {Carriero, Andrea and Clark, Todd E. and Marcellino, Massimiliano}, +doi = {10.1080/07350015.2015.1040116}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Carriero, Clark, Marcellino - 2016 - Common Drifting Volatility in Large Bayesian VARs.pdf:pdf}, +journal = {Journal of Business {\&} Economic Statistics}, +keywords = {bayesian,forecasting,high dimensional,mcmc,prediction,stochastic volatility,var}, +mendeley-tags = {bayesian,high dimensional,mcmc,stochastic volatility,var}, +number = {3}, +pages = {375--390}, +title = {Common Drifting Volatility in Large {Bayesian} {VARs}}, +volume = {34}, +year = {2016} +} +@article{Chib1995, +abstract = {In the context of Bayes estimation via Gibbs sampling, with or without data augmentation, a simple approach is developed for computing the marginal density of the sample data (marginal likelihood) given parameter draws from the posterior distribution. Consequently, Bayes factors for model comparisons can be routinely computed as a by-product of the simulation. Hitherto, this calculation has proved extremely challenging. 
Our approach exploits the fact that the marginal density can be expressed as the prior times the likelihood function over the posterior density. This simple identity holds for any parameter value. An estimate of the posterior density is shown to be available if all complete conditional densities used in the Gibbs sampler have closed-form expressions. To improve accuracy, the posterior density is estimated at a high density point, and the numerical standard error of resulting estimate is derived. The ideas are applied to probit regression and finite mixture models.}, +author = {Chib, Siddhartha}, +doi = {10.1080/01621459.1995.10476635}, +file = {:C$\backslash$:/Users/seban876/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Chib - 1995 - Marginal Likelihood from the Gibbs Output(2).pdf:pdf}, +isbn = {0162-1459}, +issn = {1537274X}, +journal = {Journal of the American Statistical Association}, +keywords = {Bayes factor,Estimation of normalizing constant,Finite mixture models,Linear regression,Markov chain Monte Carlo,Markov mixture model,Multivariate density estimation,Numerical standard error,Probit regression,Reduced conditional density}, +number = {432}, +pages = {1313--1321}, +pmid = {12510683}, +title = {Marginal Likelihood from the {Gibbs} Output}, +volume = {90}, +year = {1995} +} +@article{Clark2011, +abstract = {Central banks and other forecasters are increasingly interested in various aspects of density forecasts. However, recent sharp changes in macroeconomic volatility, including the Great Moderation and the more recent sharp rise in volatility associated with increased variation in energy prices and the deep global recession?pose significant challenges to density forecasting. Accordingly, this paper examines, with real-time data, density forecasts of U.S. GDP growth, unemployment, inflation, and the federal funds rate from Bayesian vector autoregression (BVAR) models with stochastic volatility. 
The results indicate that adding stochastic volatility to BVARs materially improves the real-time accuracy of density forecasts. This article has supplementary material online.$\backslash$nCentral banks and other forecasters are increasingly interested in various aspects of density forecasts. However, recent sharp changes in macroeconomic volatility, including the Great Moderation and the more recent sharp rise in volatility associated with increased variation in energy prices and the deep global recession?pose significant challenges to density forecasting. Accordingly, this paper examines, with real-time data, density forecasts of U.S. GDP growth, unemployment, inflation, and the federal funds rate from Bayesian vector autoregression (BVAR) models with stochastic volatility. The results indicate that adding stochastic volatility to BVARs materially improves the real-time accuracy of density forecasts. This article has supplementary material online.}, +author = {Clark, Todd E.}, +doi = {10.1198/jbes.2010.09248}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Clark{\_}2011.pdf:pdf}, +journal = {Journal of Business {\&} Economic Statistics}, +keywords = {bayesian methods,steady-state prior}, +number = {3}, +pages = {327--341}, +title = {{Real-Time Density Forecasts From Bayesian Vector Autoregressions With Stochastic Volatility}}, +volume = {29}, +year = {2011} +} +@article{Clark2015, +author = {Clark, Todd E. and Ravazzolo, Francesco}, +title = {Macroeconomic Forecasting Performance under Alternative Specifications of Time-Varying Volatility}, +journal = {Journal of Applied Econometrics}, +volume = {30}, +number = {4}, +pages = {551-575}, +doi = {10.1002/jae.2379}, +abstract = {SummaryThis paper compares alternative models of time-varying volatility on the basis of the accuracy of real-time point and density forecasts of key macroeconomic time series for the USA. 
We consider Bayesian autoregressive and vector autoregressive models that incorporate some form of time-varying volatility, precisely random walk stochastic volatility, stochastic volatility following a stationary AR process, stochastic volatility coupled with fat tails, GARCH and mixture of innovation models. The results show that the AR and VAR specifications with conventional stochastic volatility dominate other volatility specifications, in terms of point forecasting to some degree and density forecasting to a greater degree. Copyright © 2014 John Wiley \& Sons, Ltd.}, +year = {2015} +} + + +@article{Cogley2005, +title = "Drifts and Volatilities: {Monetary} Policies and Outcomes in the Post {WWII} {US}", +journal = "Review of Economic Dynamics", +volume = "8", +number = "2", +pages = "262--302", +year = "2005", +doi = "10.1016/j.red.2004.10.009", +author = "Timothy Cogley and Thomas J. Sargent", +abstract = "For a VAR with drifting coefficients and stochastic volatilities, we present posterior densities for several objects that are pertinent for designing and evaluating monetary policy. These include measures of inflation persistence, the natural rate of unemployment, a core rate of inflation, and ‘activism coefficients’ for monetary policy rules. Our posteriors imply substantial variation of all of these objects for post WWII US data. After adjusting for changes in volatility, persistence of inflation increases during the 1970s, then falls in the 1980s and 1990s. Innovation variances change systematically, being substantially larger in the late 1970s than during other times. Measures of uncertainty about core inflation and the degree of persistence covary positively. We use our posterior distributions to evaluate the power of several tests that have been used to test the null hypothesis of time-invariance of autoregressive coefficients of VARs against the alternative of time-varying coefficients. 
Except for one, we find that those tests have low power against the form of time variation captured by our model." +} +@article{DAgostino2013, +author = {D'Agostino, Antonello and Gambetti, Luca and Giannone, Domenico}, +title = {Macroeconomic Forecasting and Structural Change}, +journal = {Journal of Applied Econometrics}, +volume = {28}, +number = {1}, +pages = {82--101}, +doi = {10.1002/jae.1257}, +abstract = {SUMMARY The aim of this paper is to assess whether modeling structural change can help improving the accuracy of macroeconomic forecasts. We conduct a simulated real-time out-of-sample exercise using a time-varying coefficients vector autoregression (VAR) with stochastic volatility to predict the inflation rate, unemployment rate and interest rate in the USA. The model generates accurate predictions for the three variables. In particular, the forecasts of inflation are much more accurate than those obtained with any other competing model, including fixed coefficients VARs, time-varying autoregressions and the naïve random walk model. The results hold true also after the mid 1980s, a period in which forecasting inflation was particularly hard. 
Copyright © 2011 John Wiley \& Sons, Ltd.}, +year = {2013} +} +@incollection{DelNegro2011, +address = {Oxford}, +author = {{Del Negro}, Marco and Schorfheide, Frank}, +booktitle = {The Oxford Handbook of Bayesian Econometrics}, +doi = {10.1093/oxfordhb/9780199559084.013.0008}, +editor = {Geweke, John and Koop, Gary and van Dijk, Herman}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Del Negro, Schorfheide{\_}2011.pdf:pdf}, +keywords = {Autocovariance properties,Bayesian methods,Macroeconomists,Vector autoregressive models}, +pages = {293--389}, +publisher = {Oxford University Press}, +title = {{Bayesian Macroeconometrics}}, +year = {2011} +} +@techreport{Dieppe2016, +author = {Dieppe, Alistair and Legrand, Romain and van Roye, Bj{\"{o}}rn}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Dieppe, Legrand, van Roye{\_}2016.pdf:pdf}, +institution = {European Central Bank}, +keywords = {Bayesian VAR,VAR,econometric software,forecasting,panel Bayesian VAR,structural}, +type = {Working Paper No.}, +title = {{The BEAR Toolbox}}, +year = {2016}, +number= {1934} +} +@article{Durbin2002, +author = {Durbin, J and Koopman, S J}, +doi = {10.1093/biomet/89.3.603}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Durbin, Koopman{\_}2002.pdf:pdf}, +journal = {Biometrika}, +number = {3}, +pages = {603--615}, +title = {{A Simple and Efficient Simulation Smoother for State Space Time Series Analysis}}, +volume = {89}, +year = {2002} +} +@book{Durbin2012, +address = {Oxford, UK}, +author = {Durbin, James and Koopman, Siem Jan}, +doi = {10.1093/acprof:oso/9780199641178.001.0001}, +edition = {Second}, +publisher = {Oxford University Press}, +title = {{Time Series Analysis by State Space Methods}}, +year = {2012} +} +@article{Eddelbuettel2011, +author = {Eddelbuettel, Dirk and Francois, Romain}, +doi = {10.18637/jss.v040.i08}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Eddelbuettel, 
Francois{\_}2011.pdf:pdf}, +isbn = {9781461468677}, +journal = {Journal of Statistical Software}, +keywords = {c,call,foreign function interface,r}, +number = {8}, +title = {{Seamless R and C++ Integration with Rcpp}}, +volume = {40}, +year = {2011} +} +@article{Eddelbuettel2014, +abstract = {The R statistical environment and language has demonstrated particular strengths for interactive development of statistical algorithms, as well as data modelling and visualisation. Its current implementation has an interpreter at its core which may result in a performance penalty in comparison to directly executing user algorithms in the native machine code of the host CPU. In contrast, the C++ language has no built-in visualisation capabilities, handling of linear algebra or even basic statistical algorithms; however, user programs are converted to high-performance machine code, ahead of execution. A new method avoids possible speed penalties in R by using the Rcpp extension package in conjunction with the Armadillo C++ matrix library. In addition to the inherent performance advantages of compiled code, Armadillo provides an easy-to-use template-based meta-programming framework, allowing the automatic pooling of several linear algebra operations into one, which in turn can lead to further speedups. With the aid of Rcpp and Armadillo, conversion of linear algebra centred algorithms from R to C++ becomes straightforward. The algorithms retain the overall structure as well as readability, all while maintaining a bidirectional link with the host R environment. Empirical timing comparisons of R and C++ implementations of a Kalman filtering algorithm indicate a speedup of several orders of magnitude. {\textcopyright} 2013 Elsevier Inc.
All rights reserved.}, +author = {Eddelbuettel, Dirk and Sanderson, Conrad}, +doi = {10.1016/j.csda.2013.02.005}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Eddelbuettel, Sanderson{\_}2014.pdf:pdf}, +journal = {Computational Statistics and Data Analysis}, +keywords = {C++,Linear algebra,R,Software}, +pages = {1054--1063}, +publisher = {Elsevier B.V.}, +title = {{RcppArmadillo: Accelerating R with High-Performance C++ Linear Algebra}}, +volume = {71}, +year = {2014} +} +@article{Foroni2013, +abstract = {The development of models for variables sampled at di¤erent frequencies has attracted substantial interest in the recent econometric literature. In this paper we provide an overview of the most common techniques, including bridge equa- tions, MIxed DAta Sampling (MIDAS) models, mixed frequency VARs, and mixed frequency factor models. We also consider alternative techniques for handling the ragged edge of the data, due to asynchronous publication. Finally, we survey the main empirical applications based on alternative mixed frequency models. J.E.L.}, +author = {Foroni, Claudia and Marcellino, Massimiliano}, +doi = {10.2139/ssrn.2268912}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Foroni, Marcellino{\_}2013.pdf:pdf}, +keywords = {Claudia Foroni,Massimiliano Marcellino}, +title = {{A Survey of Econometric Methods for Mixed-Frequency Data}}, +year = {2013} +} + +@article{FuentesAlbero2013, +abstract = {We introduce two estimators for estimating the Marginal Data Density (MDD) from the Gibbs output. Our methods are based on exploiting the analytical tractability condition, which requires that some parameter blocks can be analytically integrated out from the conditional posterior densities. This condition is satisfied by several widely used time series models. An empirical application to six-variate VAR models shows that the bias of a fully computational estimator is sufficiently large to distort the implied model rankings. 
One of the estimators is fast enough to make multiple computations of MDDs in densely parameterized models feasible. ?? 2013 Elsevier B.V. All rights reserved.}, +author = {Fuentes-Albero, Cristina and Melosi, Leonardo}, +doi = {10.1016/j.jeconom.2013.03.002}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Fuentes-Albero, Melosi{\_}2013.pdf:pdf}, +journal = {Journal of Econometrics}, +keywords = {Bayesian econometrics,Gibbs sampler,Marginal likelihood,Reciprocal importance sampling,Time series econometrics}, +number = {2}, +pages = {132--141}, +publisher = {Elsevier B.V.}, +title = {{Methods for Computing Marginal Data Densities from the Gibbs Output}}, +volume = {175}, +year = {2013} +} +@article{Gelfand1992, +author = {Gelfand, Alan E and Smith, Adrian F M and Lee, Tai-ming}, +doi = {10.2307/2290286}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Gelfand, Smith, Lee{\_}1992.pdf:pdf}, +journal = {Journal of the American Statistical Association}, +keywords = {actuarial graduation,bayesian inference,constrained parameter problems arise,constrained parameters,dinal categorical data,gibbs sampler,in a wide variety,including bioassay,of applications,or-,reliability devel-,response surfaces,truncated data}, +number = {418}, +pages = {523--532}, +title = {{Bayesian Analysis of Constrained Parameter and Truncated Data Problems Using Gibbs Sampling}}, +volume = {87}, +year = {1992} +} +@article{Ghysels2016b, +author = {Ghysels, Eric and Kvedaras, Virmantas and Zemlys, Vaidotas}, +doi = {10.18637/jss.v072.i04}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Ghysels, Kvedaras, Zemlys{\_}2016.pdf:pdf}, +journal = {Journal of Statistical Software}, +keywords = {midas,specification test}, +number = {4}, +title = {{Mixed Frequency Data Sampling Regression Models: The R Package midasr}}, +volume = {72}, +year = {2016} +} +@article{Ghysels2007, +abstract = {We explore mixed data sampling (henceforth MIDAS) regression 
models. The regressions involve time series data sampled at different frequencies. Volatility and related processes are our prime focus, though the regression method has wider applications in macroeconomics and finance, among other areas. The regressions combine recent developments regarding estimation of volatility and a not-so-recent literature on distributed lag models. We study various lag structures to parameterize parsimoniously the regressions and relate them to existing models. We also propose several new extensions of the MIDAS framework. The paper concludes with an empirical section where we provide further evidence and new results on the risk–return trade-off. We also report empirical evidence on microstructure noise and volatility forecasting.}, +author = {Ghysels, Eric and Sinko, Arthur and Valkanov, Rossen}, +doi = {10.1080/07474930600972467}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Ghysels, Sinko, Valkanov{\_}2007.pdf:pdf}, +journal = {Econometric Reviews}, +keywords = {C22,C53,Microstructure noise,Nonlinear MIDAS,Risk,Tick-by-tick applications,Volatility}, +number = {1}, +pages = {53--90}, +title = {{MIDAS Regressions: Further Results and New Directions}}, +volume = {26}, +year = {2007} +} +@techreport{Gotz2018, +author = {G{\"{o}}tz, Thomas B. 
and Hauzenberger, Klemens}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/G{\"{o}}tz, Hauzenberger - 2018 - Large mixed-frequency VARs with a parsimonious time-varying parameter structure.pdf:pdf}, +institution = {Deutsche Bundesbank}, +keywords = {Bayesian VAR,Common Stochastic,Forecasting,Mixed Frequencies,Time-Varying Intercepts,Volatility}, +number = {40}, +type = {Discussion Paper No.}, +title = {Large Mixed-Frequency {VARs} with a Parsimonious Time-Varying Parameter Structure}, +year = {2018} +} +@article{Griffin2010, +abstract = {This paper considers the effects of placing an absolutely continuous prior distribution on the regression coefficients of a linear model. We show that the posterior expectation is a matrix-shrunken version of the least squares estimate where the shrinkage matrix depends on the derivatives of the prior predictive density of the least squares estimate. The special case of the normal-gamma prior, which generalizes the Bayesian Lasso (Park and Casella 2008), is studied in depth. We discuss the prior interpretation and the posterior effects of hyperparameter choice and suggest a data-dependent default prior. Simulations and a chemometric example are used to compare the performance of the normal-gamma and the Bayesian Lasso in terms of out-of-sample predictive performance.}, +author = {Griffin, Jim E. 
and Brown, Philip J.}, +doi = {10.1214/10-BA507}, +file = {:C$\backslash$:/Users/seban876/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Griffin, Brown - 2010 - Inference with normal-gamma prior distributions in regression problems.pdf:pdf}, +isbn = {1936-0975}, +issn = {19360975}, +journal = {Bayesian Analysis}, +keywords = {"Spike-and-slab" prior,Bayesian lasso,Markov chain monte carlo,Multiple regression,Normal-gamma prior,Posterior moments,Scale mixture of normals,Shrinkage,p ≥ n}, +number = {1}, +pages = {171--188}, +title = {Inference with Normal-Gamma Prior Distributions in Regression Problems}, +volume = {5}, +year = {2010} +} +@book{Hamilton1994, +address = {Princeton, NJ}, +author = {Hamilton, James D.}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Hamilton{\_}1994.pdf:pdf}, +publisher = {Princeton University Press}, +title = {{Time Series Analysis}}, +year = {1994} +} + +@Article{Hormann2014, +author="H{\"o}rmann, Wolfgang +and Leydold, Josef", +title="Generating generalized inverse Gaussian random variates", +journal="Statistics and Computing", +year="2014", +month="Jul", +day="01", +volume="24", +number="4", +pages="547--557", +abstract="The generalized inverse Gaussian distribution has become quite popular in financial engineering. The most popular random variate generator is due to Dagpunar (Commun. Stat., Simul. Comput. 18:703--710, 1989). It is an acceptance-rejection algorithm method based on the Ratio-of-Uniforms method. However, it is not uniformly fast as it has a prohibitive large rejection constant when the distribution is close to the gamma distribution. Recently some papers have discussed universal methods that are suitable for this distribution. However, these methods require an expensive setup and are therefore not suitable for the varying parameter case which occurs in, e.g., Gibbs sampling. 
In this paper we analyze the performance of Dagpunar's algorithm and combine it with a new rejection method which ensures a uniformly fast generator. As its setup is rather short it is in particular suitable for the varying parameter case.", +issn="1573-1375", +doi="10.1007/s11222-013-9387-3", +url="https://doi.org/10.1007/s11222-013-9387-3" +} + + +@article{Huber2019, +author = {Florian Huber and Martin Feldkircher}, +title = {Adaptive Shrinkage in {Bayesian} Vector Autoregressive Models}, +journal = {Journal of Business \& Economic Statistics}, +volume = {37}, +number = {1}, +pages = {27--39}, +year = {2019}, +publisher = {Taylor \& Francis}, +doi = {10.1080/07350015.2016.1256217} +} +@article{Jarocinski2015, +abstract = {The correct implementation of the Durbin and Koopman simulation smoother is explained. A possible misunderstanding is pointed out and clarified for both the basic state space model with a non-zero mean of the initial state and with time-varying intercepts (mean adjustments).}, +author = {Jaroci{\'{n}}ski, Marek}, +doi = {10.1016/j.csda.2015.05.001}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Jaroci{\'{n}}ski{\_}2015.pdf:pdf}, +journal = {Computational Statistics {\&} Data Analysis}, +keywords = {Simulation smoother,State space model,Trend output}, +pages = {1--3}, +publisher = {Elsevier B.V.}, +title = {{A Note on Implementing the Durbin and Koopman Simulation Smoother}}, +volume = {91}, +year = {2015} +} +@techreport{Jarocinski2008, +abstract = {This paper estimates a Bayesian VAR for the US economy which includes a housing sector and addresses the following questions. Can developments in the housing sector be explained on the basis of developments in real and nominal GDP and interest rates? What are the effects of housing demand shocks on the economy? How does monetary policy affect the housing market? What are the implications of house price developments for the stance of monetary policy?
Regarding the latter question, we implement a version of a Monetary Conditions Index (MCI) due to C{\'{e}}spedes et al. (2006).}, +author = {Jaroci{\'{n}}ski, Marek and Smets, Frank}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Jaroci{\'{n}}ski, Smets{\_}2008.pdf:pdf}, +type = {Working Paper No.}, +institution = {European Central Bank}, +keywords = {Bayesian VAR,JEL Classification: E3-E4,Keywords: House prices,conditional forecast.,monetary conditions index,monetary policy shock}, +title = {{House Prices and the Stance of Monetary Policy}}, +number = {891}, +year = {2008} +} +@book{Karlsson2013, +abstract = {This chapter reviews Bayesian methods for inference and forecasting with VAR models. Bayesian inference and, by extension, forecasting depends on numerical methods for simulating from the posterior distribution of the parameters and special attention is given to the implementation of the simulation algorithm. ?? 2013 Elsevier B.V.}, +author = {Karlsson, Sune}, +booktitle = {Handbook of Economic Forecasting}, +doi = {10.1016/B978-0-444-62731-5.00015-4}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Karlsson{\_}2013.pdf:pdf}, +keywords = {Cointegration,Conditional forecasts,Large VAR,Markov chain Monte Carlo,Model selection,Stochastic volatility,Structural VAR,Time-varying parameters}, +pages = {791--897}, +publisher = {Elsevier B.V.}, +title = {{Forecasting with Bayesian Vector Autoregression}}, +volume = {2}, +year = {2013} +} + +@article{Kastner2014, +abstract = {Bayesian inference for stochastic volatility models using MCMC methods highly depends on actual parameter values in terms of sampling efficiency. While draws from the posterior utilizing the standard centered parameterization break down when the volatility of volatility parameter in the latent state equation is small, non-centered versions of the model show deficiencies for highly persistent latent variable series. 
The novel approach of ancillarity-sufficiency interweaving has recently been shown to aid in overcoming these issues for a broad class of multilevel models. It is demonstrated how such an interweaving strategy can be applied to stochastic volatility models in order to greatly improve sampling efficiency for all parameters and throughout the entire parameter range. Moreover, this method of "combining best of different worlds" allows for inference for parameter constellations that have previously been infeasible to estimate without the need to select a particular parameterization beforehand. {\textcopyright} 2014 Elsevier Ltd. All rights reserved.}, +archivePrefix = {arXiv}, +arxivId = {1706.05280}, +author = {Kastner, Gregor and Fr{\"{u}}hwirth-Schnatter, Sylvia}, +doi = {10.1016/j.csda.2013.01.002}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Kastner, Fr{\"{u}}hwirth-Schnatter - 2014 - Ancillarity-sufficiency interweaving strategy (ASIS) for boosting MCMC estimation of stochastic vo.pdf:pdf}, +journal = {Computational Statistics and Data Analysis}, +keywords = {Auxiliary mixture sampling,Exchange rate data,Markov chain Monte Carlo,Massively parallel computing,Non-centering,State space model}, +pages = {408--423}, +publisher = {Elsevier B.V.}, +title = {Ancillarity-Sufficiency Interweaving Strategy ({ASIS}) for Boosting {MCMC} Estimation of Stochastic Volatility Models}, +volume = {76}, +year = {2014} +} + + +@article{Kastner2016, +abstract = {The R package stochvol provides a fully Bayesian implementation of heteroskedasticity modeling within the framework of stochastic volatility. It utilizes Markov chain Monte Carlo (MCMC) samplers to conduct inference by obtaining draws from the posterior distribution of parameters and latent variables which can then be used for predicting future volatilities. The package can straightforwardly be employed as a stand-alone tool; moreover, it allows for easy incorporation into other MCMC samplers. 
The main focus of this paper is to show the functionality of stochvol. In addition, it provides a brief mathematical description of the model, an overview of the sampling schemes used, and several illustrative examples using exchange rate data.}, +author = {Kastner, Gregor}, +doi = {10.18637/jss.v069.i05}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Kastner - 2016 - Dealing with Stochastic Volatility in Time Series Using the iRi Package bstochvolb.pdf:pdf}, +journal = {Journal of Statistical Software}, +keywords = {ancillarity-sufficiency interweaving strategy,asis,auxiliary mixture sam-,bayesian inference,financial time series,heteroskedas-,markov chain monte carlo,mcmc,pling,state-space model,ticity}, +number = {5}, +title = {Dealing with Stochastic Volatility in Time Series Using the \texttt{R} Package \texttt{stochvol}}, +volume = {69}, +year = {2016} +} + +@article{Kastner2017, +archivePrefix = {arXiv}, +arxivId = {arXiv:1602.08154v3}, +author = {Kastner, Gregor and Fr{\"{u}}hwirth-Schnatter, Sylvia and {Freitas Lopes}, Hedibert}, +doi = {10.1080/10618600.2017.1322091}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Kastner, Fr{\"{u}}hwirth-Schnatter, Freitas Lopes - 2017 - Efficient Bayesian Inference for Multivariate Factor Stochastic Volatility Model(2).pdf:pdf;:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Kastner, Fr{\"{u}}hwirth-Schnatter, Freitas Lopes - 2017 - Efficient Bayesian Inference for Multivariate Factor Stochastic Volatility Models.pdf:pdf}, +journal = {Journal of Computational and Graphical Statistics}, +keywords = {bayesian,mcmc,stochastic volatility,time series}, +mendeley-tags = {bayesian,mcmc,stochastic volatility,time series}, +number = {4}, +pages = {905--917}, +title = {Efficient {Bayesian} Inference for Multivariate Factor Stochastic Volatility Models}, +volume = {26}, +year = {2017} +} +@ARTICLE{KastnerHuber2018, + author = {{Kastner}, Gregor and {Huber}, Florian}, + title = 
{Sparse {Bayesian} Vector Autoregressions in Huge Dimensions}, + keywords = {Statistics - Computation, Economics - Econometrics, Statistics - Applications, Statistics - Methodology}, + year = "2018", + arxiv = {1704.03239}, + primaryClass = {stat.CO}, + adsnote = {Provided by the SAO/NASA Astrophysics Data System}, +arxivId = {arXiv:1704.03239}, +note = {\href{http://arxiv.org/abs/1704.03239}{arXiv:1704.03239}}, +journal = {Preprint available on arXiv} +} + +@Manual{Kastner2018, + title = {{factorstochvol}: {B}ayesian Estimation of (Sparse) Latent Factor Stochastic Volatility Models}, + author = {Gregor Kastner}, + year = {2019}, + note = {R package version 0.9}, + url = {https://cran.r-project.org/package=factorstochvol}, +} + +@Manual{Kastner2019, + title = {{stochvol}: Efficient Bayesian Inference for Stochastic Volatility ({SV}) Models}, + author = {Gregor Kastner and Darjus Hosszejni}, + year = {2019}, + note = {R package version 2.0.4}, + url = {https://CRAN.R-project.org/package=stochvol}, +} + +@Article{Hosszejni2019, + title = {Modeling Univariate and Multivariate Stochastic Volatility in {R} with {stochvol} and {factorstochvol}}, + author = {Darjus Hosszejni and Gregor Kastner}, + journal = {R package vignette}, + year = {2019}, + url = {https://CRAN.R-project.org/package=factorstochvol/vignettes/paper.pdf}, +} + +@Article{Zeileis2005, + title = {zoo: S3 Infrastructure for Regular and Irregular Time Series}, + author = {Achim Zeileis and Gabor Grothendieck}, + journal = {Journal of Statistical Software}, + year = {2005}, + volume = {14}, + number = {6}, + pages = {1--27}, + doi = {10.18637/jss.v014.i06}, +} + +@article{Kim1998, + author = {Kim, Sangjoon and Shephard, Neil and Chib, Siddhartha}, + title = {Stochastic Volatility: {Likelihood} Inference and Comparison with {ARCH} Models}, + journal = {The Review of Economic Studies}, + volume = {65}, + number = {3}, + pages = {361--393}, + year = {1998}, + month = {07}, + abstract = "{In this paper, Markov 
chain Monte Carlo sampling methods are exploited to provide a unified, practical likelihood-based framework for the analysis of stochastic volatility models. A highly effective method is developed that samples all the unobserved volatilities at once using an approximating offset mixture model, followed by an importance reweighting procedure. This approach is compared with several alternative methods using real data. The paper also develops simulation-based methods for filtering, likelihood evaluation and model failure diagnostics. The issue of model choice using non-nested likelihood ratios and Bayes factors is also investigated. These methods are used to compare the fit of stochastic volatility and GARCH models. All the procedures are illustrated in detail.}", + issn = {0034-6527}, + doi = {10.1111/1467-937X.00050}, +} + + + @Manual{Kleen2018, + title = {alfred: Downloading Time Series from ALFRED Database for Various +Vintages}, + author = {Onno Kleen}, + year = {2018}, + note = {R package version 0.1.6}, + url = {https://CRAN.R-project.org/package=alfred}, + } + +@article{Litterman1986, +author = {Litterman, Robert B}, +doi = {10.1080/07350015.1986.10509485}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Litterman{\_}1986.pdf:pdf}, +journal = {Journal of Business {\&} Economic Statistics}, +keywords = {appears to generate fore-,bayesian,developed that,for the first time,has been disenchanted with,in recent years a,models,not,outlook,statistical time series model,the large structural econometric,then commonly in use,this disenchantment was based,time series}, +number = {1}, +pages = {1--4}, +title = {{A Statistical Approach to Economic Forecasting}}, +volume = {4}, +year = {1986} +} + +@article{Louzis2019, +author = {Louzis, Dimitrios P.}, +title = {Steady-state modeling and macroeconomic forecasting quality}, +journal = {Journal of Applied Econometrics}, +volume = {34}, +number = {2}, +pages = {285--314}, +doi = {10.1002/jae.2657}, +abstract 
= {Summary Vector autoregressions (VARs) with informative steady-state priors are standard forecasting tools in empirical macroeconomics. This study proposes (i) an adaptive hierarchical normal-gamma prior on steady states, (ii) a time-varying steady-state specification which accounts for structural breaks in the unconditional mean, and (iii) a generalization of steady-state VARs with fat-tailed and heteroskedastic error terms. Empirical analysis, based on a real-time dataset of 14 macroeconomic variables, shows that, overall, the hierarchical steady-state specifications materially improve out-of-sample forecasting for forecasting horizons longer than 1 year, while the time-varying specifications generate superior forecasts for variables with significant changes in their unconditional mean.}, +year = {2019} +} +@article{Mariano2003, +abstract = {Popular monthly coincident indices of business cycles, e.g. the composite index and the Stock–Watson coincident index, have two shortcomings. First, they ignore information contained in quarterly indicators such as real GDP. Second, they lack economic interpretation; hence the heights of peaks and the depths of troughs depend on the choice of an index. This paper extends the Stock–Watson coincident index by applying maximum likelihood factor analysis to a mixed-frequency series of quarterly real GDP and monthly coincident business cycle indicators. The resulting index is related to latent monthly real GDP. Copyright {\textcopyright} 2002 John Wiley {\&} Sons, Ltd.}, +author = {Mariano, Roberto S. 
and Murasawa, Yasutomo}, +doi = {10.1002/jae.695}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Mariano, Murasawa{\_}2003.pdf:pdf}, +issn = {08837252}, +journal = {Journal of Applied Econometrics}, +number = {4}, +pages = {427--443}, +title = {A New Coincident Index of Business Cycles Based on Monthly and Quarterly Series}, +volume = {18}, +year = {2003} +} +@article{McCausland2011, +title = "Simulation smoothing for state-space models: A computational efficiency analysis", +journal = "Computational Statistics \& Data Analysis", +volume = "55", +number = "1", +pages = "199--212", +year = "2011", +doi = "10.1016/j.csda.2010.07.009", +author = "William J. McCausland and Shirley Miller and Denis Pelletier", +keywords = "State–space models, Markov chain Monte Carlo, Importance sampling, Count data, High frequency financial data", +abstract = "Simulation smoothing involves drawing state variables (or innovations) in discrete time state–space models from their conditional distribution given parameters and observations. Gaussian simulation smoothing is of particular interest, not only for the direct analysis of Gaussian linear models, but also for the indirect analysis of more general models. Several methods for Gaussian simulation smoothing exist, most of which are based on the Kalman filter. Since states in Gaussian linear state–space models are Gaussian Markov random fields, it is also possible to apply the Cholesky Factor Algorithm (CFA) to draw states. This algorithm takes advantage of the band diagonal structure of the Hessian matrix of the log density to make efficient draws. We show how to exploit the special structure of state–space models to draw latent states even more efficiently. We analyse the computational efficiency of Kalman-filter-based methods, the CFA, and our new method using counts of operations and computational experiments. We show that for many important cases, our method is most efficient. 
Gains are particularly large for cases where the dimension of observed variables is large or where one makes repeated draws of states for the same parameter values. We apply our method to a multivariate Poisson model with time-varying intensities, which we use to analyse financial market transaction count data." +} +@misc{OHara2017, +author = {O'Hara, Keith}, +title = {{Bayesian Macroeconometrics in R}}, +url = {http://www.kthohr.com/bmr.html}, +urldate = {2017-10-17}, +year = {2017} +} +@article{Primiceri2005, + author = {Primiceri, Giorgio E.}, + title = {Time Varying Structural Vector Autoregressions and Monetary Policy}, + journal = {The Review of Economic Studies}, + volume = {72}, + number = {3}, + pages = {821-852}, + year = {2005}, + month = {07}, + abstract = "{Monetary policy and the private sector behaviour of the U.S. economy are modelled as a time varying structural vector autoregression, where the sources of time variation are both the coefficients and the variance covariance matrix of the innovations. The paper develops a new, simple modelling strategy for the law of motion of the variance covariance matrix and proposes an efficient Markov chain Monte Carlo algorithm for the model likelihood/posterior numerical evaluation. The main empirical conclusions are: (1) both systematic and non-systematic monetary policy have changed during the last 40 years—in particular, systematic responses of the interest rate to inflation and unemployment exhibit a trend toward a more aggressive behaviour, despite remarkable oscillations; (2) this has had a negligible effect on the rest of the economy. The role played by exogenous non-policy shocks seems more important than interest rate policy in explaining the high inflation and unemployment episodes in recent U.S. economic history.}", + doi = {10.1111/j.1467-937X.2005.00353.x} +} +@article{Roberts2009, +author = {Gareth O. Roberts and Jeffrey S. 
Rosenthal}, +title = {Examples of Adaptive {MCMC}}, +journal = {Journal of Computational and Graphical Statistics}, +volume = {18}, +number = {2}, +pages = {349--367}, +year = {2009}, +publisher = {Taylor \& Francis}, +doi = {10.1198/jcgs.2009.06134} +} +@article{Rue2001, +author = {Rue, H{\aa}vard}, +file = {:Users/sebastianankargren/Documents/Mendeley Desktop/Rue - 2001 - Fast sampling of Gaussian Markov random fields.pdf:pdf}, +journal = {Journal of the Royal Statistical Society. Series B (Statistical Methodology)}, +keywords = {block sampling,conditional autoregressive model,divide-and-conquer strategy,eld,gaussian markov random,markov chain monte carlo,methods}, +pages = {325--339}, +title = {{Fast sampling of Gaussian Markov random fields}}, +volume = {63}, +year = {2001} +} +@article{Sanderson2016, +abstract = {The C++ language is often used for implementing functionality that is performance and/or resource sensitive. While the standard C++ library provides many useful algorithms (such as sorting), in its current form it does not provide direct handling of linear algebra (matrix maths). Armadillo is an open source linear algebra library for the C++ language, aiming towards a good balance between speed and ease of use. Its high-level Application Programming Interface (API) is deliberately similar to the widely Matlab and Octave languages (Eaton et al. 2015), so that mathematical operations can be expressed in a familiar and natural manner. The library is useful for algorithm development directly in C++, or relatively quick conversion of research code into production environments. Armadillo provides efficient objects for vectors, matrices and cubes (third order tensors), as well as over 200 associated functions for manipulating data stored in the objects. Integer, floating point and complex numbers are supported, as well as dense and sparse storage formats. 
Various matrix factorisations are provided through integration with LAPACK (Demmel 1997), or one of its high performance drop-in replacements such as Intel MKL (Intel 2016) or OpenBLAS (Xianyi, Qian, and Saar 2016). It is also possible to use Armadillo in conjunction with NVBLAS to obtain GPU-accelerated matrix multiplication (NVIDIA 2015). Armadillo is used as a base for other open source projects, such as MLPACK, a C++ library for machine learning and pattern recognition (Curtin et al. 2013), and RcppArmadillo, a bridge between the R language and C++ in order to speed up computations (Eddelbuettel and Sanderson 2014). Armadillo internally employs an expression evaluator based on template meta-programming techniques (Abrahams and Gurtovoy 2004), to automatically combine several operations in order to increase speed and efficiency. An overview of the internal architecture is given in (Sanderson 2010).}, +author = {Sanderson, Conrad and Curtin, Ryan}, +doi = {10.21105/joss.00026}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Sanderson, Curtin{\_}2016.pdf:pdf}, +journal = {The Journal of Open Source Software}, +number = {2}, +title = {{Armadillo: A Template-Based C++ Library for Linear Algebra}}, +volume = {1}, +year = {2016} +} +@article{Schorfheide2015, +abstract = {This paper develops a vector autoregression (VAR) for macroeconomic time series which are observed at mixed frequencies – quarterly and monthly. The mixed-frequency VAR is cast in state-space form and estimated with Bayesian methods under a Minnesota-style prior. Using a real-time data set, we generate and evaluate forecasts from the mixed-frequency VAR and compare them to forecasts from a VAR that is estimated based on data time-aggregated to quarterly frequency. 
We document how information that becomes available within the quarter improves the forecasts in real time.}, +author = {Schorfheide, Frank and Song, Dongho}, +doi = {10.1080/07350015.2014.954707}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Schorfheide, Song{\_}2015.pdf:pdf}, +journal = {Journal of Business {\&} Economic Statistics}, +keywords = {bayesian methods,macroeconomic forecasting,real-time data,vector autore-}, +number = {3}, +pages = {366--380}, +title = {{Real-Time Forecasting with a Mixed-Frequency VAR}}, +volume = {33}, +year = {2015} +} +@article{Stock2001, +author = {Stock, James H and Watson, Mark W}, +doi = {10.1257/jep.15.4.101}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Stock, Watson{\_}2001.pdf:pdf}, +journal = {Journal of Economic Perspectives}, +number = {4}, +pages = {101--115}, +title = {{Vector Autoregressions}}, +volume = {15}, +year = {2001} +} + @Manual{R2019, + title = {R: A Language and Environment for Statistical Computing}, + author = {{R Core Team}}, + organization = {R Foundation for Statistical Computing}, + address = {Vienna, Austria}, + year = {2019}, + url = {https://www.R-project.org/}, + } +@article{Tusell2011, +author = {Tusell, Fernando}, +doi = {10.18637/jss.v039.i02}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Tusell{\_}2011.pdf:pdf}, +journal = {Journal of Statistical Software}, +keywords = {chain linking,item response theory,mixed-format tests,r,separate calibration}, +number = {2}, +title = {{Kalman Filtering in R}}, +volume = {39}, +year = {2011} +} +@book{Wickham2009, +address = {New York}, +author = {Wickham, Hadley}, +doi = {10.1007/978-0-387-98141-3}, +publisher = {Springer-Verlag}, +title = {{ggplot2: Elegant Graphics for Data Analysis}}, +year = {2009} +} +@article{Wickham2014, +abstract = {In this paper we present the R package gRain for propagation in graphical indepen- dence networks (for which Bayesian networks is a special instance). 
The paper includes a description of the theory behind the computations. The main part of the paper is an illustration of how to use the package. The paper also illustrates how to turn a graphical model and data into an independence network.}, +archivePrefix = {arXiv}, +arxivId = {arXiv:1501.0228}, +author = {Wickham, Hadley}, +doi = {10.18637/jss.v059.i10}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Wickham{\_}2014.pdf:pdf}, +journal = {Journal of Statistical Software}, +keywords = {data cleaning,data tidying,r,relational databases}, +number = {10}, +title = {{Tidy Data}}, +volume = {59}, +year = {2014} +} +@book{Wickham2015, +address = {Philadelphia, PA}, +author = {Wickham, Hadley}, +publisher = {Chapman and Hall/CRC}, +title = {Advanced {R}}, +year = {2015} +} + @Manual{Wilke2018, + title = {ggridges: Ridgeline Plots in 'ggplot2'}, + author = {Claus O. Wilke}, + year = {2018}, + note = {R package version 0.5.1}, + url = {https://CRAN.R-project.org/package=ggridges}, + } + +@article{Villani2009, +author = {Villani, Mattias}, +doi = {10.1002/jae.1065}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/Villani{\_}2009.pdf:pdf}, +journal = {Journal of Applied Econometrics}, +keywords = {bayesian inference,c11,c32,c53,cointegration,e50,forecasting,jel classification,unconditional mean,var,vecm}, +number = {4}, +pages = {630--650}, +title = {{Steady State Priors for Vector Autoregressions}}, +volume = {24}, +year = {2009} +} +@article{Osterholm2010, +abstract = {The theory that we shall seek to elaborate here puts considerable emphasis on the impact of cross-border capital flows on domestic financial conditions, the internal financing power of corporate growth, the limits of outside financing through equity capital, the internal financing power of corporate growth, and the convergence of financial systems. 
The paper generates insights about the emergence of financial conglomerates, the challenges raised by the integration of the financial markets, the global evolution of the economic and financial activity, the European market of financial services, and the creation of the single market for financial services.}, +author = {{\"{O}}sterholm, P{\"{a}}r}, +doi = {10.1080/09603100903357408}, +file = {:C$\backslash$:/Users/seban876/Documents/Mendeley Desktop/{\"{O}}sterholm{\_}2010.pdf:pdf}, +journal = {Applied Financial Economics}, +number = {4}, +pages = {265--274}, +title = {{The Effect on the Swedish Real Economy of the Financial Crisis}}, +volume = {20}, +year = {2010} +} + + diff --git a/vignettes/vignette_data.RData b/vignettes/vignette_data.RData new file mode 100644 index 0000000..f74b721 Binary files /dev/null and b/vignettes/vignette_data.RData differ