MCMCmagic

Author

Sarah Urbut

Published

August 30, 2024

First, how does our data look?


Shape of mu_d_matrix: 12 56 
Shape of mean_lambda: 10 2 
Shape of mu_i: 10 56 
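
Those printed shapes suggest N = 10 individuals, D = 12 diseases, T = 56 time points, and K = 2 topics. For concreteness, here is a hypothetical toy construction of the inputs the functions below expect (P = 2 covariates is an illustrative assumption, not taken from the data above):

Code
set.seed(1)
N <- 10; D <- 12; T <- 56; P <- 2; n_topics <- 2

# y: N x D x T binary outcome array
y <- array(rbinom(N * D * T, 1, 0.1), dim = c(N, D, T))

# mu_d_logit: length-D list of length-T baseline trajectories on the logit scale
mu_d_logit <- lapply(1:D, function(d) rep(qlogis(0.1), T))

# g_i: N x P covariate matrix (intercept plus one covariate)
g_i <- cbind(1, rnorm(N))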

Now we initialize:

Code
warm_gp_initialization <- function(Y, mu_d_logit, g_i, n_topics) {
  N <- dim(Y)[1]
  D <- dim(Y)[2]
  T <- dim(Y)[3]
  P <- ncol(g_i)
  
  # Convert Y to logit scale, with smoothing to avoid infinite values
  Y_smooth <- (Y * (N * D - 1) + 0.5) / (N * D)
  Y_logit <- log(Y_smooth / (1 - Y_smooth))
  
  # Center Y_logit by subtracting mu_d_logit
  Y_centered <- array(0, dim = c(N, D, T))
  for (d in 1:D) {
    Y_centered[, d, ] <- Y_logit[, d, ] - matrix(mu_d_logit[[d]], nrow = N, ncol = T, byrow = TRUE)
  }
  
  # Perform SVD on the reshaped Y_centered
  Y_reshaped <- matrix(Y_centered, nrow = N * D, ncol = T)
  svd_result <- svd(Y_reshaped, nu = n_topics, nv = T)
  
  # Initialize Phi using right singular vectors
  Phi <- array(0, dim = c(n_topics, D, T))
  for (k in 1:n_topics) {
    Phi[k, , ] <- matrix(svd_result$v[, k], nrow = D, ncol = T, byrow = TRUE)
  }
  
  # Initialize Lambda using the left singular vectors, scaled by the singular values.
  # Note: matrix() reshapes column-major, so the row of Y_reshaped for (individual i,
  # disease 1) is row i, not (i-1)*D + 1 (which would assume row-major order).
  Lambda <- array(0, dim = c(N, n_topics, T))
  for (i in 1:N) {
    for (k in 1:n_topics) {
      Lambda[i, k, ] <- svd_result$d[k] * svd_result$u[i, k] * svd_result$v[, k]
    }
  }
  
  # Initialize Gamma using regression on the mean of Lambda
  Lambda_mean <- apply(Lambda, c(1, 2), mean)
  Gamma <- t(solve(t(g_i) %*% g_i) %*% t(g_i) %*% Lambda_mean)
  
  # Heuristic GP hyperparameter estimates: match the lag-1 autocorrelation to an
  # exponential decay exp(-1 / length_scale); clamp it so log() stays defined
  estimate_gp_params <- function(x) {
    acf_x <- acf(x, plot = FALSE)$acf[-1]
    acf_1 <- max(acf_x[1], 0.01)
    length_scale <- -1 / log(acf_1)
    var_scale <- var(x)
    return(c(length_scale, var_scale))
  }
  
  length_scales_lambda <- numeric(n_topics)
  var_scales_lambda <- numeric(n_topics)
  for (k in 1:n_topics) {
    gp_params <- estimate_gp_params(colMeans(Lambda[, k, ]))
    length_scales_lambda[k] <- gp_params[1]
    var_scales_lambda[k] <- gp_params[2]
  }
  
  length_scales_phi <- numeric(n_topics * D)
  var_scales_phi <- numeric(n_topics * D)
  for (k in 1:n_topics) {
    for (d in 1:D) {
      idx <- (k-1)*D + d
      gp_params <- estimate_gp_params(Phi[k, d, ])
      length_scales_phi[idx] <- gp_params[1]
      var_scales_phi[idx] <- gp_params[2]
    }
  }
  
  return(list(
    Lambda = Lambda,
    Phi = Phi,
    Gamma = Gamma,
    length_scales_lambda = length_scales_lambda,
    var_scales_lambda = var_scales_lambda,
    length_scales_phi = length_scales_phi,
    var_scales_phi = var_scales_phi
  ))
}
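
On the toy inputs above, a quick shape check (a sketch; the initialized values themselves are not meaningful):

Code
init <- warm_gp_initialization(y, mu_d_logit, g_i, n_topics)
dim(init$Lambda)  # N x n_topics x T
dim(init$Phi)     # n_topics x D x T
dim(init$Gamma)   # n_topics x P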

Next, some helper functions: the Bernoulli log-likelihood and the Gaussian-process log-prior.
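
In model form (reconstructed from the code that follows), the likelihood is

$$
\operatorname{logit}\,\pi_{idt} \;=\; \mu_d(t) \;+\; \sum_{k=1}^{K} \lambda_{ik}(t)\,\phi_{kd}(t),
\qquad y_{idt} \sim \mathrm{Bernoulli}(\pi_{idt}),
$$

with GP priors $\lambda_{ik}(\cdot) \sim \mathcal{GP}\big(g_i^\top \gamma_k,\, K_{\lambda,k}\big)$ and $\phi_{kd}(\cdot) \sim \mathcal{GP}\big(0,\, K_{\phi,kd}\big)$, where each $K$ is a squared-exponential kernel with its own length scale and variance.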

Code
library(MASS)

log_likelihood_vec <- function(y, Lambda, Phi, mu_d_logit) {
  n_individuals <- dim(Lambda)[1]
  n_topics <- dim(Lambda)[2]
  T <- dim(Lambda)[3]
  n_diseases <- dim(Phi)[2]
  
  logit_pi <- array(0, dim = c(n_individuals, n_diseases, T))
  
  for (k in 1:n_topics) {
    for (t in 1:T) {
      logit_pi[, , t] <- logit_pi[, , t] + Lambda[, k, t] %*% t(Phi[k, , t])
    }
  }
  
  # Add mu_d_logit
  for (d in 1:n_diseases) {
    logit_pi[, d, ] <- logit_pi[, d, ] + matrix(mu_d_logit[[d]], nrow = n_individuals, ncol = T, byrow = TRUE)
  }
  
  # Elementwise inverse logit; the local `pi` shadows base::pi only inside this function
  pi <- 1 / (1 + exp(-logit_pi))
  log_lik <- sum(dbinom(y, 1, pi, log = TRUE))
  return(log_lik)
}
log_gp_prior_vec <- function(x, mean, length_scale, var_scale) {
  T <- length(x)
  time_points <- 1:T
  K <- var_scale * exp(-0.5 * outer(time_points, time_points, "-")^2 / length_scale^2)
  K <- K + diag(1e-6, T)
  
  # Ensure mean is the same length as x
  if (length(mean) == 1) {
    mean <- rep(mean, T)
  } else if (length(mean) != T) {
    stop(paste("Length of mean must be 1 or equal to length of x. Length of x:", T, "Length of mean:", length(mean)))
  }
  
  centered_x <- x - mean
  
  # In R, chol() returns the upper-triangular factor R with K = t(R) %*% R
  R_chol <- chol(K)
  
  # Stable log-determinant, reusing the Cholesky factor
  log_det_K <- 2 * sum(log(diag(R_chol)))
  
  # Quadratic form x' K^{-1} x: backsolve(..., transpose = TRUE) solves t(R) z = x
  quad_form <- sum(backsolve(R_chol, centered_x, transpose = TRUE)^2)
  
  return(-0.5 * (log_det_K + quad_form + T * log(2 * base::pi)))
}
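
As a sanity check, log_gp_prior_vec should agree with a dense multivariate-normal density. A sketch using the mvtnorm package (assumed installed; not used elsewhere in this post):

Code
library(mvtnorm)

x  <- rnorm(20)
tp <- 1:20
K  <- 1.5 * exp(-0.5 * outer(tp, tp, "-")^2 / 3^2) + diag(1e-6, 20)

all.equal(log_gp_prior_vec(x, mean = 0, length_scale = 3, var_scale = 1.5),
          dmvnorm(x, mean = rep(0, 20), sigma = K, log = TRUE))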

Here’s the workhorse: an adaptive Metropolis-within-Gibbs sampler that updates Lambda, Phi, Gamma, and the GP hyperparameters in turn.

Code
mcmc_sampler_optimized <- function(y, mu_d_logit, g_i, n_iterations, initial_values,
                                   alpha_lambda, beta_lambda, alpha_sigma, beta_sigma,
                                   alpha_phi, beta_phi, alpha_sigma_phi, beta_sigma_phi,
                                   alpha_Gamma, beta_Gamma) {
  current_state <- initial_values
  n_individuals <- dim(current_state$Lambda)[1]
  n_topics <- dim(current_state$Lambda)[2]
  T <- dim(current_state$Lambda)[3]
  n_diseases <- dim(current_state$Phi)[2]
  
  # Initialize adaptive proposal standard deviations.
  # Note: this simple multiplicative (x1.01 on accept, x0.99 on reject) adaptation
  # never vanishes, so strictly it should only run during burn-in.
  adapt_sd <- list(
    Lambda = array(0.01, dim = dim(current_state$Lambda)),
    Phi = array(0.01, dim = dim(current_state$Phi)),
    Gamma = matrix(0.01, nrow = nrow(current_state$Gamma), ncol = ncol(current_state$Gamma)),
    length_scales_lambda = rep(0.1, n_topics),
    var_scales_lambda = rep(0.1, n_topics),
    length_scales_phi = rep(0.1, n_topics * n_diseases),
    var_scales_phi = rep(0.1, n_topics * n_diseases)
  )
  
  # Initialize storage for samples
  samples <- list(
    Lambda = array(0, dim = c(n_iterations, dim(current_state$Lambda))),
    Phi = array(0, dim = c(n_iterations, dim(current_state$Phi))),
    Gamma = array(0, dim = c(n_iterations, dim(current_state$Gamma))),
    length_scales_lambda = matrix(0, nrow = n_iterations, ncol = length(current_state$length_scales_lambda)),
    var_scales_lambda = matrix(0, nrow = n_iterations, ncol = length(current_state$var_scales_lambda)),
    length_scales_phi = matrix(0, nrow = n_iterations, ncol = length(current_state$length_scales_phi)),
    var_scales_phi = matrix(0, nrow = n_iterations, ncol = length(current_state$var_scales_phi))
  )
  
  for (iter in 1:n_iterations) {
    # Update Lambda
    proposed_Lambda <- current_state$Lambda + array(rnorm(prod(dim(current_state$Lambda)), 0, adapt_sd$Lambda), dim = dim(current_state$Lambda))
    
    current_log_lik <- log_likelihood_vec(y, current_state$Lambda, current_state$Phi, mu_d_logit)
    proposed_log_lik <- log_likelihood_vec(y, proposed_Lambda, current_state$Phi, mu_d_logit)
    
    lambda_mean <- g_i %*% t(current_state$Gamma)
    current_log_prior <- sum(sapply(1:n_individuals, function(i) {
      sapply(1:n_topics, function(k) {
        log_gp_prior_vec(current_state$Lambda[i, k, ], lambda_mean[i, k], 
                         current_state$length_scales_lambda[k], 
                         current_state$var_scales_lambda[k])
      })
    }))
    
    proposed_log_prior <- sum(sapply(1:n_individuals, function(i) {
      sapply(1:n_topics, function(k) {
        log_gp_prior_vec(proposed_Lambda[i, k, ], lambda_mean[i, k], 
                         current_state$length_scales_lambda[k], 
                         current_state$var_scales_lambda[k])
      })
    }))
    
    log_accept_ratio <- (proposed_log_lik + proposed_log_prior) - (current_log_lik + current_log_prior)
    
    if (log(runif(1)) < log_accept_ratio) {
      current_state$Lambda <- proposed_Lambda
      adapt_sd$Lambda <- adapt_sd$Lambda * 1.01
    } else {
      adapt_sd$Lambda <- adapt_sd$Lambda * 0.99
    }
    
    # Update Phi
    proposed_Phi <- current_state$Phi + array(rnorm(prod(dim(current_state$Phi)), 0, adapt_sd$Phi), dim = dim(current_state$Phi))
    
    current_log_lik <- log_likelihood_vec(y, current_state$Lambda, current_state$Phi, mu_d_logit)
    proposed_log_lik <- log_likelihood_vec(y, current_state$Lambda, proposed_Phi, mu_d_logit)
    
    current_log_prior <- sum(sapply(1:n_topics, function(k) {
      sapply(1:n_diseases, function(d) {
        idx <- (k-1)*n_diseases + d
        log_gp_prior_vec(current_state$Phi[k, d, ], 0, 
                         current_state$length_scales_phi[idx], 
                         current_state$var_scales_phi[idx])
      })
    }))
    
    proposed_log_prior <- sum(sapply(1:n_topics, function(k) {
      sapply(1:n_diseases, function(d) {
        idx <- (k-1)*n_diseases + d
        log_gp_prior_vec(proposed_Phi[k, d, ], 0, 
                         current_state$length_scales_phi[idx], 
                         current_state$var_scales_phi[idx])
      })
    }))
    
    log_accept_ratio <- (proposed_log_lik + proposed_log_prior) - (current_log_lik + current_log_prior)
    
    if (log(runif(1)) < log_accept_ratio) {
      current_state$Phi <- proposed_Phi
      adapt_sd$Phi <- adapt_sd$Phi * 1.01
    } else {
      adapt_sd$Phi <- adapt_sd$Phi * 0.99
    }
    
    # Update Gamma
    proposed_Gamma <- current_state$Gamma + matrix(rnorm(prod(dim(current_state$Gamma)), 0, adapt_sd$Gamma), nrow = nrow(current_state$Gamma))
    
    current_log_prior <- sum(dnorm(current_state$Gamma, 0, sqrt(alpha_Gamma / beta_Gamma), log = TRUE))
    proposed_log_prior <- sum(dnorm(proposed_Gamma, 0, sqrt(alpha_Gamma / beta_Gamma), log = TRUE))
    
    lambda_mean_current <- g_i %*% t(current_state$Gamma)
    lambda_mean_proposed <- g_i %*% t(proposed_Gamma)
    
    current_log_likelihood <- sum(sapply(1:n_individuals, function(i) {
      sapply(1:n_topics, function(k) {
        log_gp_prior_vec(current_state$Lambda[i, k, ], lambda_mean_current[i, k], 
                         current_state$length_scales_lambda[k], 
                         current_state$var_scales_lambda[k])
      })
    }))
    proposed_log_likelihood <- sum(sapply(1:n_individuals, function(i) {
      sapply(1:n_topics, function(k) {
        log_gp_prior_vec(current_state$Lambda[i, k, ], lambda_mean_proposed[i, k], 
                         current_state$length_scales_lambda[k], 
                         current_state$var_scales_lambda[k])
      })
    }))
    
    log_accept_ratio <- (proposed_log_likelihood + proposed_log_prior) - (current_log_likelihood + current_log_prior)
    
    if (log(runif(1)) < log_accept_ratio) {
      current_state$Gamma <- proposed_Gamma
      lambda_mean_current <- lambda_mean_proposed  # keep the GP mean in sync with the accepted Gamma
      adapt_sd$Gamma <- adapt_sd$Gamma * 1.01
    } else {
      adapt_sd$Gamma <- adapt_sd$Gamma * 0.99
    }
    
    # Update length scales and variance scales for Lambda
    for (k in 1:n_topics) {
      proposed_length_scale <- exp(log(current_state$length_scales_lambda[k]) + rnorm(1, 0, adapt_sd$length_scales_lambda[k]))
      proposed_var_scale <- exp(log(current_state$var_scales_lambda[k]) + rnorm(1, 0, adapt_sd$var_scales_lambda[k]))
      
      current_log_prior <- dgamma(current_state$length_scales_lambda[k], shape = alpha_lambda, rate = beta_lambda, log = TRUE) +
        dgamma(current_state$var_scales_lambda[k], shape = alpha_sigma, rate = beta_sigma, log = TRUE)
      proposed_log_prior <- dgamma(proposed_length_scale, shape = alpha_lambda, rate = beta_lambda, log = TRUE) +
        dgamma(proposed_var_scale, shape = alpha_sigma, rate = beta_sigma, log = TRUE)
      
      current_log_likelihood <- sum(sapply(1:n_individuals, function(i) {
        log_gp_prior_vec(current_state$Lambda[i, k, ], lambda_mean_current[i, k], 
                         current_state$length_scales_lambda[k], 
                         current_state$var_scales_lambda[k])
      }))
      proposed_log_likelihood <- sum(sapply(1:n_individuals, function(i) {
        log_gp_prior_vec(current_state$Lambda[i, k, ], lambda_mean_current[i, k], 
                         proposed_length_scale, proposed_var_scale)
      }))
      
      # The log-normal random walk is asymmetric on the original scale, so the
      # MH ratio needs the Jacobian term log(proposed) - log(current)
      log_accept_ratio <- (proposed_log_likelihood + proposed_log_prior) -
        (current_log_likelihood + current_log_prior) +
        log(proposed_length_scale) - log(current_state$length_scales_lambda[k]) +
        log(proposed_var_scale) - log(current_state$var_scales_lambda[k])
      
      if (log(runif(1)) < log_accept_ratio) {
        current_state$length_scales_lambda[k] <- proposed_length_scale
        current_state$var_scales_lambda[k] <- proposed_var_scale
        adapt_sd$length_scales_lambda[k] <- adapt_sd$length_scales_lambda[k] * 1.01
        adapt_sd$var_scales_lambda[k] <- adapt_sd$var_scales_lambda[k] * 1.01
      } else {
        adapt_sd$length_scales_lambda[k] <- adapt_sd$length_scales_lambda[k] * 0.99
        adapt_sd$var_scales_lambda[k] <- adapt_sd$var_scales_lambda[k] * 0.99
      }
    }
    
    # Update length scales and variance scales for Phi
    for (k in 1:n_topics) {
      for (d in 1:n_diseases) {
        idx <- (k-1)*n_diseases + d
        proposed_length_scale <- exp(log(current_state$length_scales_phi[idx]) + rnorm(1, 0, adapt_sd$length_scales_phi[idx]))
        proposed_var_scale <- exp(log(current_state$var_scales_phi[idx]) + rnorm(1, 0, adapt_sd$var_scales_phi[idx]))
        
        current_log_prior <- dgamma(current_state$length_scales_phi[idx], shape = alpha_phi, rate = beta_phi, log = TRUE) +
          dgamma(current_state$var_scales_phi[idx], shape = alpha_sigma_phi, rate = beta_sigma_phi, log = TRUE)
        proposed_log_prior <- dgamma(proposed_length_scale, shape = alpha_phi, rate = beta_phi, log = TRUE) +
          dgamma(proposed_var_scale, shape = alpha_sigma_phi, rate = beta_sigma_phi, log = TRUE)
        
        current_log_likelihood <- log_gp_prior_vec(current_state$Phi[k, d, ], 0, 
                                                   current_state$length_scales_phi[idx], 
                                                   current_state$var_scales_phi[idx])
        proposed_log_likelihood <- log_gp_prior_vec(current_state$Phi[k, d, ], 0, 
                                                    proposed_length_scale, proposed_var_scale)
        
        # Include the Jacobian of the log-scale random walk, as above
        log_accept_ratio <- (proposed_log_likelihood + proposed_log_prior) -
          (current_log_likelihood + current_log_prior) +
          log(proposed_length_scale) - log(current_state$length_scales_phi[idx]) +
          log(proposed_var_scale) - log(current_state$var_scales_phi[idx])
     
        if (log(runif(1)) < log_accept_ratio) {
          current_state$length_scales_phi[idx] <- proposed_length_scale
          current_state$var_scales_phi[idx] <- proposed_var_scale
          adapt_sd$length_scales_phi[idx] <- adapt_sd$length_scales_phi[idx] * 1.01
          adapt_sd$var_scales_phi[idx] <- adapt_sd$var_scales_phi[idx] * 1.01
        } else {
          adapt_sd$length_scales_phi[idx] <- adapt_sd$length_scales_phi[idx] * 0.99
          adapt_sd$var_scales_phi[idx] <- adapt_sd$var_scales_phi[idx] * 0.99
        }
      }
    }
    
    # Store samples
    samples$Lambda[iter, , , ] <- current_state$Lambda
    samples$Phi[iter, , , ] <- current_state$Phi
    samples$Gamma[iter, , ] <- current_state$Gamma
    samples$length_scales_lambda[iter, ] <- current_state$length_scales_lambda
    samples$var_scales_lambda[iter, ] <- current_state$var_scales_lambda
    samples$length_scales_phi[iter, ] <- current_state$length_scales_phi
    samples$var_scales_phi[iter, ] <- current_state$var_scales_phi
    
    
    # print(paste("Iteration", iter))  # optional progress reporting
    
  }
  
  return(samples)
}
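
One note before running: the output below includes lines like "Overall acceptance rate: 0.468", which the function above does not itself compute, so acceptance counters were presumably kept outside the displayed code. A self-contained sketch of that bookkeeping on a toy one-dimensional Metropolis chain:

Code
# Toy random-walk Metropolis targeting N(0, 1), tracking the acceptance rate
n_iter <- 5000
x <- 0
accept_count <- 0
for (iter in 1:n_iter) {
  x_prop <- x + rnorm(1, 0, 0.5)
  if (log(runif(1)) < dnorm(x_prop, log = TRUE) - dnorm(x, log = TRUE)) {
    x <- x_prop
    accept_count <- accept_count + 1
  }
}
print(paste("Overall acceptance rate:", round(accept_count / n_iter, 3)))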

And now we run! The blocks that follow are posterior summaries and effective sample sizes for a handful of monitored quantities.

Code
# Assuming you have already defined y, mu_d_logit, g_i, and n_topics

# Initialize
initial_values <- warm_gp_initialization(y, mu_d_logit, g_i, n_topics)

# Run MCMC
# Define hyperparameters
alpha_lambda <- 2
beta_lambda <- 0.5
alpha_sigma <- 2
beta_sigma <- 2
alpha_phi <- 2
beta_phi <- 0.5
alpha_sigma_phi <- 2
beta_sigma_phi <- 2
alpha_Gamma <- 2
beta_Gamma <- 2

# Run MCMC
mcmc_results <- mcmc_sampler_optimized(y, mu_d_logit, g_i, n_iterations = 5000, initial_values,
                                       alpha_lambda, beta_lambda, alpha_sigma, beta_sigma,
                                       alpha_phi, beta_phi, alpha_sigma_phi, beta_sigma_phi,
                                       alpha_Gamma, beta_Gamma)
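
The diagnostics below have the shape of coda summaries followed by effective sample sizes; a sketch of how such output might be produced from the stored draws (the particular traced entries are illustrative guesses):

Code
library(coda)

# Illustrative: one Lambda entry's trace as an mcmc object
lambda_trace <- mcmc(mcmc_results$Lambda[, 1, 1, 1])
summary(lambda_trace)        # mean, SD, naive and time-series SE, quantiles
effectiveSize(lambda_trace)  # printed as "var1 <ESS>"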


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
     7.965e-03      4.746e-03      6.712e-05      1.607e-03 

2. Quantiles for each variable:

    2.5%      25%      50%      75%    97.5% 
0.001085 0.004428 0.007620 0.010563 0.021212 

    var1 
8.726205 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
     0.1378736      0.0122489      0.0001732      0.0068060 

2. Quantiles for each variable:

  2.5%    25%    50%    75%  97.5% 
0.1266 0.1290 0.1316 0.1447 0.1683 

    var1 
3.239018 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
    -0.3993709      0.0071782      0.0001015      0.0034949 

2. Quantiles for each variable:

   2.5%     25%     50%     75%   97.5% 
-0.4181 -0.4034 -0.3992 -0.3938 -0.3879 

    var1 
4.218384 
[1] "Overall acceptance rate: 0.468"


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
     -0.783790       0.200259       0.002832       0.069990 

2. Quantiles for each variable:

   2.5%     25%     50%     75%   97.5% 
-1.1324 -0.9476 -0.8020 -0.6241 -0.3698 

   var1 
8.18684 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
     -0.178708       0.378524       0.005353       0.242464 

2. Quantiles for each variable:

    2.5%      25%      50%      75%    97.5% 
-1.18346 -0.23113 -0.02400  0.06656  0.19386 

    var1 
2.437192 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
      7.808309       0.218415       0.003089       0.104091 

2. Quantiles for each variable:

 2.5%   25%   50%   75% 97.5% 
7.329 7.708 7.804 7.940 8.207 

    var1 
4.402884 
[1] "Overall acceptance rate: 0.505"


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
       -2.5676         1.0679         0.0151         0.5692 

2. Quantiles for each variable:

   2.5%     25%     50%     75%   97.5% 
-4.3484 -3.2507 -2.8063 -1.6897 -0.4076 

    var1 
3.520475 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
      -48.8098        23.1416         0.3273        20.8284 

2. Quantiles for each variable:

  2.5%    25%    50%    75%  97.5% 
-82.54 -70.80 -47.73 -29.51 -11.87 

    var1 
1.234454 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
       -3.9658         1.2095         0.0171         0.5620 

2. Quantiles for each variable:

   2.5%     25%     50%     75%   97.5% 
-5.6484 -4.8701 -4.2541 -3.3206 -0.9343 

    var1 
4.631304 
[1] "Overall acceptance rate: 0.523"


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

      Mean     SD Naive SE Time-series SE
[1,] 1.356 0.2432  0.00344        0.02390
[2,] 1.326 0.9756  0.01380        0.09703

2. Quantiles for each variable:

      2.5%   25%   50%   75% 97.5%
var1 1.178 1.293 1.361 1.387 1.423
var2 1.101 1.216 1.258 1.278 1.304

    var1     var2 
103.5917 101.0924 
[1] "Overall acceptance rate: 0.483"


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
     7.965e-03      4.746e-03      6.712e-05      1.607e-03 

2. Quantiles for each variable:

    2.5%      25%      50%      75%    97.5% 
0.001085 0.004428 0.007620 0.010563 0.021212 

    var1 
8.726205 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
     0.1378736      0.0122489      0.0001732      0.0068060 

2. Quantiles for each variable:

  2.5%    25%    50%    75%  97.5% 
0.1266 0.1290 0.1316 0.1447 0.1683 

    var1 
3.239018 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
    -0.3993709      0.0071782      0.0001015      0.0034949 

2. Quantiles for each variable:

   2.5%     25%     50%     75%   97.5% 
-0.4181 -0.4034 -0.3992 -0.3938 -0.3879 

    var1 
4.218384 
[1] "Overall acceptance rate: 0.468"


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
     -0.783790       0.200259       0.002832       0.069990 

2. Quantiles for each variable:

   2.5%     25%     50%     75%   97.5% 
-1.1324 -0.9476 -0.8020 -0.6241 -0.3698 

   var1 
8.18684 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
     -0.178708       0.378524       0.005353       0.242464 

2. Quantiles for each variable:

    2.5%      25%      50%      75%    97.5% 
-1.18346 -0.23113 -0.02400  0.06656  0.19386 

    var1 
2.437192 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
      7.808309       0.218415       0.003089       0.104091 

2. Quantiles for each variable:

 2.5%   25%   50%   75% 97.5% 
7.329 7.708 7.804 7.940 8.207 

    var1 
4.402884 
[1] "Overall acceptance rate: 0.505"


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
       -2.5676         1.0679         0.0151         0.5692 

2. Quantiles for each variable:

   2.5%     25%     50%     75%   97.5% 
-4.3484 -3.2507 -2.8063 -1.6897 -0.4076 

    var1 
3.520475 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
      -48.8098        23.1416         0.3273        20.8284 

2. Quantiles for each variable:

  2.5%    25%    50%    75%  97.5% 
-82.54 -70.80 -47.73 -29.51 -11.87 

    var1 
1.234454 


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

          Mean             SD       Naive SE Time-series SE 
       -3.9658         1.2095         0.0171         0.5620 

2. Quantiles for each variable:

   2.5%     25%     50%     75%   97.5% 
-5.6484 -4.8701 -4.2541 -3.3206 -0.9343 

    var1 
4.631304 
[1] "Overall acceptance rate: 0.523"


Iterations = 1:5000
Thinning interval = 1 
Number of chains = 1 
Sample size per chain = 5000 

1. Empirical mean and standard deviation for each variable,
   plus standard error of the mean:

      Mean     SD Naive SE Time-series SE
[1,] 1.356 0.2432  0.00344        0.02390
[2,] 1.326 0.9756  0.01380        0.09703

2. Quantiles for each variable:

      2.5%   25%   50%   75% 97.5%
var1 1.178 1.293 1.361 1.387 1.423
var2 1.101 1.216 1.258 1.278 1.304

    var1     var2 
103.5917 101.0924 
[1] "Overall acceptance rate: 0.483"