#Ensemble Quadratic MCMC algorithm example for fitting a Weibull
#distribution
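#Load the package providing ensemblealg, psrfdiagnostic, and hpdparameter
#(assumed here to be the QAEnsemble package)
library(QAEnsemble)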
#Assume the true parameters are
a_shape = 20
sigma_scale = 900
#Random sample from the Weibull distribution with a = 20 and sigma = 900,
#Y~WEI(a = 20, sigma = 900)
num_ran_samples = 50
#Set the seed for this example
set.seed(10)
data_weibull = rweibull(num_ran_samples, shape = a_shape, scale = sigma_scale)
#We want to estimate a_shape and sigma_scale
#Log prior function for a_shape and sigma_scale
#(assumed priors a_shape ~ U(1e-2, 1e2) and sigma_scale ~ U(1, 1e4))
logp <- function(param)
{
  a_shape_use = param[1]
  sigma_scale_use = param[2]
  logp_val = dunif(a_shape_use, min = 1e-2, max = 1e2, log = TRUE) +
    dunif(sigma_scale_use, min = 1, max = 1e4, log = TRUE)
  return(logp_val)
}
#Log likelihood function for a_shape and sigma_scale
logl <- function(param)
{
  a_shape_use = param[1]
  sigma_scale_use = param[2]
  logl_val = sum(dweibull(data_weibull, shape = a_shape_use, scale = sigma_scale_use, log = TRUE))
  return(logl_val)
}
logfuns = list(logp = logp, logl = logl)
num_par = 2
#It is recommended to use at least twice as many chains as the number of
#parameters to be estimated.
num_chains = 2*num_par
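#With num_par = 2, this gives num_chains = 4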
#Generate initial guesses for the MCMC chains, redrawing any guess that gives a
#non-finite log posterior
theta0 = matrix(0, nrow = num_par, ncol = num_chains)
temp_val = 0
j = 0
while(j < num_chains)
{
  initial = c(runif(1, 1e-2, 1e2), runif(1, 1, 1e4))
  temp_val = logl(initial) + logp(initial)
  while(is.na(temp_val) || is.infinite(temp_val))
  {
    initial = c(runif(1, 1e-2, 1e2), runif(1, 1, 1e4))
    temp_val = logl(initial) + logp(initial)
  }
  j = j + 1
  message(paste('j:', j))
  theta0[1,j] = initial[1]
  theta0[2,j] = initial[2]
}
num_chain_iterations = 1e4
thin_val_par = 10
#The total number of returned samples is given by
#(num_chain_iterations/thin_val_par)*num_chains = 4e3
#Ensemble Quadratic MCMC algorithm
Weibull_Quad_result = ensemblealg(theta0, logfuns,
  T_iter = num_chain_iterations, Thin_val = thin_val_par)
my_samples = Weibull_Quad_result$theta_sample
my_log_prior = Weibull_Quad_result$log_prior_sample
my_log_like = Weibull_Quad_result$log_like_sample
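#Indexing used below: theta_sample is [parameter, chain, sample]; the log prior
#and log likelihood samples are [chain, sample]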
#Discard the first 25% of each chain as burn-in
my_samples_burn_in = my_samples[,,-c(1:floor((num_chain_iterations/thin_val_par)*0.25))]
my_log_prior_burn_in = my_log_prior[,-c(1:floor((num_chain_iterations/thin_val_par)*0.25))]
my_log_like_burn_in = my_log_like[,-c(1:floor((num_chain_iterations/thin_val_par)*0.25))]
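#Here the first 250 of the 1000 thinned samples per chain are discarded,
#leaving 750 samples per chain (3000 samples in total)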
#Calculate potential scale reduction factors
diagnostic_result = psrfdiagnostic(my_samples_burn_in, 0.05)
diagnostic_result$p_s_r_f_vec
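#Potential scale reduction factors close to 1 (commonly below about 1.1) suggest
#the chains have approximately converged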
#log unnormalized posterior samples
log_un_post_vec = as.vector(my_log_prior_burn_in + my_log_like_burn_in)
#a_shape posterior samples
k1 = as.vector(my_samples_burn_in[1,,])
#sigma_scale posterior samples
k2 = as.vector(my_samples_burn_in[2,,])
#Calculate posterior median, 95% credible intervals, and maximum posterior for
#the parameters
#Posterior median and 95% credible interval for a_shape
median(k1)
hpdparameter(k1, 0.05)
#Posterior median and 95% credible interval for sigma_scale
median(k2)
hpdparameter(k2, 0.05)
#Parameter values at the maximum of the unnormalized posterior (from the sampled points)
k1[which.max(log_un_post_vec)]
k2[which.max(log_un_post_vec)]
#These plots display the silhouette of the unnormalized posterior surface as
#seen along each chosen parameter's axis
plot(k1, exp(log_un_post_vec), xlab="a_shape", ylab="unnormalized posterior density")
plot(k2, exp(log_un_post_vec), xlab="sigma_scale", ylab="unnormalized posterior density")
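#As an additional check (a minimal sketch using base R graphics), the marginal
#posterior histograms can be compared against the true values used to simulate the data
hist(k1, breaks = 30, xlab = "a_shape", main = "Posterior samples of a_shape")
abline(v = a_shape, col = "red", lwd = 2)
hist(k2, breaks = 30, xlab = "sigma_scale", main = "Posterior samples of sigma_scale")
abline(v = sigma_scale, col = "red", lwd = 2)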