# NOT RUN {
#=========================== The first example ======================================
# It is sufficient to run the function with its default arguments:
datasets <- validation.dataset_srsc()
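# A hedged sketch of inspecting the returned object: we only assume it is a
# list exposing the components used later in these examples ($fit and
# $error.of.AUC); names() and str() are base R.
names(datasets)                 # names of the returned components
str(datasets, max.level = 1)    # one-level overview of the structure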
#============================= The second example ======================================
# If the user is not familiar with plausible values of the thresholds, then
# it is better to use the actually estimated values as an example of true
# parameters. In the following, I explain this.
# First, to get estimates, we run the following:
fit <- fit_Bayesian_FROC(dataList.Chakra.1, ite = 1111, summary = FALSE, cha = 3)
# Secondly, extract the expected a posteriori estimates (EAPs) of the
# thresholds from the fitted object:
z <- rstan::get_posterior_mean(fit, par = c("z"))[, "mean-all chains"]
# Thirdly, we use this z as the true values:
datasets <- validation.dataset_srsc(z.truth = z)
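# A hedged sketch of a parameter-recovery check, assuming the returned list
# exposes fitted objects in $fit, as in the later examples. We compare the
# assumed true thresholds z with the EAPs recovered from the first replicate.
z.recovered <- rstan::get_posterior_mean(datasets$fit[[1]],
                                         par = c("z"))[, "mean-all chains"]
round(z - z.recovered, 3)  # values near zero indicate good recovery of z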
#========================================================================================
# 1) Extract replicated fitted model objects
#========================================================================================
# Replicate the models:
a <- validation.dataset_srsc(replicate.datset = 3, ite = 111)
# Check convergence. In the above, the number of MCMC iterations is 111,
# which is too small to obtain convergent MCMC chains, so the following is
# an example of a model that fails the R-hat convergence criterion.
ConfirmConvergence(a$fit[[3]])
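# A hedged alternative convergence check, assuming a$fit[[3]] inherits from
# rstan's stanfit class (as the stan_trace call below suggests): inspect the
# R-hat statistics directly; values far above 1.01 indicate non-convergence.
rhat <- rstan::summary(a$fit[[3]])$summary[, "Rhat"]
max(rhat, na.rm = TRUE)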
# Check the trace plot to confirm whether the MCMC chains converge.
rstan::stan_trace(a$fit[[3]], pars = "A")
# Check the posterior predictive p value:
ppp(a$fit[[3]])
# In the above example, the posterior predictive p value is large enough,
# but the model did not converge under the R-hat criterion, which implies
# that the model does not fit the data. The p value, however, says that we
# cannot reject the null hypothesis that the model does fit.
# The author thinks this contradiction arises because the number of MCMC
# iterations is too small, which makes the Monte Carlo integral behind the
# p value inaccurate; thus the p value is not reliable here.
# Calculation of the p value relies on the law of large numbers, so to
# obtain a reliable posterior predictive p value we need a sufficiently
# large number of MCMC samples.
# Revised 29 August 2019
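# A hedged sketch of the remedy described above: re-fit with far more MCMC
# iterations (5000 is an illustrative choice, not a package default), so
# that the Monte Carlo integral behind the p value becomes reliable.
b <- validation.dataset_srsc(replicate.datset = 3, ite = 5000)
ConfirmConvergence(b$fit[[3]])  # should now satisfy the R-hat criterion
ppp(b$fit[[3]])                 # p value backed by a larger MCMC sample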
#========================================================================================
# 2) Histogram of errors of posterior means for replicated datasets
#========================================================================================
a <- validation.dataset_srsc(replicate.datset = 100)
hist(a$error.of.AUC, breaks = 111)
hist(a$error.of.AUC, breaks = 30)
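# A hedged numeric summary of the replication errors, assuming
# a$error.of.AUC is the numeric vector of AUC errors plotted above.
mean(a$error.of.AUC)           # average error (bias)
sd(a$error.of.AUC)             # spread of the errors
sqrt(mean(a$error.of.AUC^2))   # root mean squared error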
#========================================================================================
# absolute.errors = FALSE generates negative biases
#========================================================================================
validation.dataset_srsc(absolute.errors = FALSE)
#========================================================================================
# absolute.errors = TRUE does not generate negative biases
#========================================================================================
validation.dataset_srsc(absolute.errors = TRUE)
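# A hedged check of the contrast above, assuming both calls return a list
# with the $error.of.AUC component used earlier: absolute.errors = TRUE
# yields nonnegative errors, while FALSE may yield negative ones.
v.signed   <- validation.dataset_srsc(absolute.errors = FALSE)
v.absolute <- validation.dataset_srsc(absolute.errors = TRUE)
range(v.signed$error.of.AUC)    # can include negative values
range(v.absolute$error.of.AUC)  # lower bound should be >= 0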
# }