# NOT RUN {
#=========================== The first example ======================================
# It is sufficient to run the function with its default arguments:
datasets <- validation.dataset_srsc()
#============================= The second example ======================================
# If the user is not familiar with plausible values for the thresholds, it is
# better to use actually estimated values as an example of true parameters.
# The following explains this.
# First, to obtain posterior mean estimates, fit a model:
fit <- fit_Bayesian_FROC(dataList.Chakra.1, ite = 1111, summary = FALSE, cha = 3)
# Secondly, extract the expected a posteriori estimates (EAPs) from the object fit.
# Note that the EAP is also called the "posterior mean".
z <- rstan::get_posterior_mean(fit, par = c("z"))[, "mean-all chains"]
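# (A minimal sketch, not part of the original example.) The same EAPs can be
# reproduced by hand from the MCMC draws, which makes explicit that the EAP is
# the posterior mean; this assumes rstan::extract() applies to fit because it
# extends a stanfit object.
z.draws <- rstan::extract(fit)$z   # matrix: MCMC draws x number of thresholds
colMeans(z.draws)                  # should approximately reproduce the EAPs above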
# Thirdly, use this z as the true parameter:
datasets <- validation.dataset_srsc(z.truth = z)
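# A quick look at what the returned object contains (a rough sketch; the exact
# components may differ across package versions):
names(datasets)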
#========================================================================================
# 1) Extract replicated fitted model objects
#========================================================================================
# Replicate models (three replicated datasets, each fitted by MCMC):
a <- validation.dataset_srsc(replicate.datset = 3, ite = 111)
# Check convergence. In the above call the number of MCMC iterations is 111,
# which is too small to obtain converged MCMC chains, so the following is an
# example of a non-convergent model according to the R hat criterion.
ConfirmConvergence(a$fit[[3]])
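# Alternatively, inspect the R hat statistics directly via rstan
# (a sketch, assuming a$fit[[3]] behaves as a stanfit object):
max(rstan::summary(a$fit[[3]])$summary[, "Rhat"], na.rm = TRUE)  # values far above 1 indicate non-convergence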
# Check the trace plot to confirm whether the MCMC chains converged.
rstan::stan_trace(a$fit[[3]], pars = "A")
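# Other standard rstan diagnostic plots also apply (a sketch; "A" denotes the
# AUC parameter used above):
rstan::stan_dens(a$fit[[3]], pars = "A")   # posterior density of A
rstan::stan_ac(a$fit[[3]], pars = "A")     # autocorrelation of the chains for A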
# Check the posterior predictive p value for the chi square goodness of fit,
# whose null hypothesis is that the model fits the data well.
a$fit[[3]]@posterior_predictive_pvalue_for_chi_square_goodness_of_fit
# ppp(a$fit[[3]])  # Note: this value did not coincide with the slot above,
# which suggests that ppp() may be implemented incorrectly (noted 2020 Nov 29).
# In the above example, the posterior predictive p value is large, but the
# model did not converge according to the R hat criterion, which indicates
# that the model does not fit the data. The p value, however, says that we
# cannot reject the null hypothesis that the model fits.
# The author thinks this apparent contradiction arises because the number of
# MCMC iterations is too small, which makes the Monte Carlo integral behind
# the p value unreliable; thus the p value is not trustworthy.
# Calculation of the p value relies on the law of large numbers, so a
# sufficiently large number of MCMC samples is needed to obtain a reliable
# posterior predictive p value. (2019 August 29)
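# A sketch of the remedy: rerun with a larger number of MCMC iterations and
# re-check both convergence and the p value. Here ite = 11111 is only an
# illustrative value (this run may take a while), and the slot access assumes
# the replicated fits carry the same slot as objects from fit_Bayesian_FROC().
b <- validation.dataset_srsc(replicate.datset = 3, ite = 11111)
ConfirmConvergence(b$fit[[3]])
b$fit[[3]]@posterior_predictive_pvalue_for_chi_square_goodness_of_fit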
#========================================================================================
# 2) Histogram of errors of posterior means over replicated datasets
#========================================================================================
a <- validation.dataset_srsc(replicate.datset = 100)
hist(a$error.of.AUC, breaks = 111)
hist(a$error.of.AUC, breaks = 30)
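# Simple summary statistics of the errors (base R, a minimal sketch):
mean(a$error.of.AUC)  # average error (bias) of the posterior mean of AUC
sd(a$error.of.AUC)    # spread of the errors over the 100 replicated datasets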
#========================================================================================
# absolute.errors = FALSE keeps signed errors, so negative biases can occur
#========================================================================================
validation.dataset_srsc(absolute.errors = FALSE)
#========================================================================================
# absolute.errors = TRUE coerces negative biases to positive ones, i.e., absolute values (L^2 norm)
#========================================================================================
validation.dataset_srsc(absolute.errors = TRUE)
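# A rough sketch comparing the two settings, assuming both return an
# error.of.AUC component as above (v.signed and v.abs are illustrative names):
v.signed <- validation.dataset_srsc(absolute.errors = FALSE, replicate.datset = 30)
v.abs    <- validation.dataset_srsc(absolute.errors = TRUE,  replicate.datset = 30)
hist(v.signed$error.of.AUC, breaks = 30)  # signed errors: may be negative
hist(v.abs$error.of.AUC,    breaks = 30)  # absolute errors: non-negative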
# }