# The 1st example: MRMC data
#========================================================================================
# 1) Fit a Model to MRMC Data
#========================================================================================
fit <- fit_Bayesian_FROC(ite = 33, dataList = ddd) # ite = 33 is only for a fast example; use far more iterations in practice
#========================================================================================
# 2) Evaluate Posterior Predictive P value for the Goodness of Fit
#========================================================================================
ppp(fit)
# If this quantity, namely the posterior predictive p value, is large,
# then we may say that the goodness of fit is good (we accept the null hypothesis).
# In the traditional procedure, if the p value is less than 0.05 or 0.01, then we
# reject the null hypothesis that our model fits the data well.
# Of course, even if the p value is small, we should not ignore our result.
# What a p value really measures is not so clear, and in frequentist methods
# we have experienced that p values are sensitive to sample size.
# So, in the Bayesian context, this quantity might show a similar
# sensitivity with respect to ...
# Anyway, many statisticians rely on this quantity.
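# A minimal sketch of the idea behind a posterior predictive p value, using a
# toy binomial model with a conjugate Beta prior. This is NOT the FROC model
# of this package; all names and numbers below are illustrative assumptions.
set.seed(1)
y <- 7; n <- 20                         # Observed data: 7 successes in 20 trials
theta <- rbeta(5000, 1 + y, 1 + n - y)  # Posterior draws under a Beta(1, 1) prior
y_rep <- rbinom(5000, n, theta)         # Replicated data from the posterior predictive
mean(y_rep >= y)                        # Posterior predictive p value for T(y) = y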
# The 2nd example uses the dataset named d
#========================================================================================
# 1) Fit a Model to Data
#========================================================================================
fitt <- fit_Bayesian_FROC(ite = 33, dataList = d)
#========================================================================================
# 2) Evaluate Posterior Predictive P value for the Goodness of Fit
#========================================================================================
ppp(fitt)
# If this p value is large, then we may say that our model fits the data well.
# I made this ppp on 25 August 2019.
# I cannot believe that, now, one year has gone by (15 August 2020).
#========================================================================================
# PPP is problematic
#========================================================================================
# Consider the dataset:
dat <- list(c=c(4,3,2,1), # Confidence level. Note that c is ignored.
h=c(77,97,32,31), # Number of hits for each confidence level
f=c(77,1,14,74), # Number of false alarms for each confidence level
NL=259, # Number of lesions
NI=57, # Number of images
            C=4) # Number of confidence levels
# Fit a model to the data
fit <- fit_Bayesian_FROC(dataList = dat, ite = 33)
# Calculate the posterior predictive p value
ppp(fit)
# In our model, we expect the monotonicity condition, namely
#
#   h[1] > h[2] > h[3] > h[4]
#   f[1] < f[2] < f[3] < f[4]
#
# However, the above dataset is far from satisfying this condition,
# and this results in the undesired p value above.
# Revised 7 Sept 2019; Aug 2020
# Of course, the data need not satisfy this monotonicity exactly, but good
# data would roughly satisfy it, since a physician should make correct
# diagnoses (and fewer false positives) more often when his or her
# confidence is higher. A small base-R check of this condition is shown below.
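# A small base-R check of the monotonicity condition for the dataset above
# (hits should decrease and false alarms should increase across the levels):
all(diff(dat$h) < 0)  # FALSE here: hits are not monotonically decreasing
all(diff(dat$f) > 0)  # FALSE here: false alarms are not monotonically increasing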
Close_all_graphic_devices() # 2020 August