# The first example: MRMC data
#========================================================================================
# 1) Fit a Model to MRMC Data
#========================================================================================
fit <- fit_Bayesian_FROC( ite = 111, dataList = ddd )
#========================================================================================
# 2) Evaluate Posterior Predictive P value for the Goodness of Fit
#========================================================================================
ppp(fit)
# If this quantity, namely a p value, is large,
# then we may say that the goodness of fit is good (we do not reject the null hypothesis).
# In the traditional procedure, if the p value is less than 0.05 or 0.01, then we reject
# the null hypothesis that our model fits the data well.
# Of course, even if the p value is small, we should not discard our result outright.
# What a p value really measures is not so clear, and in frequentist methods,
# p values are known to be sensitive to sample size.
# In the Bayesian context, the posterior predictive p value may behave similarly.
# Nevertheless, many statisticians rely on this quantity.
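#========================================================================================
#    A sketch: using the p value in a simple decision rule
#========================================================================================
# A minimal sketch, assuming that ppp() returns a single numeric p value;
# verify the return value in your own session (e.g., with str(ppp(fit))).
p.value <- ppp(fit)
if (is.numeric(p.value) && length(p.value) == 1) {
  if (p.value < 0.05) {
    message("Small p value: evidence against the goodness of fit.")
  } else {
    message("No evidence against the goodness of fit at the 5% level.")
  }
}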
# The second example uses the dataset named d
#========================================================================================
# 1) Fit a Model to Data
#========================================================================================
fitt <- fit_Bayesian_FROC( ite = 111, dataList = d )
#========================================================================================
# 2) Evaluate Posterior Predictive P value for the Goodness of Fit
#========================================================================================
ppp(fitt)
# If this quantity is larger, then we may say that the model fits better.
# This ppp function was implemented on 25 August 2019.
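#========================================================================================
#    A sketch: comparing the two fits by their p values
#========================================================================================
# A minimal sketch, again assuming ppp() returns a single numeric p value:
# following the remark above, the fit with the larger p value may be preferred.
p1 <- ppp(fit)
p2 <- ppp(fitt)
if (is.numeric(p1) && is.numeric(p2)) {
  if (p1 > p2) {
    message("fit shows the better goodness of fit.")
  } else {
    message("fitt shows the better goodness of fit.")
  }
}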
#========================================================================================
# PPP is problematic
#========================================================================================
# Consider the dataset:
dat <- list(c=c(4,3,2,1),       # Confidence levels. Note that c is ignored.
            h=c(77,97,32,31),   # Number of hits for each confidence level
            f=c(77,1,14,74),    # Number of false alarms for each confidence level
            NL=259,             # Number of lesions
            NI=57,              # Number of images
            C=4)                # Number of confidence levels
# Fit a model to the data
fit <- fit_Bayesian_FROC(dat, ite = 111)
# Calculate the p value
ppp(fit)
# Then we can see that the FPF and TPF points are far from the fitted FROC curve,
# yet the p value is not so small; thus, in this case, ppp does not behave as we
# would like (see the sketch just below).
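#========================================================================================
#    A sketch: empirical FPF and TPF from the raw counts
#========================================================================================
# A minimal sketch, not part of the package API: the empirical FPF (cumulative
# false alarms per image) and TPF (cumulative hits per lesion) are computed
# from the standard FROC definitions, with counts taken from the highest
# confidence level down to the lowest, so the points can be inspected directly.
FPF <- cumsum(dat$f) / dat$NI
TPF <- cumsum(dat$h) / dat$NL
plot(FPF, TPF, xlab = "FPF", ylab = "TPF", main = "Empirical FROC points")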
# In our model, we need the monotonicity condition, namely
#
# h[1] > h[2] > h[3] > h[4]
# f[1] < f[2] < f[3] < f[4]
#
# However, the above dataset is far from satisfying this condition, which may
# explain the undesired p value above.
# Revised 7 September 2019
# Of course, the data need not satisfy this monotonicity exactly, but good data
# should satisfy it approximately,
# since a doctor should not make a wrong (false positive) diagnosis with high confidence.
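#========================================================================================
#    A sketch: checking the monotonicity condition
#========================================================================================
# A minimal sketch, not part of the package API: with the counts ordered from
# the highest confidence level to the lowest (as in dat above), the condition
# h[1] > h[2] > h[3] > h[4] and f[1] < f[2] < f[3] < f[4] can be checked with diff().
hits.decreasing <- all(diff(dat$h) < 0)
false.alarms.increasing <- all(diff(dat$f) > 0)
if (hits.decreasing && false.alarms.increasing) {
  message("Monotonicity condition holds.")
} else {
  message("Monotonicity condition is violated; the p value may be unreliable here.")
}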