# NOT RUN {
# See another demonstration under the Note section of statTn().
# }
# NOT RUN {
# Joe (2014, p. 237, table 5.2)
# Both the Gumbel-Hougaard and Plackett copulas below have a Kendall Tau of
# about 0.5. Joe (2014) tabulates a Jeffrey divergence of about 0.110 and a
# Kullback-Leibler sample size of 133 but does not list the copula
# parameters---only that Kendall Tau = 0.5.
KL <- kullCOP(cop1=GHcop, para1=2, cop2=PLACKETTcop, para2=11.40484)
# Reports Jeffrey divergence = 0.1063858
# Kullback-Leibler sample size = 136 (another run produces 131)
sizes_tau <- replicate(20,
  kullCOP(cop1=GHcop, para1=2, cop2=PLACKETTcop,
          para2=11.40484, verbose=FALSE)$KL.sample.size)
print(as.integer(c(mean(sizes_tau), sd(sizes_tau)))) # 134 plus/minus 5

# Joe (2014, p. 237, table 5.3)
# Both copulas below have a Spearman Rho of about 0.5. Joe (2014) tabulates a
# Jeffrey divergence of about 0.063 and a Kullback-Leibler sample size of 210
# but again does not list the parameters---only that Spearman Rho = 0.5.
KL <- kullCOP(cop1=GHcop, para1=1.541071, cop2=PLACKETTcop, para2=5.115658)
# Reports Jeffrey divergence = 0.06381151
# Kullback-Leibler sample size = 220 (another run produces 203)
sizes_rho <- replicate(20,
  kullCOP(cop1=GHcop, para1=1.541071, cop2=PLACKETTcop,
          para2=5.115658, verbose=FALSE)$KL.sample.size)
print(as.integer(c(mean(sizes_rho), sd(sizes_rho)))) # 220 plus/minus 16

# Joe (2014) likely did the numerical integrations using analytical solutions
# to the probability densities and not the rectangular approximations made
# inside densityCOP().
# }
# NOT RUN {
# }
# NOT RUN {
# Compare Jeffrey divergence estimates as functions of sample size when
# computed using Sobol sequences or not---Sobol sequences have less sampling
# variability.
GHpar <- PApar <- 2 # Spearman Rho = 0.6822339
Ns <- as.integer(10^c(seq(2.0, 3.5, by=0.01), seq(3.6, 5, by=0.2)))
# vapply() (not sapply()) guarantees a numeric vector regardless of input, and
# iterating over Ns directly avoids the 1:length(Ns) zero-length pitfall.
JDuni <- vapply(Ns, function(n) {
           kullCOP(cop1=GHcop, para1=GHpar, verbose=FALSE,
                   cop2=PARETOcop, para2=PApar, n=n,
                   sobol=FALSE)$Jeffrey.divergence }, numeric(1))
JDsob <- vapply(Ns, function(n) {
           kullCOP(cop1=GHcop, para1=GHpar, verbose=FALSE,
                   cop2=PARETOcop, para2=PApar, n=n,
                   sobol=TRUE )$Jeffrey.divergence }, numeric(1))
plot(Ns, JDuni, type="l", log="x", # black line, notice likely outliers too
     xlab="Sample Size", ylab="Jeffrey Divergence") # fixed "Jeffery" typo
lines(Ns, JDsob, col=2) # red line
print(c(mean(JDuni), sd(JDuni))) # [1] 0.05923462 0.01544651
print(c(mean(JDsob), sd(JDsob))) # [1] 0.05863623 0.01184879
# So we see a slightly smaller variation when the Sobol sequence is used.
# }
# Run the code above in your browser using DataLab