# NOT RUN {
# Generate some (not so) high-dimensional data with (not so) many samples
ns <- c(4, 5, 6)                                 # sample sizes for the 3 classes
Ylist <- createS(n = ns, p = 6, dataset = TRUE)  # list of raw data matrices (one per class)
Slist <- lapply(Ylist, covML)                    # ML covariance estimate for each class
Tlist <- default.target.fused(Slist, ns, type = "DIAES")  # fused target matrices
# Grid-based cross-validation over a log-spaced grid of penalty values
lambdas <- 10^seq(-5, 3, length.out = 7)
# Leave-one-out CV
a <- optPenalty.fused.grid(Ylist, Tlist,
lambdas = lambdas,
cv.method = "LOOCV", maxit = 1000)
# Approximate leave-one-out CV
b <- optPenalty.fused.grid(Ylist, Tlist,
lambdas = lambdas,
cv.method = "aLOOCV", maxit = 1000)
# "Special" leave-one-out CV.
# Result renamed from `c` to avoid masking base::c().
c_sloocv <- optPenalty.fused.grid(Ylist, Tlist,
lambdas = lambdas,
cv.method = "sLOOCV", maxit = 1000)
# k-fold CV with k = 2
d <- optPenalty.fused.grid(Ylist, Tlist,
lambdas = lambdas,
cv.method = "kCV", k = 2, maxit = 1000)
# Numerical optimization (uses the default "optim" optimizer with method "BFGS")
# Each call searches the penalty surface directly instead of over a fixed grid;
# aa and bb are reused below when plotting the grid-based results.
aa <- optPenalty.fused.auto(Ylist, Tlist, cv.method = "LOOCV", method = "BFGS")
print(aa)
bb <- optPenalty.fused.auto(Ylist, Tlist, cv.method = "aLOOCV", method = "BFGS")
print(bb)
cc <- optPenalty.fused.auto(Ylist, Tlist, cv.method = "sLOOCV", method = "BFGS")
print(cc)
dd <- optPenalty.fused.auto(Ylist, Tlist, cv.method = "kCV", k=3, method="BFGS")
print(dd)
#
# Plot the results
#
# LOOCV
# Get minimums and plot
# NOTE(review): assumes expand.grid(a$lambda, a$lambdaF) enumerates penalty
# pairs in the same (column-major) order as the entries of the fcvl matrix —
# confirm against the optPenalty.fused.grid return layout.
amin <- log(expand.grid(a$lambda, a$lambdaF))[which.min(a$fcvl), ]
aamin <- c(log(aa$lambda[1,1]), log(aa$lambda[1,2]))  # optimum from the auto search
# Plot contour of the log CV score on the log-penalty grid;
# black dot = grid optimum, purple dot = numerically optimized optimum
filled.contour(log(a$lambda), log(a$lambdaF), log(a$fcvl), color = heat.colors,
plot.axes = {points(amin[1], amin[2], pch = 16);
points(aamin[1], aamin[2], pch = 16, col = "purple");
axis(1); axis(2)},
xlab = "lambda", ylab = "lambdaF", main = "LOOCV")
# Approximate LOOCV
# Get minimums and plot
bmin <- log(expand.grid(b$lambda, b$lambdaF))[which.min(b$fcvl), ]
# NOTE(review): unique() on the scalar bb$lambda[1,2] is a no-op here —
# presumably a leftover from an earlier version; verify nothing else was meant.
bbmin <- c(log(bb$lambda[1,1]), log(unique(bb$lambda[1,2])))
# Contour of the log CV score; black dot = grid optimum,
# purple dot = numerically optimized optimum
filled.contour(log(b$lambda), log(b$lambdaF), log(b$fcvl), color = heat.colors,
plot.axes = {points(bmin[1], bmin[2], pch = 16);
points(bbmin[1], bbmin[2], pch = 16, col ="purple");
axis(1); axis(2)},
xlab = "lambda", ylab = "lambdaF", main = "Approximate LOOCV")
#
# Arbitrary penalty graphs
#
# Generate some new high-dimensional data and a 2 by 2 factorial design
ns <- c(6, 5, 3, 2)
df <- expand.grid(Factor1 = LETTERS[1:2], Factor2 = letters[3:4])
Ylist <- createS(n = ns, p = 4, dataset = TRUE)
Tlist <- lapply(lapply(Ylist, covML), default.target, type = "Null")
# Construct penalty matrix
lambda <- default.penalty(df, type = "CartesianUnequal")
# Find optimal parameters,
# Using optim with method "Nelder-Mead" with "special" LOOCV
ans1 <- optPenalty.fused(Ylist, Tlist, lambda = lambda,
cv.method = "sLOOCV", verbose = FALSE)
print(ans1$lambda.unique)
# By approximate LOOCV using optim with method "BFGS"
ans2 <- optPenalty.fused(Ylist, Tlist, lambda = lambda,
cv.method = "aLOOCV", verbose = FALSE,
method = "BFGS")
print(ans2$lambda.unique)
# By LOOCV using nlm
# Starting values: all-ones matrix with zeros on the anti-diagonal.
# Sized from length(ns) rather than a hard-coded 4 so the example stays
# correct if the number of classes changes.
G <- length(ns)
lambda.init <- matrix(1, G, G)
lambda.init[cbind(seq_len(G), rev(seq_len(G)))] <- 0
ans3 <- optPenalty.fused(Ylist, Tlist, lambda = lambda,
lambda.init = lambda.init,
cv.method = "LOOCV", verbose = FALSE,
optimizer = "nlm")
print(ans3$lambda.unique)
# Quite different results!
#
# Arbitrary penalty graphs with fixed penalties!
#
# Generate some new high-dimensional data and a 2 by 2 factorial design
ns <- c(6, 5, 5, 5)
df <- expand.grid(DS = LETTERS[1:2], ER = letters[3:4])
Ylist <- createS(n = ns, p = 4, dataset = TRUE)
Tlist <- lapply(lapply(Ylist, covML), default.target, type = "Null")
lambda <- default.penalty(df, type = "Tensor")
print(lambda) # Say we want to penalize the pair (1,2) with strength 2.1;
# Fix the (1,2)/(2,1) fusion penalty at 2.1 (assignment kept symmetric)
lambda[2,1] <- lambda[1,2] <- 2.1
print(lambda)
# Specifying starting values is also possible:
init <- diag(length(ns))       # identity: unit ridge penalties, no fusion
init[2,1] <- init[1,2] <- 2.1  # start the fixed pair at its fixed value
res <- optPenalty.fused(Ylist, Tlist, lambda = lambda, lambda.init = init,
cv.method = "aLOOCV", optimizer = "nlm")
print(res)
# }
# Run the code above in your browser using DataLab