# Generate sample data from a known GKw distribution
set.seed(123)
true_par <- c(alpha = 2, beta = 3, gamma = 1.0, delta = 0.5, lambda = 0.5)
sample_data <- rgkw(100, alpha = true_par[1], beta = true_par[2],
                    gamma = true_par[3], delta = true_par[4], lambda = true_par[5])
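# Optional sanity check: overlay the implied density on a histogram of the
# sample. This sketch assumes dgkw() is the density counterpart of rgkw(),
# taking the same parameter arguments.
hist(sample_data, breaks = 20, freq = FALSE,
     main = "Simulated GKw sample vs. true density", xlab = "x")
curve(dgkw(x, alpha = true_par[1], beta = true_par[2], gamma = true_par[3],
           delta = true_par[4], lambda = true_par[5]),
      add = TRUE, col = "red", lwd = 2)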
# --- Use in optimization (e.g., stats::optim with the analytical gradient) ---
start_par <- c(alpha = 1.5, beta = 2.5, gamma = 1.2, delta = 0.3, lambda = 0.6) # Initial guess
# Optimization using the analytical gradient
mle_result_gr <- stats::optim(par = start_par,
                              fn = llgkw,      # Objective function (negative log-likelihood)
                              gr = grgkw,      # Analytical gradient function
                              method = "BFGS", # Quasi-Newton method that uses the gradient
                              hessian = TRUE,  # Return the Hessian at the optimum
                              data = sample_data)
if (mle_result_gr$convergence == 0) {
  print("Optimization with analytical gradient converged.")
  mle_par_gr <- mle_result_gr$par
  print("Estimated parameters:")
  print(mle_par_gr)
} else {
  warning("Optimization with analytical gradient failed!")
}
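# Since 'hessian = TRUE' was requested, the numerically differentiated
# Hessian of the negative log-likelihood is returned; its inverse
# approximates the covariance matrix of the MLEs. A minimal sketch
# (solve() will fail if the Hessian is singular or badly conditioned):
if (mle_result_gr$convergence == 0) {
  vcov_hat <- solve(mle_result_gr$hessian) # Approximate covariance matrix
  se_hat <- sqrt(diag(vcov_hat))           # Approximate standard errors
  print("Approximate standard errors:")
  print(se_hat)
}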
# --- Compare analytical gradient to numerical gradient ---
# Requires the 'numDeriv' package
if (requireNamespace("numDeriv", quietly = TRUE) && mle_result_gr$convergence == 0) {
cat("\nComparing Gradients at MLE estimates:\n")
# Numerical gradient of the negative log-likelihood function
num_grad <- numDeriv::grad(func = llgkw, x = mle_par_gr, data = sample_data)
# Analytical gradient (output of grgkw)
ana_grad <- grgkw(par = mle_par_gr, data = sample_data)
cat("Numerical Gradient:\n")
print(num_grad)
cat("Analytical Gradient:\n")
print(ana_grad)
# Check differences (should be small)
cat("Max absolute difference between gradients:\n")
print(max(abs(num_grad - ana_grad)))
} else {
cat("\nSkipping gradient comparison (requires 'numDeriv' package or convergence).\n")
}
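# Optional: gauge what the analytical gradient saves. If 'gr' is omitted,
# optim() approximates the gradient by finite differences, which costs
# extra evaluations of llgkw. A sketch comparing the evaluation counts:
mle_result_num <- stats::optim(par = start_par, fn = llgkw,
                               method = "BFGS", data = sample_data)
cat("\nEvaluation counts (function, gradient):\n")
print(rbind(analytical = mle_result_gr$counts,
            numerical  = mle_result_num$counts))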