# NOT RUN {
################################################################################
#
# Example 1:
# rejection sampling in 2 dimensions
#
################################################################################
# Covariance matrix of the (untruncated) bivariate normal
sigma <- matrix(c(4, 2,
                  2, 3), ncol = 2)
# Draw 500 samples subject to the upper bounds x1 <= 1 and x2 <= 0
x <- rtmvnorm(n = 500, mean = c(1, 2), sigma = sigma, upper = c(1, 0))
plot(x,
     main = "samples from truncated bivariate normal distribution",
     xlim = c(-6, 6), ylim = c(-6, 6),
     xlab = expression(x[1]), ylab = expression(x[2]))
# Mark the truncation boundaries
abline(v = 1, lty = 3, lwd = 2, col = "gray")
abline(h = 0, lty = 3, lwd = 2, col = "gray")
################################################################################
#
# Example 2:
# Gibbs sampler for 4 dimensions
#
################################################################################
# Equicorrelated covariance matrix: correlation 0.8, unit variances
C <- matrix(0.8, 4, 4)
diag(C) <- rep(1, 4)
lower <- rep(-4, 4)
upper <- rep(-1, 4)
# acceptance rate alpha
alpha <- pmvnorm(lower = lower, upper = upper, mean = rep(0, 4), sigma = C)
alpha
# Gibbs sampler
X1 <- rtmvnorm(n = 20000, mean = rep(0, 4), sigma = C,
               lower = lower, upper = upper,
               algorithm = "gibbs", burn.in.samples = 100)
# Rejection sampling
X2 <- rtmvnorm(n = 5000, mean = rep(0, 4), sigma = C,
               lower = lower, upper = upper)
colMeans(X1)
colMeans(X2)
# Compare the marginal density of the first component from both samplers
plot(density(X1[, 1], from = lower[1], to = upper[1]), col = "red", lwd = 2,
     main = "Kernel density estimates from random samples
generated by Gibbs vs. Rejection sampling")
lines(density(X2[, 1], from = lower[1], to = upper[1]), col = "blue", lwd = 2)
legend("topleft", legend = c("Gibbs Sampling", "Rejection Sampling"),
       col = c("red", "blue"), lwd = 2, bty = "n")
################################################################################
#
# Example 3:
# Autocorrelation plot for Gibbs sampler
# with and without thinning
#
################################################################################
# Same truncated bivariate normal as in Example 1
sigma <- matrix(c(4, 2,
                  2, 3), ncol = 2)
X1 <- rtmvnorm(n = 10000, mean = c(1, 2), sigma = sigma,
               upper = c(1, 0), algorithm = "rejection")
acf(X1)
# no autocorrelation among random points
X2 <- rtmvnorm(n = 10000, mean = c(1, 2), sigma = sigma,
               upper = c(1, 0), algorithm = "gibbs")
acf(X2)
# exhibits autocorrelation among random points
X3 <- rtmvnorm(n = 10000, mean = c(1, 2), sigma = sigma,
               upper = c(1, 0), algorithm = "gibbs", thinning = 2)
acf(X3)
# reduced autocorrelation among random points
# Marginal densities of x1 for all three samplers should agree
plot(density(X1[, 1], to = 1))
lines(density(X2[, 1], to = 1), col = "blue")
lines(density(X3[, 1], to = 1), col = "red")
################################################################################
#
# Example 4: Univariate case
#
################################################################################
# One-dimensional truncated standard normal restricted to [-1, 1]
X <- rtmvnorm(100, mean=0, sigma=1, lower=-1, upper=1)
################################################################################
#
# Example 5: Linear Constraints
#
################################################################################
mean <- c(0, 0)
sigma <- matrix(c(10, 0,
0, 1), 2, 2)
# Linear Constraints
#
# a1 <= x1 + x2 <= b1
# a2 <= x1 - x2 <= b2
#
# [ a1 ] <= [ 1 1 ] [ x1 ] <= [b1]
# [ a2 ] [ 1 -1 ] [ x2 ] [b2]
a <- c(-2, -2)
b <- c( 2, 2)
# Constraint matrix D (filled column-major): row 1 = (1, 1), row 2 = (1, -1),
# so the sampler enforces a <= D %*% x <= b
D <- matrix(c(1, 1,
1, -1), 2, 2)
X <- rtmvnorm(n=10000, mean, sigma, lower=a, upper=b, D=D, algorithm="gibbsR")
plot(X, main="Gibbs sampling for multivariate normal
with linear constraints according to Geweke (1991)")
# mark linear constraints as lines
# Each constraint a[i] <= D[i,] %*% x <= b[i] yields two boundary lines:
# x2 = (a[i] - D[i,1]*x1) / D[i,2] and x2 = (b[i] - D[i,1]*x1) / D[i,2]
for (i in 1:nrow(D)) {
abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red")
abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red")
}
################################################################################
#
# Example 6: Using precision matrix H rather than sigma
#
################################################################################
lower <- c(-1, -1)
upper <- c(1, 1)
mean <- c(0.5, 0.5)
# Covariance matrix; the precision matrix H is its inverse
sigma <- matrix(c(1, 0.8,
                  0.8, 1), 2, 2)
H <- solve(sigma)
# Linear constraints on the sum and the difference of both components
D <- matrix(c(1, 1, 1, -1), 2, 2)
X <- rtmvnorm(n = 1000, mean = mean, H = H,
              lower = lower, upper = upper,
              D = D, algorithm = "gibbs")
plot(X, main = "Gibbs sampling with precision matrix and linear constraints")
################################################################################
#
# Example 7: Using sparse precision matrix H in high dimensions
#
################################################################################
# }
# NOT RUN {
d <- 1000
# Sparse d x d identity matrix
I_d <- sparseMatrix(i=1:d, j=1:d, x=1)
# Sparse upper-bidiagonal weight matrix (0.5 on the diagonal and superdiagonal)
W <- sparseMatrix(i=c(1:d, 1:(d-1)), j=c(1:d, (2:d)), x=0.5)
# Sparse precision matrix H = t(B) %*% B with B = I_d - 0.5 * W,
# which is symmetric positive definite by construction.
# (The original line was corrupted by a stray HTML comment "<!-- ... -->"
# from the web scrape, which is a syntax error in R.)
H <- t(I_d - 0.5 * W) %*% (I_d - 0.5 * W)
lower <- rep(0, d)
upper <- rep(2, d)
# Gibbs sampler generates n=100 draws in d=1000 dimensions
X <- rtmvnorm.sparseMatrix(n=100, mean = rep(0,d), H=H, lower=lower, upper=upper,
burn.in.samples=100)
colMeans(X)
cov(X)
# }
# Run the code above in your browser using DataCamp Workspace