# First of all we load and attach the data:
data(mldata)
# NOTE(review): attach() is generally discouraged (it pollutes the search
# path); kept here to match the package example, which refers to the mldata
# columns (measure, age, social, sex, city) directly below.
attach(mldata)
# Then we define the inputs:
# nimp, nburn and nbetween are smaller than they should be. This is
# just because of CRAN policies on the examples.
n_obs <- nrow(mldata)  # number of level-1 units (1000 in mldata)
# Outcomes to impute: continuous (measure, age) and categorical (social)
Y_con <- data.frame(measure, age)
Y_cat <- data.frame(social)
# Number of categories of each categorical outcome
# (1x1 matrix: the single categorical variable has 4 categories)
Y_numcat <- matrix(4, 1, 1)
# Fixed-effects design: intercept column plus the sex covariate
X <- data.frame(rep(1, n_obs), sex)
# Random-effects design: intercept only
Z <- data.frame(rep(1, n_obs))
# Cluster (level-2) indicator
clus <- data.frame(city)
# Starting values for the fixed effects (2 covariates x 5 outcome columns)
betap <- matrix(0, 2, 5)
# Starting values for the random effects
# (10 rows -- presumably one per cluster; confirm against mldata$city)
up <- matrix(0, 10, 5)
# Starting values for the level-1 and level-2 covariance matrices
covp <- diag(1, 5)
covu <- diag(1, 5)
# Prior scale matrices for the two covariance matrices
Sp <- diag(1, 5)
Sup <- diag(1, 5)
# MCMC settings: burn-in, iterations between imputations, no. of imputations
nburn <- as.integer(100)
nbetween <- as.integer(100)
nimp <- as.integer(5)
# Then we can run the sampler:
imp <- jomo1ranmix(
  Y_con, Y_cat, Y_numcat,   # outcomes and category counts
  X, Z, clus,               # design matrices and cluster indicator
  betap, up,                # starting values for fixed/random effects
  covp, covu,               # starting values for covariance matrices
  Sp, Sup,                  # prior scale matrices
  nburn, nbetween, nimp     # MCMC settings
)
# We run our substantive model on each of the nimp imputed datasets:
# Number of coefficients retained from each fit: rows 2:(n_coef + 1) of the
# coefficient table, i.e. everything after the intercept (age, sex, and the
# social/clus factor contrasts kept by the original example).
n_coef <- 5
estimates <- matrix(0, nimp, n_coef)
ses <- matrix(0, nimp, n_coef)
for (i in seq_len(nimp)) {
  # Each row of imp is tagged with the imputation it belongs to
  dat <- imp[imp$Imputation == i, ]
  fit <- lm(measure ~ age + sex + factor(social) + factor(clus), data = dat)
  coefs <- coef(summary(fit))
  # Column 1 = point estimates, column 2 = standard errors
  estimates[i, ] <- coefs[2:(n_coef + 1), 1]
  ses[i, ] <- coefs[2:(n_coef + 1), 2]
}
# And finally we aggregate results with Rubin's rules, using BaBooN package:
#library("BaBooN")
#MI.inference(estimates[,1], ses[,1]^2)
#MI.inference(estimates[,2], ses[,2]^2)
#MI.inference(estimates[,3], ses[,3]^2)
#MI.inference(estimates[,4], ses[,4]^2)
#MI.inference(estimates[,5], ses[,5]^2)
# Run the code above in your browser using DataLab