# Example: multiple imputation with jomo1mix() on the sldata dataset,
# followed by per-imputation analysis with lm().
# Load sldata (shipped with the jomo package). Columns are referenced
# explicitly via sldata$... rather than attach(), which pollutes the
# search path and masks objects.
data(sldata)

# Define all the inputs for the sampler.
# nimp, nburn and nbetween are smaller than they should be; this is
# just because of CRAN policies on the examples.
Y_con <- data.frame(measure = sldata$measure, age = sldata$age)  # continuous outcomes
Y_cat <- data.frame(social = sldata$social)                      # categorical outcome
Y_numcat <- matrix(4, 1, 1)    # number of categories of each categorical outcome
# Covariate matrix: intercept column plus sex. Row count is taken from
# the data instead of being hard-coded.
X <- data.frame(rep(1, nrow(sldata)), sex = sldata$sex)
betap <- matrix(0, 2, 5)       # prior mean for the fixed-effects matrix
covp <- diag(1, 5)             # prior covariance matrix
Sp <- diag(1, 5)               # prior scale matrix for the covariance
nburn <- as.integer(100)       # burn-in iterations
nbetween <- as.integer(100)    # iterations between stored imputations
nimp <- as.integer(5)          # number of imputed datasets

# Run the sampler:
imp <- jomo1mix(Y_con, Y_cat, Y_numcat, X, betap, covp, Sp, nburn, nbetween, nimp)

# Analyze each imputed dataset: fit the substantive model and collect
# the 5 coefficients of interest (rows 2:6 of the coefficient table,
# i.e. everything except the intercept) and their standard errors.
estimates <- matrix(0, nimp, 5)
ses <- matrix(0, nimp, 5)
for (i in seq_len(nimp)) {   # seq_len(nimp), not 1:5 — stays in sync with nimp
  dat <- imp[imp$Imputation == i, ]
  fit <- lm(measure ~ age + sex + factor(social), data = dat)
  estimates[i, 1:5] <- coef(summary(fit))[2:6, 1]
  ses[i, 1:5] <- coef(summary(fit))[2:6, 2]
}

# Finally, pool the results with Rubin's rules using the BaBooN package:
#library("BaBooN")
#MI.inference(estimates[,1], ses[,1]^2)
#MI.inference(estimates[,2], ses[,2]^2)
#MI.inference(estimates[,3], ses[,3]^2)
#MI.inference(estimates[,4], ses[,4]^2)
#MI.inference(estimates[,5], ses[,5]^2)
# (Website residue from the documentation page, kept as a comment so the
# file remains valid R: "Run the code above in your browser using DataLab")