# First of all, we load the jomo package and attach the example data:
library(jomo)
data(mldata)
attach(mldata)
# Then we define the inputs.
# nimp, nburn and nbetween are smaller than they should be: this is
# just because of CRAN policies on examples.
# Y holds the outcomes to be imputed, X the fixed-effects covariates
# (intercept and sex), Z the random-effects covariates (intercept only)
# and clus the cluster indicator:
Y<-data.frame(measure,age)
X<-data.frame(rep(1,1000),sex)
Z<-data.frame(rep(1,1000))
clus<-data.frame(city)
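# (Optional check, not part of the original example:) a quick look at how
# much data is missing in the variables we are about to impute.
colSums(is.na(Y))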
betap<-matrix(0,2,2)                     # starting values for the fixed-effect parameters
up<-matrix(0,10,2)                       # starting values for the random effects (one row per cluster)
covp<-matrix(diag(1,2),20,2,byrow=TRUE)  # cluster-specific level-1 covariance matrices, stacked by cluster
covu<-diag(1,2)                          # starting value for the level-2 covariance matrix
Sp<-diag(1,2)                            # scale matrix of the prior for the level-1 covariance matrices
nburn<-as.integer(50)                    # burn-in iterations
nbetween<-as.integer(20)                 # iterations between successive imputations
nimp<-as.integer(5)                      # number of imputations
Sup<-diag(1,2)                           # scale matrix of the prior for the level-2 covariance matrix
a<-3                                     # degrees of freedom for the cluster-specific covariance matrices (meth="random" only)
# And finally we can run the model with either fixed or random
# cluster-specific covariance matrices:
imp<-jomo1ranconhr(Y,X,Z,clus,betap,up,covp,covu,Sp,Sup,nburn,nbetween,nimp,meth="fixed")
# or:
#imp<-jomo1ranconhr(Y,X,Z,clus,betap,up,covp,covu,Sp,Sup,nburn,nbetween,nimp,a,meth="random")
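# (Optional check, not part of the original example:) the imputed object is a
# single data frame stacking the original data and the imputed datasets,
# indexed by the Imputation column, with Imputation 0 holding the original data.
head(imp)
table(imp$Imputation)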
# Then we analyse the imputed datasets, storing the estimate and standard
# error of the age and sex coefficients from each fit:
estimates<-rep(0,5)
ses<-rep(0,5)
estimates2<-rep(0,5)
ses2<-rep(0,5)
for (i in 1:5) {
  dat<-imp[imp$Imputation==i,]
  fit<-lm(measure~age+sex+factor(clus),data=dat)
  estimates[i]<-coef(summary(fit))[2,1]   # estimate for age
  ses[i]<-coef(summary(fit))[2,2]         # standard error for age
  estimates2[i]<-coef(summary(fit))[3,1]  # estimate for sex
  ses2[i]<-coef(summary(fit))[3,2]        # standard error for sex
}
# And finally we combine the results across imputations with Rubin's rules,
# using the MI.inference function from package BaBooN:
#library("BaBooN")
#MI.inference(estimates, ses^2)
#MI.inference(estimates2, ses2^2)
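# If BaBooN is not available, a minimal sketch of Rubin's rules can be coded
# directly in base R (pool.rubin is a helper defined here for illustration
# only; it is not a jomo or BaBooN function):
pool.rubin<-function(est,se){
  m<-length(est)
  qbar<-mean(est)          # pooled point estimate
  ubar<-mean(se^2)         # average within-imputation variance
  b<-var(est)              # between-imputation variance
  t<-ubar+(1+1/m)*b        # total variance
  c(estimate=qbar,se=sqrt(t))
}
pool.rubin(estimates,ses)
pool.rubin(estimates2,ses2)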