# NOT RUN {
library(irr)
library(TAM)
library(WrightMap)
library(lattice)
library(grid)
library(lme4)
library(lavaan)
library(xtable)
# Extract the variance components of a fitted lmer model and return a
# data frame with the absolute variances ("Varianz"), their percentage
# shares ("prop.Varianz"), and a "Total" row, rounded to 2 decimals.
# NOTE(review): the dot in the name makes this look like an S3 method
# of summary() for class "VarComp"; the name is kept because the
# script calls it literally as summary.VarComp().
summary.VarComp <- function(mod){
  vc <- VarCorr(mod)
  # Random-effect variances plus the residual variance (sigma^2)
  comps <- c(unlist(vc), attr(vc, "sc")^2)
  names(comps)[length(comps)] <- "Residual"
  out <- data.frame(Varianz = comps)
  # Append the total as an extra row
  out <- rbind(out, colSums(out))
  rownames(out)[nrow(out)] <- "Total"
  # Percentage of total variance per component
  total <- out$Varianz[nrow(out)]
  out$prop.Varianz <- 100 * (out$Varianz / total)
  round(out, 2)
}
# Load the example data for chapter 7 (datenKapitel07 must be provided
# by the book's accompanying data; not part of the packages above).
data(datenKapitel07)
# Ratings in wide format: one row per student with criteria TA/CC/GR/VO
# and a rater column (see usage in sections 7.2/7.3 below)
prodRat <- datenKapitel07$prodRat
# Ratings in long format: one row per student x item x rater with a
# "response" column (used by the lmer model in section 7.4)
prodRatL <- datenKapitel07$prodRatL
# Paired ratings: two raters per criterion in wide columns
# (e.g. TA_R1/TA_R2; used by irr in 7.2 and lavaan in 7.5)
prodPRat <- datenKapitel07$prodPRat
# }
# NOT RUN {
## -------------------------------------------------------------
## Section 7.2: Inter-rater agreement
## -------------------------------------------------------------
# -------------------------------------------------------------
# Section 7.2, Listing 1: Compute the frequency tables
#
# Select the four rating criteria
items <- c("TA", "CC", "GR", "VO")
# Percentage distribution of the rating categories per criterion
tab <- apply(prodRat[, items], 2,
             FUN=function(x){
               prop.table(table(x))*100})
print(tab, digits = 2)
# Mean rating per criterion; colMeans() is the direct, vectorized
# replacement for apply(..., 2, mean) and avoids the matrix coercion
round(colMeans(prodRat[, items]), 2)
# -------------------------------------------------------------
# Section 7.2, Listing 2: Compute inter-rater agreement
#
items <- c("TA", "CC", "GR", "VO")
# Result table, one row per criterion. The icc column is initialized
# here as well (the original added it implicitly on the first loop
# iteration, which works but hides the table's final shape).
dfr <- data.frame(items, agree = NA,
                  kappa = NA, wkappa = NA, korr = NA, icc = NA)
for(i in seq_along(items)){
  # Both raters' columns for criterion i (e.g. TA_R1, TA_R2)
  dat.i <- prodPRat[, grep(items[i], colnames(prodPRat))]
  # Percent agreement allowing a deviation of one category
  dfr[i, "agree"] <- agree(dat.i, tolerance = 1)$value
  # Unweighted Cohen's kappa
  dfr[i, "kappa"] <- kappa2(dat.i)$value
  # Kappa with squared weights (penalizes larger disagreements more)
  dfr[i, "wkappa"] <- kappa2(dat.i, weight = "squared")$value
  # Pearson correlation between the two raters
  dfr[i, "korr"] <- cor(dat.i[,1], dat.i[,2])
  # Intraclass correlation, two-way model
  dfr[i, "icc"] <- icc(dat.i, model = "twoway")$value
}
print(dfr, digits = 3)
## -------------------------------------------------------------
## Section 7.3: Scaling models
## -------------------------------------------------------------
# -------------------------------------------------------------
# Section 7.3, Listing 1: Scaling model (many-facet Rasch) with TAM
#
set.seed(1234)
# Define the rater facet; drop = FALSE keeps it a one-column data frame
facets <- prodRat[, "rater", drop = FALSE]
# Define the response data: the four criteria
vars <- c("TA", "CC", "GR", "VO")
resp <- prodRat[, vars]
# Define the person ID
pid <- prodRat$idstud
# Model formula: item-by-step and item-by-rater interactions
formulaA <- ~item*step+item*rater
# Fit the model. The control list tunes the estimation iterations
# (start values, weight of previous xsi, increment factor) — see
# ?TAM::tam.mml for the exact semantics of these settings.
mod <- tam.mml.mfr(resp = resp, facets = facets, formulaA = formulaA,
pid = pid, control=list(xsi.start0 = 1,
fac.oldxsi = 0.1,
increment.factor = 1.05))
summary(mod, file="TAM_MFRM")
# Person parameters (WLE) and raw scores
persons.mod <- tam.wle(mod)
# Raw score = observed person score divided by number of pseudo-items
persons.mod$raw.score <- persons.mod$PersonScores / (persons.mod$N.items)
# -------------------------------------------------------------
# Section 7.3, Listing 1b: Supplement to the book
# Comparison of all models discussed
#
# Four nested formulations, from the most complex (full three-way
# interaction) to the simplest (main effects only).
f1 <- ~item * rater * step
mod1 <- tam.mml.mfr(resp = resp, facets = facets, formulaA = f1,
pid = pid, control=list(xsi.start0 = 1,
fac.oldxsi = 0.1,
increment.factor = 1.05))
# Same model as in Listing 1 above
f2 <- ~item*step+item*rater
mod2 <- tam.mml.mfr(resp = resp, facets = facets, formulaA = f2,
pid = pid, control=list(xsi.start0 = 1,
fac.oldxsi = 0.1,
increment.factor = 1.05))
# Rater as main effect only
f3 <- ~item * step + rater
mod3 <- tam.mml.mfr(resp = resp, facets = facets, formulaA = f3,
pid = pid, control=list(xsi.start0 = 1,
fac.oldxsi = 0.1,
increment.factor = 1.05))
# Main effects only
f4 <- ~item + step + rater
mod4 <- tam.mml.mfr(resp = resp, facets = facets, formulaA = f4,
pid = pid, control=list(xsi.start0 = 1,
fac.oldxsi = 0.1,
increment.factor = 1.05))
# Estimated facet parameters of the simplest model
mod4$xsi.facets
# Information-criteria based comparison of the four fits
IRT.compareModels(mod1, mod2, mod3, mod4)
# -------------------------------------------------------------
# Section 7.3, Listing 1c: Supplement to the book
# Wright map: items and raters
#
item.labs <- vars
rater.labs <- unique(prodRat$rater)
# Pad the item labels with NA so both label vectors have equal length
item.labs <- c(item.labs, rep(NA, length(rater.labs) -
length(item.labs)))
# Estimated parameters and the facet each one belongs to
pars <- mod$xsi.facets$xsi
facet <- mod$xsi.facets$facet
item.par <- pars[facet == "item"]
rater.par <- pars[facet == "rater"]
item_rat <- pars[facet == "item:rater"]
# Pad the item and rater parameter vectors with NA to the length of
# the interaction facet so they can be stacked with rbind()
len <- length(item_rat)
item.long <- c(item.par, rep(NA, len - length(item.par)))
rater.long <- c(rater.par, rep(NA, len - length(rater.par)))
# Joint Wright map of person distribution, items, and raters
wrightMap(persons.mod$theta, rbind(item.long, rater.long),
label.items = c("Items", "Rater"),
thr.lab.text = rbind(item.labs, rater.labs),
axis.items = "", min.l=-3, max.l=3,
axis.persons = "Personen")
# -------------------------------------------------------------
# Section 7.3, Listing 2: Compute fit indices
#
# Compute infit/outfit. Pseudo-items are named "<item>-<rater>",
# so splitting on "-" recovers both facets.
pseudo_items <- colnames(mod$resp)
pss <- strsplit(pseudo_items , split="-")
# vapply() is the type-stable replacement for unlist(lapply(...)):
# it guarantees a character vector of the same length as pss
item_parm <- vapply(pss, function(ll) ll[1], character(1))
rater_parm <- vapply(pss, function(ll) ll[2], character(1))
# Fit statistics aggregated per item
res.items <- msq.itemfitWLE(mod, item_parm)
summary(res.items)
# Fit statistics aggregated per rater
res.rater <- msq.itemfitWLE(mod, rater_parm)
summary(res.rater)
# -------------------------------------------------------------
# Section 7.3, Listing 2a: Supplement to the book
# Figure: histogram, raw scores
#
# Close a previously opened device only if one exists; a bare
# dev.off() errors when no device is open (device 1 is the null
# device, so dev.cur() > 1 means a real device is active)
if (dev.cur() > 1) dev.off()
par(mfcol=c(1,2))
# Distribution of the WLE person parameters
# ("\u00e4" restores the mojibake "<U+00E4>" to the intended umlaut)
hist(persons.mod$theta, col="grey", breaks=40,
     main = "",
     xlab = "Theta (logits)",
     ylab = "H\u00e4ufigkeit")
# Raw score vs. theta with a smoothed trend line
with(persons.mod, scatter.smooth(raw.score, theta,
     pch = 1, cex = .6, xlab = "Rohscores",
     ylab = "Theta (logits)",
     lpars = list(col = "darkgrey", lwd = 2,
                  lty = 1)))
# Figure: fit statistics per rater, split by criterion
par(mfcol=c(1,2))
fitdat <- res.rater$fit_data
# First two characters of the pseudo-item name are the criterion code
fitdat$var <- factor(substr(fitdat$item, 1, 2))
boxplot(Outfit~var, data=fitdat,
        ylim=c(0,2), main="Outfit")
boxplot(Infit~var, data=fitdat,
        ylim=c(0,2), main="Infit")
# -------------------------------------------------------------
# Section 7.3, Listing 2b: Supplement to the book
# Correlations between raw scores and person parameters
#
# Pearson (linear) and Spearman (rank) correlation in one pass;
# USE.NAMES = FALSE keeps the result an unnamed numeric vector,
# exactly as the original two-element c(...) produced.
korr <- with(persons.mod,
             vapply(c("pearson", "spearman"),
                    function(m) cor(raw.score, theta, method = m),
                    numeric(1), USE.NAMES = FALSE))
print(korr)
# -------------------------------------------------------------
# Section 7.3, Listing 3: Compute the Q3 statistic
#
# Q3 statistic (residual correlations between pseudo-item pairs)
mfit.q3 <- tam.modelfit(mod)
rater.pairs <- mfit.q3$stat.itempair
# Keep only pairs judged by the same rater. Pseudo-items are named
# "<item>-<rater>" with a 2-character item code, so characters 4..12
# hold the rater identifier.
unique.rater <- which(substr(rater.pairs$item1, 4,12) ==
substr(rater.pairs$item2, 4,12))
rater.q3 <- rater.pairs[unique.rater, ]
# Add columns: rater ID and the item combination (e.g. "TA_CC")
rater.q3$rater <- substr(rater.q3$item1, 4, 12)
rater.q3 <- rater.q3[order(rater.q3$rater),]
rater.q3$kombi <- as.factor(paste(substr(rater.q3$item1, 1, 2),
substr(rater.q3$item2, 1, 2), sep="_"))
# Aggregate the adjusted Q3 (aQ3): mean per rater and per combination
dfr.raterQ3 <- aggregate(rater.q3$aQ3, by = list(rater.q3$rater), mean)
colnames(dfr.raterQ3) <- c("Rater", "Q3")
dfr.itemsQ3 <- aggregate(rater.q3$aQ3, by = list(rater.q3$kombi), mean)
colnames(dfr.itemsQ3) <- c("Items", "Q3")
dfr.itemsQ3
# -------------------------------------------------------------
# Section 7.3, Listing 3a: Supplement to the book
# Lattice dotplot of the Q3 statistic per item combination
#
# Mean adjusted Q3 per item combination (drawn as triangles)
mean.values <- aggregate(rater.q3$aQ3, list(rater.q3$kombi), mean)[["x"]]
dotplot(aQ3~kombi, data=rater.q3, main="Q3-Statistik", ylab="Q3 (adjustiert)",
        col="darkgrey",
        panel = function(x,...){
          panel.dotplot(x,...)
          panel.abline(h = 0, col.line = "grey", lty=3)
          # x-positions derived from the data instead of the original
          # hard-coded 1:6, so the plot stays correct if the number
          # of item combinations ever changes
          grid.points(seq_along(mean.values), mean.values, pch=17)
        })
## -------------------------------------------------------------
## Section 7.4: Generalizability theory
## -------------------------------------------------------------
# -------------------------------------------------------------
# Section 7.4, Listing 1: Compute variance components with lme4
#
# Random-effects formula: crossed main effects for person, item and
# rater plus all two-way interactions; the three-way interaction is
# confounded with the residual.
formula1 <- response ~ (1|idstud) + (1|item) + (1|rater) +
(1|rater:item) + (1|idstud:rater) +
(1|idstud:item)
# Fit the variance-components model on the long-format data
mod.vk <- lmer(formula1, data=prodRatL)
# Summary of the results
summary(mod.vk)
# -------------------------------------------------------------
# Section 7.4, Listing 1a: Supplement to the book
# Helper function to extract the variance components
#
# NOTE(review): this re-defines summary.VarComp identically to the
# definition at the top of the script; the duplicate is harmless but
# one of the two could be removed.
summary.VarComp <- function(mod){
# Variance components of the random effects
var.c <- VarCorr(mod)
# Append the residual variance (attribute "sc" is sigma)
var.c <- c(unlist(var.c) , attr(var.c , "sc")^2)
names(var.c)[length(var.c)] <- "Residual"
dfr1 <- data.frame(var.c)
colnames(dfr1) <- "Varianz"
# Add a total row
dfr1 <- rbind(dfr1, colSums(dfr1))
rownames(dfr1)[nrow(dfr1)] <- "Total"
# Percentage share of each component in the total variance
dfr1$prop.Varianz <- 100 * (dfr1$Varianz / dfr1$Varianz[nrow(dfr1)])
dfr1 <- round(dfr1,2)
return(dfr1)
}
summary.VarComp(mod.vk)
# -------------------------------------------------------------
# Section 7.4, Listing 2: Computation of the G coefficient
#
vk <- summary.VarComp(mod.vk)
n.p <- length(unique(prodRatL$idstud)) # number of students
n.i <- 4 # number of items
n.r <- c(1,2,5) # number of raters (three scenarios at once)
# Extract the variance components by row name
sig2.p <- vk["idstud", "Varianz"]
sig2.i <- vk["item", "Varianz"]
sig2.r <- vk["rater", "Varianz"]
sig2.ri <- vk["rater:item", "Varianz"]
sig2.pr <- vk["idstud:rater", "Varianz"]
sig2.pi <- vk["idstud:item", "Varianz"]
sig2.pir <- vk["Residual", "Varianz"]
# Relative error variance; vectorized over n.r, so one value per
# rater scenario
sig2.delta <- sig2.pi/n.i + sig2.pr/n.r + sig2.pir/(n.i*n.r)
# G coefficient: person variance relative to person + error variance
g.koeff <- sig2.p / (sig2.p + sig2.delta)
print(data.frame(n.r, g.koeff), digits = 3)
# -------------------------------------------------------------
# Section 7.4, Listing 2a: Supplement to the book
# Compute the Phi coefficient
#
# Absolute error variance: unlike sig2.delta above it also includes
# the item, rater, and rater-by-item main/interaction components
sig2.D <- sig2.r/n.r + sig2.i/n.i + sig2.pi/n.i + sig2.pr/n.r +
sig2.ri/(n.i*n.r) + sig2.pir/(n.i*n.r)
phi.koeff <- sig2.p / (sig2.p + phi.koeff_denominator <- sig2.D)
phi.koeff <- sig2.p / (sig2.p + sig2.D)
print(data.frame(n.r, phi.koeff), digits = 3)
# Confidence intervals: half-width of the 95% interval based on the
# absolute error variance
1.96*sqrt(sig2.D)
# -------------------------------------------------------------
# Section 7.4, Listing 2c: Supplement to the book
# Variable number of raters
#
# Close a previously opened device only if one exists; a bare
# dev.off() errors when no device is open
if (dev.cur() > 1) dev.off()
n.i <- 4             # number of items
dn.r <- seq_len(10)  # 1 to 10 possible raters (seq_len replaces seq(1,10))
# Relative error variance, vectorized over the rater counts
delta.i <- sig2.pi/n.i + sig2.pr/dn.r + sig2.pir/(n.i*dn.r)
g.koeff <- sig2.p / (sig2.p + delta.i)
names(g.koeff) <- paste("nR", dn.r, sep="_")
print(g.koeff[1:4])
# Figure: G coefficient as a function of the number of raters
plot(g.koeff, type = "b", pch = 19, lwd = 2, bty = "n",
     main = "G-Koeffizient: Raters",
     ylab = "G-Koeffizient",
     xlab = "Anzahl Raters", xlim = c(0,10))
# Mark the design with two raters
abline(v=2, col="darkgrey")
## -------------------------------------------------------------
## Section 7.5: Structural equation models
## -------------------------------------------------------------
# -------------------------------------------------------------
# Section 7.5, Listing 1: SEM
#
# Define the SEM model (lavaan syntax). Each criterion is a latent
# variable measured by both raters with loadings fixed to 1; the
# rater-by-person variances and the between-criterion covariances
# are constrained equal across the two raters via shared labels
# (e.g. Vta_R12, Kta_cc); the ICCs are derived parameters.
lv.mod <- "
# Messmodell
TA =~ 1*TA_R1 + 1*TA_R2
CC =~ 1*CC_R1 + 1*CC_R2
GR =~ 1*GR_R1 + 1*GR_R2
VO =~ 1*VO_R1 + 1*VO_R2
# Varianz Personen
TA ~~ Vta * TA
CC ~~ Vcc * CC
GR ~~ Vgr * GR
VO ~~ Vvo * VO
# Varianz Rater X Personen
TA_R1 ~~ Vta_R12 * TA_R1
TA_R2 ~~ Vta_R12 * TA_R2
CC_R1 ~~ Vcc_R12 * CC_R1
CC_R2 ~~ Vcc_R12 * CC_R2
GR_R1 ~~ Vgr_R12 * GR_R1
GR_R2 ~~ Vgr_R12 * GR_R2
VO_R1 ~~ Vvo_R12 * VO_R1
VO_R2 ~~ Vvo_R12 * VO_R2
# Kovarianz
TA_R1 ~~ Kta_cc * CC_R1
TA_R2 ~~ Kta_cc * CC_R2
TA_R1 ~~ Kta_gr * GR_R1
TA_R2 ~~ Kta_gr * GR_R2
TA_R1 ~~ Kta_vo * VO_R1
TA_R2 ~~ Kta_vo * VO_R2
CC_R1 ~~ Kcc_gr * GR_R1
CC_R2 ~~ Kcc_gr * GR_R2
CC_R1 ~~ Kcc_vo * VO_R1
CC_R2 ~~ Kcc_vo * VO_R2
GR_R1 ~~ Kgr_vo * VO_R1
GR_R2 ~~ Kgr_vo * VO_R2
# ICC berechnen
icc_ta := Vta / (Vta + Vta_R12)
icc_cc := Vcc / (Vcc + Vcc_R12)
icc_gr := Vgr / (Vgr + Vgr_R12)
icc_vo := Vvo / (Vvo + Vvo_R12)
"
# Estimate the model on the paired ratings
# NOTE(review): this reassigns mod1, which previously held the TAM
# model from Listing 1b; later references to mod1 use this SEM fit.
mod1 <- sem(lv.mod, data = prodPRat)
summary(mod1, standardized = TRUE)
# Inspect the results: model object, model-implied moments, latent
# covariance matrix, and free-parameter layout
show(mod1)
fitted(mod1)
inspect(mod1,"cov.lv")
inspect(mod1, "free")
# -------------------------------------------------------------
# Section 7.5, Listing 2: Compact presentation of the results
#
# Parameter estimates without confidence intervals, with
# standardized solutions added
parameterEstimates(mod1, ci = FALSE,
standardized = TRUE)
# -------------------------------------------------------------
# Section 7.5, Listing 2a: Supplement to the book
# Write the results to a LaTeX table:
#
xtable(parameterEstimates(mod1, ci = FALSE,
standardized = TRUE), digits = 3)
# }
# Run the code above in your browser using DataLab