# load the lsr package, which provides posthocPairwiseT():
library(lsr)
#
# create the data set to analyse:
dataset <- data.frame(
outcome = c( 1,2,3, 2,3,4, 5,6,7 ),
group = factor(c( "a","a","a", "b","b","b","c","c","c"))
)
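#
# print the data frame to confirm its contents:
print( dataset )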
#
#   outcome group
# 1       1     a
# 2       2     a
# 3       3     a
# 4       2     b
# 5       3     b
# 6       4     b
# 7       5     c
# 8       6     c
# 9       7     c
#
# run the ANOVA and print out the ANOVA table:
anova1 <- aov( outcome ~ group, data = dataset )
summary(anova1)
#
#             Df Sum Sq Mean Sq F value   Pr(>F)
# group        2     26      13      13 0.006592 **
# Residuals    6      6       1
#
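# As a sanity check on the arithmetic (a quick sketch, not part of
# the original output above): the group means are 2, 3 and 6, the
# grand mean is 3.67, and the table can be rebuilt by hand.
grp.means  <- tapply( dataset$outcome, dataset$group, mean )         # 2, 3, 6
ss.between <- sum( 3 * (grp.means - mean(dataset$outcome))^2 )       # 26
ss.within  <- sum( (dataset$outcome - grp.means[dataset$group])^2 )  # 6
f.stat     <- (ss.between / 2) / (ss.within / 6)                     # F = 13
#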
# The F-test is significant, and we had no theoretical reason to
# think that any particular group difference would be the source
# of the effect. So we run post hoc t-tests comparing all pairs of
# groups, adjusting the p-values to control the overall Type I
# error rate. Since the Bonferroni and Holm methods are among the
# easiest to understand, and are typically the first methods taught
# in undergraduate classes, we'll use those (Holm is the default,
# as the output below shows).
#
# Currently, the following two commands are equivalent:
posthocPairwiseT( anova1 )
pairwise.t.test( dataset$outcome, dataset$group )
#
# Pairwise comparisons using t tests with pooled SD
#
# data: dataset$outcome and dataset$group
#
#   a      b
# b 0.2666 -
# c 0.0081 0.0208
#
# P value adjustment method: holm
#
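# To use a Bonferroni correction instead of the Holm default, pass
# p.adjust.method = "bonferroni". The sketch below calls
# pairwise.t.test directly; posthocPairwiseT should accept the same
# argument if it passes extra arguments on to pairwise.t.test, but
# that is an assumption, not something shown above.
pairwise.t.test( dataset$outcome, dataset$group,
                 p.adjust.method = "bonferroni" )
#
# The adjustments themselves are simple arithmetic: Bonferroni
# multiplies every raw p-value by the number of comparisons (3 here),
# while Holm multiplies the smallest by 3, the next by 2 and the
# largest by 1, so Holm-adjusted p-values are never larger than
# Bonferroni-adjusted ones.
raw.p <- pairwise.t.test( dataset$outcome, dataset$group,
                          p.adjust.method = "none" )$p.value
raw.p <- raw.p[ !is.na(raw.p) ]            # the three raw p-values
p.adjust( raw.p, method = "bonferroni" )   # each raw p multiplied by 3 (capped at 1)
p.adjust( raw.p, method = "holm" )         # reproduces the holm-adjusted values above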