# Independent-samples t-test power: n = 50 per group, "medium" effect size
p_t.test(n = 50, d = 0.5)
# effect size supplied as a point-biserial correlation instead of d
p_t.test(n = 50, r = .3)
# unbalanced design: second group twice as large as the first
p_t.test(n = 50, d = 0.5, n2_n1 = 2)
# effect implied by explicitly supplied group means and SDs
p_t.test(n = 50, means = c(0, 1), sds = c(2, 2))
# paired-samples test (n = number of pairs) and one-sample test
p_t.test(n = 50, d = 0.5, type = "paired")
p_t.test(n = 50, d = 0.5, type = "one.sample")
# return the underlying analysis object instead of the p-value
p_t.test(n = 50, d = 0.5, return_analysis = TRUE)
# \donttest{
# Cross-check simulated power estimates against the analytic pwr package
pwr::pwr.t.test(d = 0.2, n = 60, sig.level = 0.10,
                type = "one.sample", alternative = "two.sided")
p_t.test(n = 60, d = 0.2, type = "one.sample", two.tailed = TRUE) |>
  Spower(sig.level = .10)
pwr::pwr.t.test(d = 0.3, power = 0.80, type = "two.sample",
                alternative = "greater")
p_t.test(n = interval(10, 200), d = 0.3, type = "two.sample",
         two.tailed = FALSE) |>
  Spower(power = 0.80)
# }
###### Custom data generation function
# Generate data such that:
# - group 1 is from a negatively skewed distribution (reversed X2(10)),
# - group 2 is from a positively skewed distribution (X2(5))
# - groups have equal variance, but differ by d = 0.5
args(gen_t.test) ## can use these arguments as a basis, though must include ...
# arguments df1 and df2 added below; unused arguments caught within ...
# Custom generator: two standardized skewed samples whose means differ by d.
# Extra arguments forwarded by the simulation driver are absorbed by ...
my.gen_fun <- function(n, d, df1, df2, ...){
  # reversed chi-square(df1) draw, centered and scaled to mean 0, SD 1
  # (a chi-square with df degrees of freedom has mean df and variance 2*df)
  g1 <- (df1 - rchisq(n, df = df1)) / sqrt(2 * df1)
  # chi-square(df2) draw standardized the same way, then shifted by d
  g2 <- (rchisq(n, df = df2) - df2) / sqrt(2 * df2) + d
  list(g1, g2)
}
# Sanity-check the generator on a large sample: group means should be
# near 0 and d = 0.5, and both SDs should be near 1
dat <- my.gen_fun(n = 10000, d = .5, df1 = 10, df2 = 5)
# vapply rather than sapply: type-stable numeric(1) result per group
vapply(dat, mean, numeric(1))
vapply(dat, sd, numeric(1))
# power analysis using the custom data generating function
p_t.test(n = 100, d = 0.5, gen_fun = my.gen_fun, df1 = 10, df2 = 5)
# \donttest{
# reference power under the usual Gaussian assumptions
p_t.test(n = 100, d = 0.5) |>
  Spower(replications = 30000)
# power under the customized (skewed) data generating function
p_t.test(n = 100, d = 0.5, gen_fun = my.gen_fun, df1 = 10, df2 = 5) |>
  Spower(replications = 30000)
# Type I error check under the same assumption violations: with d = 0 the
# rejection rate should stay close to alpha/sig.level if the test is robust
p_t.test(n = 100, d = 0, gen_fun = my.gen_fun, df1 = 10, df2 = 5) |>
  Spower(replications = 30000)
# }
# Run the code above in your browser using DataLab