R code for simulating data with both large and small effect sizes, to see how much p-values and Bayes factors vary across repeated experiments. Inspiration comes from work by Geoff Cumming and Daniel Lakens.
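In both simulations the effect size is set through the population standard deviation: Cohen's d is the mean difference divided by the common sd, so the 10-point difference with sd = 12.5 below gives d = 10/12.5 = .8, and the 5-point difference with sd = 25 gives d = 5/25 = .2.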

############################################
########Simulation of data with large effect size#######
############################################

##set up
library(BayesFactor)
library(MBESS)
library(pwr)
library(ggplot2)
options(scipen=20)
N.Sim = 1000 #number of simulated experiments
n<-30 #sample size in each group
sd = 12.5 #with a 10-point mean difference, Cohen's d of .8
#Cohen's d for independent samples
d.indt = function (m1, m2, sd1, sd2, n1 = n, n2 = n) {
spooled = sqrt( ((n1-1)*sd1^2 + (n2-1)*sd2^2) / (n1+n2-2)) #pooled standard deviation
d = (m1 - m2) / spooled
d
}
#z test
ztest = function(m1, m2, sd1 = sd, sd2 = sd, n1 = n, n2 = n){
denom = sqrt((sd1^2/n1) + (sd2^2/n2)) #standard error of the mean difference
z = (m1-m2)/denom
p = 2*pnorm(-abs(z)) #two-sided p-value
list(Z = z, p = p)
}
mydata = data.frame(sim = 1:N.Sim, P.Value = NA, PosthocPower = NA,
CohensD = NA,
CILo = NA, CIhi = NA, BayesFactor = NA) #pre-allocate one row per simulated experiment
round = 0
#Simulation Loop
for(i in 1:N.Sim){
#data
x<-rnorm(n = n, mean = 20, sd = sd) #Cohen's d of .8
y<-rnorm(n = n, mean = 10, sd = sd) #Cohen's d of .8
#descriptives
m1 = mean(x)
m2 = mean(y)
sd1 = sd(x)
sd2 = sd(y)
n1 = n
n2 = n
#z-test
z=ztest(m1 = m1, m2 = m2, sd1 = sd, sd2 = sd, n1 = n, n2 = n)
round = round+1
mydata$P.Value[round] = z$p
CI = ci.smd(ncp = z$Z, n.1=n, n.2=n, conf.level=0.95) #95% CI for Cohen's d, using the z statistic as the noncentrality parameter
mydata$CILo[round] = CI$Lower.Conf.Limit.smd
mydata$CIhi[round] = CI$Upper.Conf.Limit.smd
D = d.indt(m1 = m1, m2 = m2, sd1 = sd1, sd2 = sd2, n1 = n, n2 = n)
mydata$CohensD[round] = D
power = pwr.t.test(n=n, sig.level = .05, d=D,
type = "two.sample",
alternative = "two.sided")
mydata$PosthocPower[round] = power$power
#BayesFactor
BF10.current<-exp(ttest.tstat(z$Z,n,n,rscale=sqrt(2)/2)$bf) #ttest.tstat returns the log Bayes factor, so exponentiate to get BF10
mydata$BayesFactor[round] = BF10.current #store BF10 for this simulated experiment
}
summary(mydata)
hist(mydata$P.Value, breaks = 100)
hist(mydata$BayesFactor, breaks = 100)
hist(mydata$PosthocPower, breaks = 100)
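ggplot2 is loaded in the setup but not used above. As a rough illustration (not part of the original script), one way to look at how the two measures travel together in the large-effect runs is a scatterplot of p-values against Bayes factors, with both axes on log scales simply to spread the values out:

#rough sketch: p-values vs. Bayes factors from the large-effect simulation
ggplot(mydata, aes(x = P.Value, y = BayesFactor)) +
geom_point(alpha = 0.3) +
scale_x_log10() +
scale_y_log10() +
labs(x = "p-value (log scale)", y = "BF10 (log scale)")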

############################################
########Simulation of data with small effect size#######
############################################

#set up
N.Sim = 100 #number of simulated experiments
n<-30 #sample size in each group
sd = 25 #with a 5-point mean difference, Cohen's d of .2

mydata2 = data.frame(sim = 1:N.Sim, P.Value = NA, PosthocPower = NA,
CohensD = NA,
CILo = NA, CIhi = NA, BayesFactor = NA) #pre-allocate one row per simulated experiment
round = 0
#Simulation Loop
for(i in 1:N.Sim){
#data
x<-rnorm(n = n, mean = 15, sd = sd)
y<-rnorm(n = n, mean = 10, sd = sd)
#descriptives
m1 = mean(x)
m2 = mean(y)
sd1 = sd(x)
sd2 = sd(y)
n1 = n
n2 = n
#z-test
z=ztest(m1 = m1, m2 = m2, sd1 = sd, sd2 = sd, n1 = n, n2 = n)
round = round+1
mydata2$P.Value[round] = z$p
CI = ci.smd(ncp = z$Z, n.1=n, n.2=n, conf.level=0.95) #95% CI for Cohen's d, using the z statistic as the noncentrality parameter
mydata2$CILo[round] = CI$Lower.Conf.Limit.smd
mydata2$CIhi[round] = CI$Upper.Conf.Limit.smd
D = d.indt(m1 = m1, m2 = m2, sd1 = sd1, sd2 = sd2, n1 = n, n2 = n)
mydata2$CohensD[round] = D
power = pwr.t.test(n=n, sig.level = .05, d=D,
type = "two.sample",
alternative = "two.sided")
mydata2$PosthocPower[round] = power$power
#BayesFactor
BF10.current<-exp(ttest.tstat(z$Z,n,n,rscale=sqrt(2)/2)$bf)
mydata2$BayesFactor[round] = BF10.current #add value to the vector
}
summary(mydata2)
hist(mydata2$P.Value, breaks = 100)
hist(mydata2$BayesFactor, breaks = 100)
hist(mydata2$PosthocPower, breaks = 100)
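As a final rough sketch (again not part of the original script), stacking the two result data frames makes it easy to compare how the Bayes factors spread out under the large and small effect sizes:

#rough sketch: compare BF10 distributions across the two effect sizes
mydata$Effect = "large (d = .8)"
mydata2$Effect = "small (d = .2)"
both = rbind(mydata, mydata2)
ggplot(both, aes(x = BayesFactor)) +
geom_histogram(bins = 50) +
scale_x_log10() +
facet_wrap(~Effect, ncol = 1) +
labs(x = "BF10 (log scale)", y = "count")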
################################################################
