Comparison of BayesFactor against other packages


This R markdown file runs a series of tests to ensure that the BayesFactor package gives correct answers and handles plausible input gracefully.

library(BayesFactor)
library(arm)
library(lme4)

ANOVA

First we generate some data.

# Number of participants
N <- 20
sig2 <- 1
sig2ID <- 1

# 3x3x3 design, with participant as random factor
effects <- expand.grid(A = c("A1","A2","A3"),
                       B = c("B1","B2","B3"),
                       C = c("C1","C2","C3"),
                       ID = paste("Sub",1:N,sep="")
)
Xdata <- model.matrix(~ A*B*C + ID, data=effects)
beta <- matrix(c(50,
          -.2,.2,
          0,0,
          .1,-.1,
          rnorm(N-1,0,sqrt(sig2ID)),
          0,0,0,0,
          -.1,.1,.1,-.1,
          0,0,0,0,
          0,0,0,0,0,0,0,0),
               ncol=1)
# Simulate responses around the cell means; no seed is set, so exact values vary run to run
effects$y <- rnorm(nrow(Xdata), Xdata %*% beta, sqrt(sig2))
# Typical repeated measures ANOVA
summary(fullaov <- aov(y ~ A*B*C + Error(ID/(A*B*C)),data=effects))
## 
## Error: ID
##           Df Sum Sq Mean Sq F value Pr(>F)
## Residuals 19    648    34.1               
## 
## Error: ID:A
##           Df Sum Sq Mean Sq F value Pr(>F)   
## A          2   13.8    6.92    8.12 0.0012 **
## Residuals 38   32.4    0.85                  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: ID:B
##           Df Sum Sq Mean Sq F value Pr(>F)
## B          2    2.4    1.19    1.18   0.32
## Residuals 38   38.4    1.01               
## 
## Error: ID:C
##           Df Sum Sq Mean Sq F value Pr(>F)  
## C          2    5.5   2.767    2.95  0.064 .
## Residuals 38   35.6   0.937                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: ID:A:B
##           Df Sum Sq Mean Sq F value Pr(>F)
## A:B        4    1.6   0.402    0.41    0.8
## Residuals 76   73.8   0.971               
## 
## Error: ID:A:C
##           Df Sum Sq Mean Sq F value Pr(>F)  
## A:C        4   12.4   3.103    3.33  0.014 *
## Residuals 76   70.7   0.931                 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Error: ID:B:C
##           Df Sum Sq Mean Sq F value Pr(>F)
## B:C        4    2.3   0.583    0.46   0.77
## Residuals 76   96.6   1.271               
## 
## Error: ID:A:B:C
##            Df Sum Sq Mean Sq F value Pr(>F)
## A:B:C       8   12.6    1.58     1.4    0.2
## Residuals 152  170.7    1.12
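
For reference, the same model can also be fit with lme4; a minimal sketch, not part of the original analysis. Note that (1|ID) specifies only a random intercept for participant, a simpler error structure than the aov error strata above.

# Sketch: same fixed effects, participant as a random intercept
summary(lmer(y ~ A*B*C + (1|ID), data = effects))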

We can plot the data with standard errors:

mns <- tapply(effects$y,list(effects$A,effects$B,effects$C),mean)
stderr = sqrt((sum(resid(fullaov[[3]])^2)/fullaov[[3]]$df.residual)/N)

par(mfrow=c(1,3),cex=1.1)
for(i in 1:3){
  matplot(mns[,,i],xaxt='n',typ='b',xlab="A",main=paste("C",i), 
          ylim=range(mns)+c(-1,1)*stderr,ylab="y")
  axis(1,at=1:3,lab=1:3)
  segments(1:3 + mns[,,i]*0,mns[,,i] + stderr,1:3 + mns[,,i]*0,mns[,,i] - stderr,col=rgb(0,0,0,.3))
}
(Figure: cell means of y by A (x-axis) and B (lines), one panel per level of C, with standard error bars.)

Bayes factor

Compute the Bayes factors two ways, with the default sampler and with the Laplace approximation, and compare the timings:

t.is = system.time(bfs.is <- anovaBF(y ~ A*B*C + ID, data = effects, 
                                     whichRandom="ID")
)
t.la = system.time(bfs.la <- anovaBF(y ~ A*B*C + ID, data = effects, 
                                     whichRandom="ID",
                                     method = "laplace")
)
t.is
##    user  system elapsed 
##    2.63    0.01    2.86
t.la
##    user  system elapsed 
##   1.083   0.008   1.259
plot(log(extractBF(sort(bfs.is))$bf),log(extractBF(sort(bfs.la))$bf),
     xlab="Default Sampler",ylab="Laplace approximation",
     pch=21,bg=rgb(0,0,1,.2),col="black",asp=TRUE,cex=1.2)
abline(0,1)
(Figure: log Bayes factors from the default sampler vs. the Laplace approximation, with the identity line.)
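
The plot suggests close agreement; a minimal numeric check (not part of the original output) is to look at the largest discrepancy on the log scale:

# Maximum absolute difference between the two methods' log Bayes factors
lbf.is <- log(extractBF(sort(bfs.is))$bf)
lbf.la <- log(extractBF(sort(bfs.la))$bf)
max(abs(lbf.is - lbf.la))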

bfs.is
## Bayes factor analysis
## --------------
## [1] A + ID                                    : 9.02     ±0.93%
## [2] B + ID                                    : 0.059    ±1.76%
## [3] A + B + ID                                : 0.563    ±6.68%
## [4] A + B + A:B + ID                          : 0.00796  ±4.56%
## [5] C + ID                                    : 0.229    ±0.85%
## [6] A + C + ID                                : 2.29     ±5.06%
## [7] B + C + ID                                : 0.0133   ±1.08%
## [8] A + B + C + ID                            : 0.141    ±7.7%
## [9] A + B + A:B + C + ID                      : 0.00196  ±4.48%
## [10] A + C + A:C + ID                         : 2.49     ±13.1%
## [11] A + B + C + A:C + ID                     : 0.138    ±2.12%
## [12] A + B + A:B + C + A:C + ID               : 0.00206  ±2.86%
## [13] B + C + B:C + ID                         : 0.000251 ±1.44%
## [14] A + B + C + B:C + ID                     : 0.00253  ±2.14%
## [15] A + B + A:B + C + B:C + ID               : 3.67e-05 ±2.09%
## [16] A + B + C + A:C + B:C + ID               : 0.00264  ±1.66%
## [17] A + B + A:B + C + A:C + B:C + ID         : 4.11e-05 ±2.89%
## [18] A + B + A:B + C + A:C + B:C + A:B:C + ID : 8.47e-06 ±1.88%
## 
## Against denominator:
##   y ~ ID 
## ---
## Bayes factor type: BFlinearModel, JZS
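
Every Bayes factor above is against the y ~ ID denominator; any two models can be compared directly by dividing the corresponding BFBayesFactor objects. A sketch, using the numbered models in the listing above:

# Evidence for the A:C interaction over the additive model:
# [10] is A + C + A:C + ID, [6] is A + C + ID
bfs.is[10] / bfs.is[6]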

Comparison to lmer and arm

We can use samples from the posterior distribution to compare BayesFactor with lmer and arm.

chains <- lmBF(y ~ A + B + C + ID, data=effects, whichRandom = "ID", posterior=TRUE, iterations=10000)

lmerObj <- lmer(y ~ A + B + C + (1|ID), data=effects)
# Use arm function sim() to sample from posterior
chainsLmer = sim(lmerObj,n.sims=10000)
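
The two objects store their posterior samples differently; a quick sketch (added here for orientation) of how to inspect each:

# chains is an mcmc object with one column per parameter
head(colnames(chains))
# chainsLmer is an S4 object; we use its fixef, ranef, and sigma slots below
slotNames(chainsLmer)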

Compare estimates of the error variance:

BF.sig2 <- chains[,colnames(chains)=="sig2"]
AG.sig2 <- (chainsLmer@sigma)^2
qqplot(log(BF.sig2),log(AG.sig2),pch=21,bg=rgb(0,0,1,.2),
       col=NULL,asp=TRUE,cex=1,xlab="BayesFactor samples",
       ylab="arm samples",main="Posterior samples of\nerror variance")
abline(0,1)
(Figure: quantile-quantile plot of posterior samples of the error variance, BayesFactor vs. arm.)
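
A numeric companion to the quantile-quantile plot; a sketch comparing posterior quantiles of the error variance:

# 2.5%, 50%, and 97.5% posterior quantiles of sig2 from each package
rbind(BayesFactor = quantile(BF.sig2, c(.025, .5, .975)),
      arm = quantile(AG.sig2, c(.025, .5, .975)))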

Compare estimates of participant effects:

AG.raneff <- chainsLmer@ranef$ID[,,1]
BF.raneff <- chains[,grep('ID-',colnames(chains),fixed=TRUE)]
plot(colMeans(BF.raneff),colMeans(AG.raneff),pch=21,bg=rgb(0,0,1,.2),
     col="black",asp=TRUE,cex=1.2,xlab="BayesFactor estimate",
     ylab="arm estimate",main="Random effect posterior means")
abline(0,1)
(Figure: random effect posterior means, BayesFactor vs. arm, with the identity line.)
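
The agreement visible in the plot can be summarized in one number; a sketch:

# Correlation between the two packages' random-effect posterior means
cor(colMeans(BF.raneff), colMeans(AG.raneff))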

Compare estimates of fixed effects:

AG.fixeff <- chainsLmer@fixef
# First 10 columns of chains are mu and the sum-to-0 fixed effects
BF.fixeff <- chains[,1:10]

# Adjust AG results from reference cell to sum to 0
Z = c(1,  1/3,  1/3,  1/3,  1/3,  1/3,  1/3,
      0, -1/3, -1/3,    0,    0,    0,    0,
      0,  2/3, -1/3,    0,    0,    0,    0,
      0, -1/3,  2/3,    0,    0,    0,    0,
      0,     0,   0, -1/3, -1/3,    0,    0,
      0,     0,   0,  2/3, -1/3,    0,    0,
      0,     0,   0, -1/3,  2/3,    0,    0,
      0,     0,   0,    0,    0, -1/3, -1/3,
      0,     0,   0,    0,    0,  2/3, -1/3,
      0,     0,   0,    0,    0, -1/3,  2/3)
dim(Z) = c(7,10)
Z = t(Z)

AG.fixeff2 = t(Z%*%t(AG.fixeff))

## Our grand mean has heavier tails
qqplot(BF.fixeff[,1],AG.fixeff2[,1],pch=21,bg=rgb(0,0,1,.2),col=NULL,
       asp=TRUE,cex=1,xlab="BayesFactor estimate",ylab="arm estimate",
       main="Grand mean posterior samples")
abline(0,1)
(Figure: quantile-quantile plot of grand mean posterior samples, BayesFactor vs. arm.)

plot(colMeans(BF.fixeff[,-1]),colMeans(AG.fixeff2[,-1]),pch=21,bg=rgb(0,0,1,.2),
     col="black",asp=TRUE,cex=1.2,xlab="BayesFactor estimate",
     ylab="arm estimate",main="Fixed effect posterior means")
abline(0,1)
(Figure: fixed effect posterior means, BayesFactor vs. arm, with the identity line.)

## Compare posterior standard deviations
BFsd = apply(BF.fixeff[,-1],2,sd)
AGsd = apply(AG.fixeff2[,-1],2,sd)
plot(sort(AGsd/BFsd),pch=21,bg=rgb(0,0,1,.2),col="black",cex=1.2,
     ylab="Ratio of posterior standard deviations (arm/BF)",
     xlab="Fixed effect index")
(Figure: sorted ratios of posterior standard deviations, arm/BayesFactor, by fixed effect.)

## arm estimates are slightly larger, consistent with the sig2 estimates above;
## this is probably due to the different priors
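
A numeric summary of those ratios; a one-line sketch:

# Distribution of arm/BayesFactor posterior standard deviation ratios
summary(AGsd/BFsd)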

Another comparison with lmer

We begin by loading required packages…

library(languageR)
library(xtable)

…and creating the data set to analyze.

data(primingHeidPrevRT)

primingHeidPrevRT$lRTmin1 <- log(primingHeidPrevRT$RTmin1)

Frequentist analysis

lr4 <- lmer(RT ~ Condition + (1|Word) + (1|Subject) + lRTmin1 + RTtoPrime +
            ResponseToPrime + ResponseToPrime*RTtoPrime + BaseFrequency,
            primingHeidPrevRT)
# Get rid of some outlying response times
INDOL <- which(scale(resid(lr4)) < 2.5)
primHeidOL <- primingHeidPrevRT[INDOL,]

The first thing we have to do is center the continuous variables. lmBF() does this automatically, as required by Liang et al. (2008), so we center them here so that the lmer analysis is comparable. This, of course, changes the definition of the intercept.

# Center continuous variables
primHeidOL$BaseFrequency <- primHeidOL$BaseFrequency - mean(primHeidOL$BaseFrequency)
primHeidOL$lRTmin1 <- primHeidOL$lRTmin1 - mean(primHeidOL$lRTmin1)
primHeidOL$RTtoPrime <- primHeidOL$RTtoPrime - mean(primHeidOL$RTtoPrime)
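
Equivalently, all three variables could be centered in one step (a sketch; an alternative to, not an addition to, the lines above):

# Center the continuous predictors in a single pass
cv <- c("BaseFrequency", "lRTmin1", "RTtoPrime")
primHeidOL[cv] <- lapply(primHeidOL[cv], function(x) x - mean(x))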

Now we perform both analyses on the same data, and place the fixed effect estimates for both packages into their own vectors.

# LMER
lr4b <- lmer(RT ~ Condition + ResponseToPrime + (1|Word) + (1|Subject) +
             lRTmin1 + RTtoPrime + ResponseToPrime*RTtoPrime + BaseFrequency,
             primHeidOL)
# BayesFactor
B5out <- lmBF(RT ~ Condition + ResponseToPrime + Word + Subject + lRTmin1 +
              RTtoPrime + ResponseToPrime*RTtoPrime + BaseFrequency,
              primHeidOL, whichRandom = c("Word","Subject"),
              posterior = TRUE, iterations = 50000,
              columnFilter = c("Word","Subject"))

lmerEff <- fixef(lr4b)
bfEff <- colMeans(B5out[,1:10])

lmer uses a “reference cell” parameterization, rather than imposing sum-to-0 constraints. We can tell what the reference cell is by looking at the parameter names.

print(xtable(cbind("lmer fixed effects"=names(lmerEff))), type='html')
lmer fixed effects
1 (Intercept)
2 Conditionheid
3 ResponseToPrimeincorrect
4 lRTmin1
5 RTtoPrime
6 BaseFrequency
7 ResponseToPrimeincorrect:RTtoPrime

Notice what’s missing: for the categorical parameters, we are missing Conditionbaseheid and ResponseToPrimecorrect. For the slope parameters, we are missing ResponseToPrimecorrect:RTtoPrime. The missing effects tell us what the reference cells are. Since the reference cell parameterization is just a linear transformation of the sum-to-0 parameterization, we can create a matrix that allows us to move from one to the other. We call this \(10 \times 7\) matrix Z. It takes the 7 “reference-cell” parameters from lmer and maps them into the 10 linearly constrained parameters from lmBF.

The first row of Z transforms the intercept (reference cell) to the grand mean (sum-to-0): we have to add half of each of the two categorical fixed effects back into the intercept. The second and third rows divide the total effect of Condition into two equal parts, one for baseheid and one for heid. Rows four and five do the same for ResponseToPrime.

The slopes that do not enter into interactions are fine as they are; however, ResponseToPrimecorrect:RTtoPrime serves as our reference cell for the ResponseToPrime:RTtoPrime interaction. We treat these slopes analogously to the grand mean; we take RTtoPrime and add half the ResponseToPrimeincorrect:RTtoPrime effect to it, to make it a grand mean slope. The last two rows divide up the ResponseToPrimeincorrect:RTtoPrime effect between ResponseToPrimeincorrect:RTtoPrime and ResponseToPrimecorrect:RTtoPrime.

# Adjust lmer results from reference cell to sum to 0
Z = c(1,   1/2, 1/2,    0,    0,    0,    0,
      0,  -1/2,   0,    0,    0,    0,    0,
      0,   1/2,   0,    0,    0,    0,    0,
      0,     0,-1/2,    0,    0,    0,    0,
      0,     0, 1/2,    0,    0,    0,    0,
      0,     0,   0,    1,    0,    0,    0,
      0,     0,   0,    0,    1,    0,  1/2,
      0,     0,   0,    0,    0,    1,    0,
      0,     0,   0,    0,    0,    0, -1/2,
      0,     0,   0,    0,    0,    0,  1/2)
dim(Z) = c(7,10)
Z = t(Z)

# Do the reparameterization by pre-multiplying the parameter vector by Z
reparLmer <- Z %*% matrix(lmerEff,ncol=1)

# put results in data.frame for comparison
sideBySide <- data.frame(BayesFactor=bfEff,lmer=reparLmer)
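
As a check on the reparameterization, the transformed lmer estimates should satisfy the sum-to-0 constraints; a sketch:

# Each factor's effects should cancel, up to floating-point error
sum(reparLmer[2:3])   # Condition
sum(reparLmer[4:5])   # ResponseToPrime
sum(reparLmer[9:10])  # ResponseToPrime:RTtoPrime slopes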

We can look at them side by side for comparison:

print(xtable(sideBySide,digits=4), type='html')
                                                 BayesFactor     lmer
mu                                                    6.6419   6.6430
Condition-baseheid                                    0.0170   0.0192
Condition-heid                                       -0.0170  -0.0192
ResponseToPrime-correct                              -0.0597  -0.0643
ResponseToPrime-incorrect                             0.0597   0.0643
lRTmin1-lRTmin1                                       0.0987   0.1037
RTtoPrime-RTtoPrime                                   0.1186   0.1281
BaseFrequency-BaseFrequency                          -0.0089  -0.0092
ResponseToPrime:RTtoPrime-correct.&.RTtoPrime         0.0957   0.1084
ResponseToPrime:RTtoPrime-incorrect.&.RTtoPrime      -0.0957  -0.1084

…and plot them:

# Notice Bayesian shrinkage
par(cex=1.5)
plot(sideBySide[-1,],pch=21,bg=rgb(0,0,1,.2),col="black",asp=TRUE,cex=1.2,
     main="fixed effects\n (excluding grand mean)")
abline(0,1, lty=2)