# Weapons priming of aggressive words
# Greg Francis
# PSY 626
# 12 September 2025
# Bayesian dependent t-test

# NOTE(review): clearing the workspace/graphics inside a script is generally
# discouraged (it destroys the user's session state); kept to preserve the
# original script's behavior.
rm(list = ls(all = TRUE))  # clear all variables
graphics.off()             # clear all graphics

# Load data file; one reaction Time per row, with Prime condition and Subject id
WPdata <- read.csv(file = "WeaponPrime.csv", sep = ",", header = TRUE,
                   stringsAsFactors = TRUE)

# ----------------------
# Run traditional dependent (paired) t-test: Neutral vs Weapon prime times
WeaponData  <- subset(WPdata, Prime == "Weapon")
NeutralData <- subset(WPdata, Prime == "Neutral")
traditional <- t.test(NeutralData$Time, WeaponData$Time, paired = TRUE)
print(traditional)

# ------------------------
# Dummy variable to indicate condition (1 = Weapon, 2 = Neutral)
WPdata$ConditionIndex <- 0 * WPdata$Time
WPdata$ConditionIndex[WPdata$Prime == "Weapon"]  <- 1
WPdata$ConditionIndex[WPdata$Prime == "Neutral"] <- 2

# Bayesian models
# Load the rethinking library (McElreath); provides quap() and compare()
library(rethinking)

# Null model: one mean for both prime conditions
# Different baseline (intercept) for each participant
WPmodel0 <- quap(
  alist(
    Time ~ dnorm(mu, sigma),
    mu <- a[Subject],
    a[Subject] ~ dnorm(500, 500),
    sigma ~ dunif(0, 1000)
  ),
  data = WPdata, control = list(maxit = 10000)
)
cat("Finished WPmodel0\n")

# Alternative model 1: different mean for each condition
# (redundant parameterization: a[Subject] and b[ConditionIndex] trade off)
WPmodel1 <- quap(
  alist(
    Time ~ dnorm(mu, sigma),
    mu <- a[Subject] + b[ConditionIndex],
    a[Subject] ~ dnorm(500, 500),
    b[ConditionIndex] ~ dnorm(0, 100),
    sigma ~ dunif(0, 1000)
  ),
  data = WPdata, control = list(maxit = 10000)
)
cat("Finished WPmodel1\n")

# Compare models (information-criterion based comparison)
print(compare(WPmodel0, WPmodel1))

# -----------
# Better way to structure the model, avoiding the redundancy above:
# use a[Subject] as the mean of the Neutral condition, and let a single
# slope b adjust that mean for the Weapon condition.
WPdata$IsWeapon <- 0 * WPdata$Time
WPdata$IsWeapon[WPdata$Prime == "Weapon"] <- 1

# Alternative model 2: different mean for Weapon condition
WPmodel2 <- quap(
  alist(
    Time ~ dnorm(mu, sigma),
    mu <- a[Subject] + b * IsWeapon,
    a[Subject] ~ dnorm(500, 500),
    b ~ dnorm(0, 100),
    sigma ~ dunif(0, 1000)
  ),
  data = WPdata, control = list(maxit = 10000)
)
cat("Finished WPmodel2\n")

print(compare(WPmodel0, WPmodel1, WPmodel2))