# Visual Search
# Greg Francis
# PSY 646
# 11 October 2018
#
# Bayesian analysis of Zener-card guessing data using brms.
# Fits a sequence of binomial models (overall rate, skeptical prior,
# card/trial/participant effects) and compares them by WAIC weights.

# NOTE(review): clearing the global workspace inside a script is
# discouraged by the tidyverse style guide; kept here for classroom use.
rm(list = ls(all = TRUE))  # clear all variables
graphics.off()             # clear all graphics

# Load full data file: one row per trial, with Score (0/1 correct),
# ActualCard, GuessedCard, Trial, and Participant columns.
ZCdata <- read.csv(file = "ZennerCards.csv", header = TRUE,
                   stringsAsFactors = FALSE)

# Load the brms library
library(brms)

# -----------------------------------
# Estimate underlying guess rate (produces a warning worth discussing:
# the binomial family needs the trial count, supplied via trials() below)
model1 <- brm(Score ~ 1, data = ZCdata, family = "binomial")

# -----------------------------------
# Estimate underlying success rates (one Bernoulli trial per row)
model1 <- brm(Score | trials(1) ~ 1, data = ZCdata, family = "binomial")
print(summary(model1))

# Compute means of posteriors.
# NOTE(review): posterior_samples() is deprecated in brms >= 2.16;
# newer code should use as_draws_df() — kept for compatibility here.
post <- posterior_samples(model1)
props <- plogis(post$b_Intercept)  # inverse logit: intercept -> probability
dev.new()
plot(density(props))
# With 5 Zener card types, pure-chance guessing succeeds at rate 0.2
betterThanChance <- mean(props > 0.2)
cat("Probability that success rate is better than pure chance = ",
    betterThanChance, "\n")

# -----------------------------------
# Skeptical prior: intercept tightly centered on chance (logit of 0.2),
# passed into Stan via stanvar() so the prior string can reference it
lgSkeptical <- qlogis(0.2)
stanvars <- stanvar(lgSkeptical, name = 'lgSkeptical')
prs <- c(prior(normal(lgSkeptical, 0.01), class = "Intercept"))
model2 <- brm(Score | trials(1) ~ 1, data = ZCdata, family = "binomial",
              prior = prs, stanvars = stanvars)
print(summary(model2))

# Compute means of posteriors under the skeptical prior
post <- posterior_samples(model2)
props <- plogis(post$b_Intercept)
dev.new()
plot(density(props))
betterThanChance <- mean(props > 0.2)
cat("Probability that success rate is better than pure chance = ",
    betterThanChance, "\n")

# -----------------------------------
# Different success rates for different actual cards
# (0 + ActualCard: one intercept per card, no reference level)
model3 <- brm(Score | trials(1) ~ 0 + ActualCard, data = ZCdata,
              family = "binomial")
print(summary(model3))
dev.new()
# NOTE(review): marginal_effects() is deprecated in brms >= 2.10;
# newer code should use conditional_effects() — kept for compatibility.
plot(marginal_effects(model3))

# Compare models (using waic because loo takes a long time)
print(model_weights(model1, model3, weights = "waic"))

# -----------------------------------
# Different success rates for different guessed cards
model4 <- brm(Score | trials(1) ~ 0 + GuessedCard, data = ZCdata,
              family = "binomial")
print(summary(model4))
dev.new()
plot(marginal_effects(model4))

# Compare models (using waic because loo takes a long time)
print(model_weights(model1, model3, model4, weights = "waic"))

# -----------------------------------
# Different success rates for different guessed cards and different
# actual cards. Stupid model, but makes a point: the full interaction
# includes a term for the guessed==actual cell, i.e. the score itself.
model5 <- brm(Score | trials(1) ~ 0 + GuessedCard * ActualCard,
              data = ZCdata, family = "binomial")
print(summary(model5))

# Compare models (using waic because loo takes a long time)
print(model_weights(model1, model3, model4, model5, weights = "waic"))

# -----------------------------------
# Different success rates for different trials
# (Trial enters as a numeric predictor, so this could extrapolate to
# predict a final trial, in principle)
model6 <- brm(Score | trials(1) ~ 0 + Trial, data = ZCdata,
              family = "binomial")
print(summary(model6))
dev.new()
plot(marginal_effects(model6))

# Compare models (using waic because loo takes a long time)
print(model_weights(model1, model3, model4, model6, weights = "waic"))

# -----------------------------------
# Effect of subjects.
# By default, participant numbers are treated as _numbers_; convert to a
# factor so each participant gets their own intercept.
ZCdata$ParticipantFactor <- factor(ZCdata$Participant)
model7 <- brm(Score | trials(1) ~ 0 + ParticipantFactor, data = ZCdata,
              family = "binomial")
print(summary(model7))
dev.new()
plot(marginal_effects(model7))

# Compare models (using waic because loo takes a long time)
print(model_weights(model1, model3, model4, model6, model7,
                    weights = "waic"))