### R code from vignette source '../hierarchical_models_chapter/hierarchical_models.Rnw'

###################################################
### code chunk number 1: loadLibs
###################################################
library(rjags)
library(coda)
library(lme4)

###################################################
### code chunk number 2: trueFormantData
###################################################
pb <- read.table("../data/peterson_barney_data/pb.txt", header = TRUE)
aa <- subset(pb, Vowel == "aa" & Type == "m")
aa$Speaker <- factor(aa$Speaker)
## Per-speaker mean formants; subtracting them gives within-speaker deviations.
speaker.means.f1 <- tapply(aa$F1, aa$Speaker, mean)
speaker.means.f2 <- tapply(aa$F2, aa$Speaker, mean)
f1 <- aa$F1 - speaker.means.f1[aa$Speaker]
f2 <- aa$F2 - speaker.means.f2[aa$Speaker]

###################################################
### code chunk number 3: trueFormantDataPlot
###################################################
plot(aa$F1, aa$F2, pch = as.numeric(aa$Speaker),
     xlab = "F1 formant", ylab = "F2 formant")

###################################################
### code chunk number 4: residualsPlotted
###################################################
plot(jitter(f1), jitter(f2), pch = 19,
     xlab = "F1 deviation from speaker mean",
     ylab = "F2 deviation from speaker mean")

###################################################
### code chunk number 5: simulatedFormantData
###################################################
## get realistic means and sigmas from Peterson & Barney's data
set.seed(9)
library(mvtnorm)
f1.mean <- mean(speaker.means.f1)
f2.mean <- mean(speaker.means.f2)
## within-speaker (residual) covariance matrix
sigma <- matrix(c(var(f1), cov(f1, f2), cov(f1, f2), var(f2)), 2, 2)
## between-speaker covariance matrix
sigmaS <- matrix(c(var(speaker.means.f1),
                   cov(speaker.means.f1, speaker.means.f2),
                   cov(speaker.means.f1, speaker.means.f2),
                   var(speaker.means.f2)), 2, 2)
## generate new data: M simulated speakers, N tokens per speaker
M <- 6
N <- 15
mu.i <- rmvnorm(M, mean = c(f1.mean, f2.mean), sigma = sigmaS)
## Preallocate one matrix per speaker rather than growing y with
## rbind() inside the loop (quadratic copying in the original).
y.per.speaker <- vector("list", M)
for (i in seq_len(M)) {
  y.per.speaker[[i]] <- rmvnorm(N, mean = mu.i[i, ], sigma = sigma)
}
y <- do.call(rbind, y.per.speaker)

###################################################
### code chunk number 6: simulatedFormantDataPlot
###################################################
## color and label each simulated point by its speaker index
plot(y, col = ceiling((1:(M * N)) / N),
     pch = as.character(ceiling((1:(M * N)) / N)),
     xlab = "F1 formant", ylab = "F2 formant")
points(mu.i, pch = 16, col = 1:(length(speaker.means.f1)))

###################################################
### code chunk number 7: aaF1MLE
###################################################
## REML=FALSE means "find the maximum-likelihood fit".
## FIX: the original also passed family="gaussian", which lmer() no
## longer accepts (linear mixed models are gaussian by definition; use
## glmer() for other families), so that argument has been dropped.
aa.f1.lmer <- lmer(F1 ~ 1 + (1 | Speaker), data = aa, REML = FALSE)
#summary(aa.f1.lmer)

###################################################
### code chunk number 8: aaF2MLE
###################################################
aa.f2.lmer <- lmer(F2 ~ 1 + (1 | Speaker), data = aa, REML = FALSE)
#summary(aa.f2.lmer)

###################################################
### code chunk number 9: aaF1plotted
###################################################
plot(as.numeric(aa$Speaker), aa$F1, xlab = "Speaker number",
     ylab = "recorded F1", type = "p")

###################################################
### code chunk number 10: aaF1BLUP
###################################################
## Compare raw per-speaker mean deviations against the shrunken
## conditional modes (BLUPs) from the hierarchical model.
rs <- ranef(aa.f1.lmer)$Speaker[, 1]
lims <- c(-180, 180)
plot(seq_along(rs), speaker.means.f1 - f1.mean, pch = 22, col = 1, bg = 1,
     ylim = lims, xlab = "Speaker number",
     ylab = "F1 deviation from inter-speaker mean")
points(seq_along(rs), rs, pch = 21, ylim = lims, bg = "magenta", col = "magenta")
abline(0, 0)
legend(0, 185, c("Averages from data", "Conditional modes in hierarchical model"),
       pch = c(22, 21), col = c(1, "magenta"), pt.bg = c(1, "magenta"))

###################################################
### code chunk number 11: rmSigma
###################################################
rm(sigma)

###################################################
### code chunk number 12: bayesianExampleHLM
###################################################
set.seed(1)
M <- length(levels(aa$Speaker))
N <- 2  # two repetitions per speaker in the Peterson & Barney data
y <- aa$F1
## reshape the F1 vector into an M x N matrix, speakers in rows
dim(y) <- c(N, M)
y <- t(y)
f1.params <- c("mu", "sigma", "sigma.b")
dat.for.jags <- list(M = M, N = N, y = y)
m <- jags.model("../f1_f2_hierarchical_modeling/f1f2_log.bug", data = dat.for.jags)
update(m, 1000)  # burn-in
res <- coda.samples(m, f1.params, n.iter = 2000, thin = 10)  # draw samples
##coda::HPDinterval(res)              # 95% HPD CI
##coda::HPDinterval(res, prob=0.001)  # conditional modes

###################################################
### code chunk number 13: bayesianExampleHLMgraph
###################################################
old.par <- par(mfrow = c(1, 3))
plot(res, trace = FALSE, auto.layout = FALSE)
par(old.par)

###################################################
### code chunk number 14: multivariateBayesianF1F2
###################################################
pb <- read.table("../data/peterson_barney_data/pb.txt", header = TRUE)

## The following functions transform sampled precision-matrix entries
## into standard-deviation/correlation representation.

## Reshape a length-4 vector of precision-matrix entries into a 2x2
## matrix and invert it, yielding a covariance matrix.  (The second
## argument was unused in the original and is kept, with a default,
## only for backward compatibility.)
matr <- function(Omega, i = NULL) {
  dim(Omega) <- c(2, 2)
  solve(Omega)
}

## Convert a 2x2 covariance matrix in place to sd/correlation form:
## diagonal entries become standard deviations, off-diagonals become
## correlations.  Order matters: the diagonal must be converted first,
## because the off-diagonal computation divides by the sds.
as.sdcor <- function(matr) {
  matr[1, 1] <- sqrt(matr[1, 1])
  matr[2, 2] <- sqrt(matr[2, 2])
  matr[1, 2] <- matr[1, 2] / (matr[1, 1] * matr[2, 2])
  matr[2, 1] <- matr[2, 1] / (matr[1, 1] * matr[2, 2])
  matr
}

## Convert one row of samples (two 2x2 precision matrices, Omega.b and
## Omega.y, followed by the two means) into sd/correlation form.
sdcor.sample <- function(x) { # convert Omega's to vcov matrices
  result <- c(as.sdcor(matr(x[1:4])),
              as.sdcor(matr(x[5:8])),
              x[9:10])
  names(result) <- c("Sigma.b[1,1]", "Sigma.b[1,2]", "Sigma.b[2,1]", "Sigma.b[2,2]",
                     "Sigma.y[1,1]", "Sigma.y[1,2]", "Sigma.y[2,1]", "Sigma.y[2,2]",
                     "mu[1]", "mu[2]")
  result
}

## Apply sdcor.sample() to every row of every chain in an mcmc.list,
## preserving the mcmc attributes of each chain.
convert.mcmc.list.sdcor <- function(mcmc.list) {
  result <- mcmc.list
  for (i in seq_along(result)) {
    mtr <- t(apply(result[[i]], 1, sdcor.sample))
    result[[i]][1:(dim(mtr)[1]), 1:(dim(mtr)[2])] <-
      mtr[1:(dim(mtr)[1]), 1:dim(mtr)[2]]
    varnames(result[[i]]) <- c("Sigma.b[1,1]", "Sigma.b[1,2]", "Sigma.b[2,1]",
                               "Sigma.b[2,2]", "Sigma.y[1,1]", "Sigma.y[1,2]",
                               "Sigma.y[2,1]", "Sigma.y[2,2]", "mu[1]", "mu[2]")
  }
  result
}

## now the stuff we need to do:
set.seed(18)
aa <- subset(pb, Vowel == "aa" & Type == "m")
##aa <- subset(pb,Vowel=="aa" & Type=="w")
y <- cbind(aa$F1, aa$F2)
## recode speaker IDs as consecutive integers starting at 1 (JAGS indexing)
speaker <- as.numeric(aa$Speaker)
speaker <- speaker - min(speaker) + 1
##y <- rbind(y,y)
##speaker <- c(speaker,speaker+28)
f1.params <- c("mu", "Omega.b", "Omega.y")
## inits might help?
## b.matr <- solve(matrix(c(50,0.9,0.9,100),2,2))
## y.matr <- solve(matrix(c(35,0,0,75),2,2))
## my.inits.1 <- list("Omega.b[1,1]"=b.matr[1,1],"Omega.b[1,2]"=b.matr[1,2],"Omega.b[2,1]"=b.matr[2,1],"Omega.b[2,2]"=b.matr[2,2],"Omega.y[1,1]"=y.matr[1,1],"Omega.y[1,2]"=y.matr[1,2],"Omega.y[2,1]"=y.matr[2,1],"Omega.y[2,2]"=y.matr[2,2],"mu[1]"=400,"mu[2]"=800)
## b.matr <- b.matr * matrix(c(1,-1,-1,1),2,2)
## my.inits.2 <- list("Omega.b[1,1]"=b.matr[1,1],"Omega.b[1,2]"=b.matr[1,2],"Omega.b[2,1]"=b.matr[2,1],"Omega.b[2,2]"=b.matr[2,2],"Omega.y[1,1]"=y.matr[1,1],"Omega.y[1,2]"=y.matr[1,2],"Omega.y[2,1]"=y.matr[2,1],"Omega.y[2,2]"=y.matr[2,2],"mu[1]"=800,"mu[2]"=1600)
##m <- jags.model("../f1_f2_hierarchical_modeling/f1f2bivariate.bug",nchain=2,inits
dat.for.jags <- list(M = max(speaker), N = dim(y)[1], y = y, speaker = speaker)
m <- jags.model("../f1_f2_hierarchical_modeling/f1f2bivariate.bug",
                data = dat.for.jags, n.chains = 1)
## FIX: the original called update(m, by=100), which runs only a single
## burn-in iteration ('by' merely sets progress-reporting granularity,
## n.iter defaults to 1); the intent was a 100-iteration burn-in.
update(m, 100)
res <- coda.samples(m, f1.params, n.iter = 2000, thin = 10)
## res.sig <- lapply(res, function(x) t(apply(x, 1, sdcor.sample)))
## FIX: apply(x, 1, f) returns one *column* per sample, so the result
## must be transposed before interval computation (rows = samples, as
## HPDinterval() expects); and HPDinterval comes from coda — current
## lme4 does not export it.
res.sdcor <- coda::as.mcmc(t(apply(res[[1]], 1, sdcor.sample)))
## res.sdcor now uses covariance-matrix representations for Sigma and Sigma.b
hpds <- coda::HPDinterval(res.sdcor)
row.names(hpds) <- c("Sigma.b[1,1]", "Sigma.b[1,2]", "Sigma.b[2,1]",
                     "Sigma.b[2,2]", "Sigma.y[1,1]", "Sigma.y[1,2]",
                     "Sigma.y[2,1]", "Sigma.y[2,2]", "mu[1]", "mu[2]")
## near-zero-width interval picks out the posterior mode / point estimate
hpds.narrow <- coda::HPDinterval(res.sdcor, prob = 0.00001)
row.names(hpds.narrow) <- c("Sigma.b[1,1]", "Sigma.b[1,2]", "Sigma.b[2,1]",
                            "Sigma.b[2,2]", "Sigma.y[1,1]", "Sigma.y[1,2]",
                            "Sigma.y[2,1]", "Sigma.y[2,2]", "mu[1]", "mu[2]")
##round(hpds,2)

###################################################
### code chunk number 15: speakerMeansOutTest
###################################################
aa$Speaker <- factor(aa$Speaker)
speaker.means.f1 <- tapply(aa$F1, aa$Speaker, mean)
speaker.means.f2 <- tapply(aa$F2, aa$Speaker, mean)
f1 <- aa$F1 - speaker.means.f1[aa$Speaker]
f2 <- aa$F2 - speaker.means.f2[aa$Speaker]
## Pearson correlations of the within-speaker residuals and of the
## between-speaker means.
ct.y <- cor.test(f1, f2, method = "pearson")
ct.b <- cor.test(speaker.means.f1, speaker.means.f2, method = "pearson")

###################################################
### code chunk number 16: rCode2008StartsHere
###################################################
### R code for Stanford 2008 dataset starts here ###

###################################################
### code chunk number 17: suiTones
###################################################
## Classify a vector of Sui speaker IDs as native "South", native
## "North", or "Other" according to fixed ID lists.
speaker.origin <- function(x) {
  ifelse(x %in% c(1, 2, 3, 4, 5, 6, 7, 8, 11, 15, 35, 36, 37, 38, 41, 42, 43),
         "South",
         ifelse(x %in% c(9, 10, 12, 14, 16, 17, 18, 19, 20, 21, 22, 23,
                         30, 31, 32, 33, 34, 39, 40),
                "North", "Other"))
}
dat.raw <- read.table("../data/sui_tone_data/sui_tone_raw_F0",
                      header = TRUE, sep = "\t")
dat.raw$Origin <- speaker.origin(dat.raw$Speaker)
time.startpoint <- with(dat.raw, tapply(Time, Trial, min))
time.endpoint <- with(dat.raw, tapply(Time, Trial, max))
## FIX: the original first assigned TimeNormalized <- Time - endpoint and
## then immediately overwrote it; that dead assignment has been removed.
## Normalize each trial's time axis to [0, 1].
dat.raw$TimeNormalized <- with(dat.raw, (Time - time.startpoint[Trial]) /
                                 (time.endpoint[Trial] - time.startpoint[Trial]))
## pitch in semitones relative to 100 Hz
dat.raw$PitchLog <- 12 * log(dat.raw$Pitch / 100, 2)
## normalize each speaker's pitch by that speaker's mean Tone 3 pitch
f3.means <- with(subset(dat.raw, Tone == 3), tapply(PitchLog, Speaker, mean))
dat.raw$PitchNormalized <- dat.raw$PitchLog - f3.means[dat.raw$Speaker]

###################################################
### code chunk number 18: suiTonesPlot
###################################################
with(subset(dat.raw, Trial == "35sm1_junl1.txt" &
              TimeNormalized >= 0.25 & TimeNormalized < 0.9),
     plot(TimeNormalized, PitchNormalized, type = "l", lwd = 3, lty = 2,
          xlab = "Normalized time",
          ylab = "Deviation from mean Tone 3 semitone", ylim = c(-7, 7)))
with(subset(dat.raw, Trial == "21junl1.txt" &
              TimeNormalized >= 0.25 & TimeNormalized < 0.9),
     lines(TimeNormalized, PitchNormalized, type = "l", lwd = 3, lty = 1,
           col = "magenta"))
text(0.35, -4, "Northern speaker", col = "magenta")
text(0.35, 0, "Southern speaker", col = "black")

###################################################
### code chunk number 19: northSpeakerPlots
###################################################
t1 <- read.table("../data/sui_tone_data/sui_tone1.txt", header = TRUE)
with(subset(t1, Origin == "North"),
     plot(Slope ~ Speaker,
          pch = ifelse(MarriedToSoutherner, 22, 23),
          col = ifelse(MarriedToSoutherner, "green", "blue"),
          bg = ifelse(MarriedToSoutherner, "green", "blue"),
          ylim = c(-25, 30)))

###################################################
### code chunk number 20: southSpeakerPlots
###################################################
with(subset(t1, Origin == "South"),
     plot(Slope ~ Speaker,
          pch = ifelse(MarriedToNortherner, 24, 21),
          col = ifelse(MarriedToNortherner, "black", "magenta"),
          bg = ifelse(MarriedToNortherner, "black", "magenta"),
          ylim = c(-25, 30)))

###################################################
### code chunk number 21: speakerInterceptsTest
###################################################
## FIX: the original formulas referred to MarriedNorth / MarriedSouth,
## names that appear nowhere else in this script; every other use of t1
## (chunks 19, 20, and m.constant below) uses MarriedToNortherner /
## MarriedToSoutherner, so those names are used here as well.  The
## family="gaussian" argument was dropped: lmer() fits gaussian models
## only and no longer accepts a family argument.
m.interspeaker <- lmer(Slope ~ Origin + MarriedToNortherner +
                         MarriedToSoutherner + (1 | Speaker),
                       data = t1, REML = FALSE)
m.interspeaker.reml <- lmer(Slope ~ Origin + MarriedToNortherner +
                              MarriedToSoutherner + (1 | Speaker),
                            data = t1, REML = TRUE)
##print(m.interspeaker,corr=F)
## FIX: lm() has no REML argument (the stray REML=T was silently
## swallowed by "..."); removed.
m.constant <- lm(Slope ~ Origin + MarriedToNortherner + MarriedToSoutherner,
                 data = t1)
m.stderrs <- sqrt(diag(vcov(m.interspeaker)))
# We can't use anova() due to incompleteness of implementation in lme4, but
# we can conduct a direct likelihood-ratio test.
##as.numeric(logLik(m.interspeaker)) - as.numeric(logLik(m.constant))
# This is a big difference in the log-likelihood.
##pchisq(2 * (as.numeric(logLik(m.interspeaker)) - as.numeric(logLik(m.constant))), df=1)

###################################################
### code chunk number 22: tStatisticTestForHLM (eval = FALSE)
###################################################
## print(m.interspeaker,corr=F)
## # the "t value" for OriginSouth is 9.61, certainly significant

###################################################
### code chunk number 23: mcmcSampledPosteriorForHLM
###################################################
## NOTE(review): mcmcsamp() and lme4's own HPDinterval() were removed
## from lme4 in the 1.0 rewrite; this chunk only runs under the old
## (pre-1.0) lme4 API.  With current lme4, interval estimates for the
## fixed effects can be obtained via confint() instead — confirm which
## lme4 version this vignette targets before changing it.
samples <- mcmcsamp(m.interspeaker, n = 1000, thin = 10)
hpds <- lme4::HPDinterval(samples, prob = 0.95)$fixef          ## not coda::HPDinterval()
hpds.narrow <- lme4::HPDinterval(samples, prob = 0.0001)$fixef ## not coda::HPDinterval()

###################################################
### code chunk number 24: migrationTest
###################################################
# we construct a new 3-level factor so that R will assess a single
# F-statistic for the effect of migration:
t1$Migration <- as.factor(ifelse(t1$MarriedToNortherner, "North",
                                 ifelse(t1$MarriedToSoutherner, "South",
                                        "None")))
## FIX: family="gaussian" dropped — lmer() no longer accepts a family
## argument (gaussian is the only family it fits).
m.mig <- lmer(Slope ~ Origin + Migration + (1 | Speaker),
              data = t1, REML = FALSE)
## print(m.mig,corr=F)
f.stat <- anova(m.mig)[2, 4]  # F statistic for the Migration term
## pf(f.stat, 2, 887)

###################################################
### code chunk number 25: mcmcWithHeteroscedasticity
###################################################
## In order to run this model, we set up the observations
## y, the speaker (cluster) identities as the vector speaker, and the
## dummy predictor variable as the vector origin, which takes on value 1 for
## native-southern speakers and 0 for native-northern speakers.
set.seed(2)
y <- t1$Slope
M <- 43  # number of speakers in the Sui tone data
speaker <- t1$Speaker
origin <- ifelse(t1$Origin == "South", 1, 0)
migrated.north <- ifelse(t1$MarriedToNortherner, 1, 0)
migrated.south <- ifelse(t1$MarriedToSoutherner, 1, 0)
m <- jags.model("../data/sui_tone_data/sui_tone.bug",
                data = list(y = y, M = M, speaker = speaker, origin = origin,
                            migrated.north = migrated.north,
                            migrated.south = migrated.south))
update(m, 100)  # burn-in
m.params <- c("o", "b", "sigma.y", "mu", "sigma.b", "mn", "ms")
res <- coda.samples(m, m.params, n.iter = 2000, thin = 10)
##HPDinterval(res)[[1]][c("mu","o","mn","ms"),]
hpds <- coda::HPDinterval(res[[1]])
## near-zero-width interval picks out the posterior mode / point estimate
hpds.narrow <- coda::HPDinterval(res[[1]], prob = 0.001)

###################################################
### code chunk number 26: rCode2008Ends
###################################################
### R code for Stanford 2008 dataset ends here

###################################################
### code chunk number 27: rohdeLevyKehlerSetupDatasetAndModel
###################################################
dat <- read.table("../data/rohde_levy_kehler_data/rohde-levy-kehler-to-analyze.txt")
# introduce contrast codings that will make the predictors orthogonal
d <- subset(dat, crit == "RC_VERB+1")
V <- ifelse(d$verb == "IC", 0.5, -0.5)
A <- ifelse(d$attachment == "high", 0.5, -0.5)
C <- factor(paste(d$verb, d$attachment))
## random slopes for the full condition factor C by subject and by item
rt.lmer.full <- lmer(rt ~ V * A + ((C - 1) | subj) + ((C - 1) | item),
                     data = d, REML = FALSE)
## sd/correlation summaries of the two random-effect covariance matrices:
## correlations off the diagonal, standard deviations on it
Sigma.S <- attr(VarCorr(rt.lmer.full)$subj, "correlation")
Sigma.S[cbind(1:4, 1:4)] <- attr(VarCorr(rt.lmer.full)$subj, "stddev")
Sigma.I <- attr(VarCorr(rt.lmer.full)$item, "correlation")
Sigma.I[cbind(1:4, 1:4)] <- attr(VarCorr(rt.lmer.full)$item, "stddev")
beta <- fixef(rt.lmer.full)
beta.se <- sqrt(vcov(rt.lmer.full)[cbind(1:4, 1:4)])

###################################################
### code chunk number 28: rohdeLevyKehlerHLM
###################################################
print(rt.lmer.full, corr = FALSE)

###################################################
### code chunk number 29: fullyBayesianCrossedSubjItems
###################################################
library(rjags)
## NOTE(review): rm(list=ls()) wipes the whole workspace; kept because
## this chunk deliberately rebuilds everything it needs from scratch,
## but it makes the chunk unsafe to run mid-session.
rm(list = ls())
dat <- read.table("../data/rohde_levy_kehler_data/rohde-levy-kehler-to-analyze.txt")
# introduce contrast codings that will make the predictors orthogonal
d <- subset(dat, crit == "RC_VERB+1")
V <- ifelse(d$verb == "IC", 0.5, -0.5)
A <- ifelse(d$attachment == "high", 0.5, -0.5)
C <- factor(paste(d$verb, d$attachment))
## FIX: the original took subj/item/rt from the *full* data frame `dat`,
## whose rows do not line up with V, A, and C (computed from the subset
## `d`); all model variables must come from the same rows.
subj <- d$subjNum
item <- d$itemNum
NumItems <- max(item)
NumSubjs <- max(subj)
rt <- d$rt
## NOTE(review): the original also passed the whole data frame d and the
## factor C to jags.model(); JAGS data must be numeric, so d is dropped
## and C is passed as integer codes — confirm against the .bug file.
m <- jags.model("../data/rohde_levy_kehler_data/rohde_levy_kehler_analysis.bug",
                data = list(V = V, A = A, C = as.numeric(C), subj = subj,
                            item = item, NumItems = NumItems,
                            NumSubjs = NumSubjs, rt = rt))
update(m, 100)  # burn-in
m.params <- c("beta.A", "beta.V", "beta.VA", "mu", "sigma",
              "sigma.subj", "sigma.item")
res <- coda.samples(m, m.params, n.iter = 2000, thin = 10)
hpds <- coda::HPDinterval(res[[1]])
## near-zero-width interval picks out the posterior mode / point estimate
hpds.narrow <- coda::HPDinterval(res[[1]], prob = 0.0001)

###################################################
### code chunk number 30: dativeGLMMloadLibs
###################################################
library(languageR)

###################################################
### code chunk number 31: dativeGLMM
###################################################
## Collapse three-level accessibility into given vs. new.
DiscStatusOfRec <- factor(ifelse(dative$AccessOfRec == "given", "given", "new"))
DiscStatusOfTheme <- factor(ifelse(dative$AccessOfTheme == "given", "given", "new"))
## FIX: a binomial mixed model must be fitted with glmer(); lmer() no
## longer accepts a family argument.
dative.glmm <- glmer(RealizationOfRecipient ~ log(LengthOfRecipient) +
                       log(LengthOfTheme) + AnimacyOfRec + AnimacyOfTheme +
                       DiscStatusOfRec + DiscStatusOfTheme + PronomOfRec +
                       PronomOfTheme + DefinOfRec + DefinOfTheme + (1 | Verb),
                     dative, family = "binomial")
stderrs <- sqrt(diag(vcov(dative.glmm)))

###################################################
### code chunk number 32: fullyBayesianDativeGLMM
###################################################
library(languageR)
library(rjags)
library(coda)
data(dative)
DiscStatusOfRec <- factor(ifelse(dative$AccessOfRec == "given", "given", "new"))
DiscStatusOfTheme <- factor(ifelse(dative$AccessOfTheme == "given", "given", "new"))
## Assemble numeric data for JAGS.  FIXES relative to the original:
## (1) N=length(y) was evaluated *inside* the list() call, where `y`
##     refers to whatever global y happens to exist (left over from an
##     earlier chunk), not to the y component being built; N is now
##     computed from the finished list.
## (2) DT/DR tested DefinOfTheme/DefinOfRec against "pronominal", which
##     is a level of the *pronominality* factors; definiteness is coded
##     "definite"/"indefinite", so both dummies came out all-zero.
dat <- list(y = ifelse(dative$RealizationOfRecipient == "PP", 1, 0),
            LT = dative$LengthOfTheme,
            AT = ifelse(dative$AnimacyOfTheme == "animate", 1, 0),
            ST = ifelse(DiscStatusOfTheme == "given", 1, 0),
            PT = ifelse(dative$PronomOfTheme == "pronominal", 1, 0),
            DT = ifelse(dative$DefinOfTheme == "definite", 1, 0),
            LR = dative$LengthOfRec,
            AR = ifelse(dative$AnimacyOfRec == "animate", 1, 0),
            SR = ifelse(DiscStatusOfRec == "given", 1, 0),
            PR = ifelse(dative$PronomOfRec == "pronominal", 1, 0),
            DR = ifelse(dative$DefinOfRec == "definite", 1, 0),
            verb = as.numeric(dative$Verb))
dat$N <- length(dat$y)
dat$M <- max(dat$verb)
m <- jags.model("../dative_jags/dative_jags_intermediate.bug", data = dat)
m.params <- c("mu", "sigma.b", "b.LT", "b.AT", "b.ST", "b.PT", "b.DT", "b")
update(m, 100)  # burn-in
res <- coda.samples(m, m.params, n.iter = 1000, thin = 1)
hpds <- coda::HPDinterval(res[[1]])
## near-zero-width interval picks out the posterior mode / point estimate
hpds.narrow <- coda::HPDinterval(res[[1]], prob = 0.0001)
b.pm <- hpds.narrow[1:75, 1]      # per-verb random-intercept point estimates
b.lower <- hpds[1:75, 1]
b.upper <- hpds[1:75, 2]
## magenta where the 95% interval excludes zero
my.col <- ifelse(b.upper * b.lower > 0, "magenta", "gray")
nms <- levels(dative$Verb)
support <- tapply(dative$Verb, dative$Verb, length)  # tokens per verb
labels <- paste(nms, support)

###################################################
### code chunk number 33: fullyBayesianDativeGLMMposteriorOnB
###################################################
barplot(b.pm[order(b.pm)], names.arg = labels[order(b.pm)],
        las = 3, ylim = c(-10, 10), width = 1, space = 0.25,
        col = my.col[order(b.pm)])
# mgp fix to give room for verb names
x.place <- 1:75 * 1.25 - 0.5
arrows(x.place, b.lower[order(b.pm)], x.place, b.upper[order(b.pm)],
       angle = 90, length = 0.05, code = 3)

###################################################
### code chunk number 34: dativeLogitHierarchicalLRT
###################################################
## Likelihood-ratio comparison: refit with the full three-level
## accessibility factors in place of the two-level recodings.
dm <- update(dative.glmm,
             RealizationOfRecipient ~ . - DiscStatusOfRec - DiscStatusOfTheme +
               AccessOfRec + AccessOfTheme)
devDiff <- 2 * (logLik(dm) - logLik(dative.glmm))

###################################################
### code chunk number 35: dativeGlmmProbabilityProportionFit
###################################################
library(Hmisc)
## Plot observed response proportions against mean fitted probabilities,
## binned at the cut points in `where`; point size reflects bin support.
## (The scalesize and method parameters are retained for interface
## compatibility but are unused; the unused local classCounts from the
## original has been removed, and `=` assignments changed to `<-`.)
f <- function(x, data, method = "cut", where = seq(0, 1, by = 0.1),
              scalesize = NA, pch = 21) {
  y <- attr(x@frame, "terms")
  ## name of the model's response variable
  depvar <- names(attr(terms(y), "dataClasses")[attr(terms(y), "response")])
  probs <- fitted(x)
  classes <- cut2(probs, where, levels.mean = TRUE)
  ## observed proportion and number of observations per probability bin
  means <- tapply(as.numeric(data[, depvar]) - 1, classes, mean)
  supports <- tapply(as.numeric(data[, depvar]) - 1, classes, length)
  plot(as.numeric(names(means)), means,
       xlab = "mean predicted probabilities",
       ylab = "observed proportions", type = "n", pch = pch)
  abline(0, 1, col = "magenta", lwd = 3)  # perfect-calibration line
  points(as.numeric(names(means)), means, pch = pch,
         cex = sqrt(supports) / mean(sqrt(supports)))
  mtext(paste0("R-squared: ",
               round(cor(as.numeric(names(means)), means)^2, 2)), 3, 1.5)
}
f(dative.glmm, dative, where = seq(0, 1, by = 0.05), pch = 19)

###################################################
### code chunk number 36: probabilityProportionPlotDative
###################################################
plotlogistic.fit.fnc(dative.glmm, dative)

###################################################
### code chunk number 37: childrenF1plot
###################################################
pb <- read.table("../data/peterson_barney_data/pb.txt", header = TRUE)
library(lattice)
print(densityplot(~ F1 | Vowel, data = subset(pb, Type == "c"),
                  layout = c(5, 2)))

###################################################
### code chunk number 38: shrinkageBLUPS (eval = FALSE)
###################################################
## mu <- fixef(aa.f1.lmer)[1]
## sigma.b <- 107.971
## sigma.y <- 34.952
## speaker.means <- with(aa,tapply(F1, Speaker, mean))
## blup.means <- ranef(aa.f1.lmer)$Speaker[,1] + mu
###################################################
### code chunk number 39: shrinkageLogLik (eval = FALSE)
###################################################
## FIX (in commented code): in the original each sum's second line began
## with "+", so when uncommented R would have evaluated it as a separate
## expression rather than as a continuation; the "+" now ends the first
## line so the expression stays open.
## logLik.means <- sum(log(dnorm(speaker.means, mu,sigma.b))) +
##   sum(log(dnorm(aa$F1, speaker.means[aa$Speaker], sigma.y)))
## logLik.blup <- sum(log(dnorm(blup.means, mu,sigma.b))) +
##   sum(log(dnorm(aa$F1, blup.means[aa$Speaker], sigma.y)))
## logLik.blup - logLik.means # the difference is positive!

###################################################
### code chunk number 40: pbF1nonLogPrior
###################################################
pb <- read.table("../data/peterson_barney_data/pb.txt", header = TRUE)
aa <- subset(pb, Vowel == "aa" & Type == "m")
aa$Speaker <- factor(aa$Speaker)
y <- aa$F1
M <- length(levels(aa$Speaker))
N <- 2  # two repetitions per speaker
## reshape the F1 vector into an M x N matrix, speakers in rows
dim(y) <- c(N, M)
y <- t(y)
f1.params <- c("mu", "sigma", "sigma.b")
dat.for.jags <- list(M = M, N = N, y = y)
m <- jags.model("../f1_f2_hierarchical_modeling/f1f2.bug", data = dat.for.jags)
update(m, 1000)  # burn-in
res <- coda.samples(m, f1.params, n.iter = 2000, thin = 10)  # draw samples
print(coda::HPDinterval(res))  # 95% HPD CI

###################################################
### code chunk number 41: suiHomoscedastic (eval = FALSE)
###################################################
## rm(sigma.y)
## y <- t1$Slope
## M <- 43
## speaker <- t1$Speaker
## origin <- ifelse(t1$Origin=="South",1,0)
## migrated.north <- ifelse(t1$MarriedToNortherner,1,0)
## migrated.south <- ifelse(t1$MarriedToSoutherner,1,0)
## m <- jags.model("../data/sui_tone_data/sui_tone_homoscedastic.bug",data=list(y=y,M=M,speaker=speaker,origin=origin,migrated.north=migrated.north,migrated.south=migrated.south))
## update(m,100)
## m.params <- c("o","b","sigma.y","mu","sigma.b","mn","ms")
## res <- coda.samples(m, m.params, n.iter=2000,thin=10)
## HPDinterval(res)[[1]][c("mu","o","mn","ms"),]

###################################################
### code chunk number 42: ranefHypothesisTest (eval = FALSE)
###################################################
## NOTE(review): the commented model below uses lowercase v * a while
## the fitted model rt.lmer.full (chunk 27) uses V * A — confirm the
## intended variable names before running this comparison.
## rt.lmer.intercepts <- lmer(rt ~ v * a + ( 1 | subj) + ( 1 | item),
##                            data=d, REML=F)
## anova(rt.lmer.intercepts,rt.lmer.full)

###################################################
### code chunk number 43: rohdeLevyKehlerRandomInteractions
###################################################
dat <- read.table("../data/rohde_levy_kehler_data/rohde-levy-kehler-to-analyze.txt")
# introduce contrast codings that will make the predictors orthogonal
d <- subset(dat, crit == "RC_VERB+1")
V <- ifelse(d$verb == "IC", 0.5, -0.5)
A <- ifelse(d$attachment == "high", 0.5, -0.5)
C <- factor(paste(d$verb, d$attachment))
## FIX: subj/item/rt were taken from the *full* data frame `dat`, whose
## rows do not line up with V, A, and C (computed from the subset `d`);
## all model variables must come from the same rows.
subj <- d$subjNum
item <- d$itemNum
NumItems <- max(item)
NumSubjs <- max(subj)
rt <- d$rt
library(rjags)
##R <- diag(1.0E-5,4)
#m.full <- jags.model("../data/rohde_levy_kehler_data/rohde_levy_kehler_analysis-random-interactions.bug",n.chains=3)
#update(m.full,100)
#m.params <- c("beta.A","beta.V","beta.VA","mu","sigma","si11","rhoi12","ss11","rhos12","xi","bS","bI")
#res <- coda.samples(m.full,m.params,n.iter=2000,thin=10)
## alternative: scaled inverse Wishart (Gelman & Hill, 376)
## NOTE(review): the original wrapped this in with(d, ...), but none of
## these names are columns of d, so a plain list() is equivalent; the
## factor C is passed as integer codes since JAGS data must be numeric —
## confirm against the .bug file.
dat.for.jags <- list(NumItems = NumItems, NumSubjs = NumSubjs, rt = rt,
                     V = V, A = A, C = as.numeric(C),
                     subj = subj, item = item)
m.full <- jags.model("../data/rohde_levy_kehler_data/rohde_levy_kehler_analysis-random-interactions-scaled-iwish.bug",
                     data = dat.for.jags, n.chains = 3)
update(m.full, 100)  # burn-in
m.params <- c("beta.A", "beta.V", "beta.VA", "mu", "sigma", "si", "rhoi",
              "ss", "rhos", "xis", "xii", "bS", "bI")
system.time(res <- coda.samples(m.full, m.params, n.iter = 2000, thin = 10))
interesting.params <- c("beta.A", "beta.V", "beta.VA", "mu", "sigma",
                        sprintf("si[%s]", 1:4), sprintf("ss[%s]", 1:4),
                        sprintf("rhoi[%s,%s]", c(1, 1, 1, 2, 2, 3),
                                c(2, 3, 4, 3, 4, 4)),
                        sprintf("rhos[%s,%s]", c(1, 1, 1, 2, 2, 3),
                                c(2, 3, 4, 3, 4, 4)))
#sprintf("xis[%s]",1:4),sprintf("xii[%s]",1:4),
res.subset <- res[, interesting.params, drop = FALSE]
hpds <- coda::HPDinterval(res[[1]])
hpds2 <- coda::HPDinterval(res[[2]])
hpds3 <- coda::HPDinterval(res[[3]])
## near-zero-width interval picks out the posterior mode / point estimate
hpds.narrow <- coda::HPDinterval(res.subset[[1]], prob = 0.0001)

###################################################
### code chunk number 44: rohdeLevyKehlerRandomInteractions
###################################################
## marginal posterior density of the by-subject correlation rhos[1,2]
res.rhos12 <- res[, "rhos[1,2]", drop = FALSE]
plot(res.rhos12, trace = FALSE)

###################################################
### code chunk number 45: rohdeLevyKehlerRandomInteractions2
###################################################
## marginal posterior density of the by-item correlation rhoi[1,2]
res.rhoi12 <- res[, "rhoi[1,2]", drop = FALSE]
plot(res.rhoi12, trace = FALSE)