# NOT RUN {
data(Primates301)
plot( log(brain) ~ log(body) , data=Primates301 )
data(Primates301_distance_matrix)
image(Primates301_distance_matrix)

# Gaussian process phylogenetic regression

# prep variables: keep only species with complete outcome and predictors
d <- Primates301
d$name <- as.character(d$name)
keep <- complete.cases( d$social_learning , d$research_effort , d$body , d$brain )
dstan <- d[ keep , ]

# prune the phylogenetic distance matrix to the species retained in dstan
spp_obs <- dstan$name
dist_full <- Primates301_distance_matrix
dist_obs <- dist_full[ spp_obs , spp_obs ]
# sanity check that row/column names line up with spp_obs:
# cbind( sort(spp_obs) , sort(colnames(dist_obs)) )

# rescale distances so the maximum pairwise distance is 1
y3 <- dist_obs / max(dist_obs)
# Centered GP parameterization: species offsets g are drawn directly from a
# multivariate normal whose covariance decays with scaled phylogenetic
# distance (L2 / squared-exponential kernel). sample=FALSE only builds the
# Stan code without sampling.
mP301GP <- ulam(
  alist(
    social_learning ~ poisson( lambda ),
    log(lambda) <- a + g[spp_id] + b_ef*log_research_effort + b_body*log_body + b_eq*log_brain,
    a ~ normal(0,1),
    vector[N_spp]: g ~ multi_normal( 0 , SIGMA ),
    # NOTE(review): 0.01 appears to be a small diagonal jitter for numerical
    # stability — confirm against cov_GPL2's signature
    matrix[N_spp,N_spp]: SIGMA <- cov_GPL2( Dmat , etasq , rhosq , 0.01 ),
    b_body ~ normal(0,1),
    b_eq ~ normal(0,1),
    b_ef ~ normal(1,1),
    etasq ~ exponential(1),
    rhosq ~ exponential(1)
  ),
  data=list(
    N_spp = nrow(dstan),
    social_learning = dstan$social_learning,
    # seq_len() is safe even if dstan has zero rows (1:nrow would give c(1,0))
    spp_id = seq_len(nrow(dstan)),
    log_research_effort = log(dstan$research_effort),
    log_body = log(dstan$body),
    log_brain = log(dstan$brain),
    Dmat = y3
  ) ,
  control=list(max_treedepth=15,adapt_delta=0.95) ,
  sample=FALSE , iter=400 )
# Non-centered (Cholesky) parameterization of the same GP model: g is built
# deterministically as L_SIGMA * eta with eta ~ normal(0,1), which often
# samples better than the centered form. sample=FALSE only builds the code.
mP301GPnc <- ulam(
  alist(
    social_learning ~ poisson( lambda ),
    log(lambda) <- a + g[spp_id] + b_ef*log_research_effort + b_body*log_body + b_eq*log_brain,
    a ~ normal(0,1),
    # deterministic transform of the standardized latent vector eta
    vector[N_spp]: g <<- L_SIGMA * eta,
    vector[N_spp]: eta ~ normal( 0 , 1 ),
    matrix[N_spp,N_spp]: L_SIGMA <<- cholesky_decompose( SIGMA ),
    matrix[N_spp,N_spp]: SIGMA <- cov_GPL2( Dmat , etasq , rhosq , 0.01 ),
    b_body ~ normal(0,1),
    b_eq ~ normal(0,1),
    b_ef ~ normal(1,1),
    etasq ~ exponential(1),
    rhosq ~ exponential(1)
  ),
  data=list(
    N_spp = nrow(dstan),
    social_learning = dstan$social_learning,
    # seq_len() is safe even if dstan has zero rows (1:nrow would give c(1,0))
    spp_id = seq_len(nrow(dstan)),
    log_research_effort = log(dstan$research_effort),
    log_body = log(dstan$body),
    log_brain = log(dstan$brain),
    Dmat = y3
  ) ,
  control=list(max_treedepth=15,adapt_delta=0.95) ,
  sample=FALSE , iter=400 )
# Pagel's lambda approach --- Not endorsed!
# This is of historical interest only
data(Primates301_vcov_matrix)
# prune the phylogenetic covariance matrix to the species kept in dstan
vcov_thin <- Primates301_vcov_matrix[ spp_obs , spp_obs ]

# Non-centered Pagel's-lambda model: SIGMA shrinks the off-diagonal of the
# raw phylogenetic covariance by Plambda, then g = L_SIGMA * eta.
mP301L <- ulam(
  alist(
    social_learning ~ poisson( lambda ),
    log(lambda) <- a + g[spp_id] + b_ef*log_research_effort + b_body*log_body + b_eq*log_brain,
    a ~ normal(0,1),
    vector[N_spp]: g <<- L_SIGMA * eta,
    vector[N_spp]: eta ~ normal( 0 , 1 ),
    matrix[N_spp,N_spp]: L_SIGMA <<- cholesky_decompose( SIGMA ),
    matrix[N_spp,N_spp]: SIGMA <- cov_Pagel( SIGMA_raw , Plambda ),
    b_body ~ normal(0,1),
    b_eq ~ normal(0,1),
    b_ef ~ normal(1,1),
    Plambda ~ beta(2,2)
  ),
  data=list(
    N_spp = nrow(dstan),
    social_learning = dstan$social_learning,
    # seq_len() is safe even if dstan has zero rows (1:nrow would give c(1,0))
    spp_id = seq_len(nrow(dstan)),
    log_research_effort = log(dstan$research_effort),
    log_body = log(dstan$body),
    log_brain = log(dstan$brain),
    SIGMA_raw = vcov_thin
  ) ,
  control=list(max_treedepth=15,adapt_delta=0.95) ,
  sample=TRUE , iter=400 )
# centered version --- seems to mix better
# Same Pagel's-lambda model as mP301L, but with g drawn directly from the
# multivariate normal (centered parameterization) instead of via Cholesky.
mP301L2 <- ulam(
  alist(
    social_learning ~ poisson( lambda ),
    log(lambda) <- a + g[spp_id] + b_ef*log_research_effort + b_body*log_body + b_eq*log_brain,
    a ~ normal(0,1),
    vector[N_spp]: g ~ multi_normal( 0 , SIGMA ),
    matrix[N_spp,N_spp]: SIGMA <- cov_Pagel( SIGMA_raw , Plambda ),
    b_body ~ normal(0,1),
    b_eq ~ normal(0,1),
    b_ef ~ normal(1,1),
    Plambda ~ beta(2,2)
  ),
  data=list(
    N_spp = nrow(dstan),
    social_learning = dstan$social_learning,
    # seq_len() is safe even if dstan has zero rows (1:nrow would give c(1,0))
    spp_id = seq_len(nrow(dstan)),
    log_research_effort = log(dstan$research_effort),
    log_body = log(dstan$body),
    log_brain = log(dstan$brain),
    SIGMA_raw = vcov_thin
  ) ,
  control=list(max_treedepth=15,adapt_delta=0.95) ,
  sample=TRUE , iter=400 )
# }
# Run the code above in your browser using DataLab