Julia posteriordb implementations.
See the PosteriorDB.jl extension at
https://github.com/nsiccha/StanBlocks.jl/blob/main/ext/PosteriorDBExt.jl, which is also included below:
module PosteriorDBExt
import PosteriorDB
import StanBlocks
import StanBlocks: julia_implementation, @stan, @parameters, @transformed_parameters, @model, @broadcasted
import StanBlocks: bernoulli_lpmf, binomial_lpmf, log_sum_exp, logit, binomial_logit_lpmf, bernoulli_logit_lpmf, inv_logit, log_inv_logit, rep_vector, square, normal_lpdf, sd, multi_normal_lpdf, student_t_lpdf, gp_exp_quad_cov, log_mix, append_row, append_col, pow, diag_matrix, normal_id_glm_lpdf, rep_matrix, rep_row_vector, cholesky_decompose, dot_self, cumulative_sum, softmax, log1m_inv_logit, matrix_constrain, integrate_ode_rk45, integrate_ode_bdf, poisson_lpdf, nothrow_log, categorical_lpmf, dirichlet_lpdf, exponential_lpdf, sub_col, stan_tail, dot_product, segment, inv_gamma_lpdf, diag_pre_multiply, multi_normal_cholesky_lpdf, logistic_lpdf
using Statistics, LinearAlgebra
# Resolve the `:stan_blocks` implementation of a PosteriorDB model by dispatching
# `julia_implementation` on the model's name (lifted to a `Val` for dispatch).
@inline PosteriorDB.implementation(model::PosteriorDB.Model, ::Val{:stan_blocks}) = julia_implementation(Val(Symbol(PosteriorDB.name(model))))
# @inline StanBlocks.stan_implementation(posterior::PosteriorDB.Posterior) = StanProblem(
# PosteriorDB.path(PosteriorDB.implementation(PosteriorDB.model(posterior), "stan")),
# PosteriorDB.load(PosteriorDB.dataset(posterior), String);
# nan_on_error=true
# )
# Build the Julia implementation for a posterior: dispatch on the model name and
# splat the posterior's dataset entries as keyword arguments (keys converted to Symbols).
julia_implementation(posterior::PosteriorDB.Posterior) = julia_implementation(
Val(Symbol(PosteriorDB.name(PosteriorDB.model(posterior))));
Dict([Symbol(k)=>v for (k, v) in pairs(PosteriorDB.load(PosteriorDB.dataset(posterior)))])...
)
# posteriordb `earn_height`: linear regression of earnings on height.
julia_implementation(::Val{:earn_height}; N, earn, height, kwargs...) = begin
    @stan begin
        @parameters begin
            beta::vector[2]
            sigma::real(lower=0.)
        end
        @model begin
            earn ~ normal(@broadcasted(beta[1] + beta[2] * height), sigma);
        end
    end
end
# posteriordb `wells_dist`: logistic regression of well-switching on distance.
julia_implementation(::Val{:wells_dist}; N, switched, dist, kwargs...) = begin
    @stan begin
        @parameters begin
            beta::vector[2]
        end
        @model begin
            switched ~ bernoulli_logit(@broadcasted(beta[1] + beta[2] * dist));
        end
    end
end
# posteriordb `sesame_one_pred_a`: linear regression of `watched` on `encouraged`.
julia_implementation(::Val{:sesame_one_pred_a}; N, encouraged, watched, kwargs...) = begin
    @stan begin
        @parameters begin
            beta::vector[2]
            sigma::real(lower=0.)
        end
        @model begin
            watched ~ normal(@broadcasted(beta[1] + beta[2] * encouraged), sigma);
        end
    end
end
# posteriordb `Rate_1_model`: beta-binomial rate estimation with a flat Beta(1,1) prior.
julia_implementation(::Val{:Rate_1_model}; n, k, kwargs...) = begin
    @stan begin
        @parameters begin
            theta::real(lower=0.,upper=1.)
        end
        @model begin
            theta ~ beta(1, 1)
            k ~ binomial(n, theta)
        end
    end
end
# posteriordb `nes_logit_model`: logistic GLM of `vote` on income (single-column design matrix).
julia_implementation(::Val{:nes_logit_model}; N, income, vote, kwargs...) = begin
    X = reshape(income, (N, 1))
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[1]
        end
        @model begin
            vote ~ bernoulli_logit_glm(X, alpha, beta);
        end
    end
end
# posteriordb `kidscore_momiq`: linear regression of kid's score on mother's IQ, half-Cauchy prior on sigma.
julia_implementation(::Val{:kidscore_momiq}; N, kid_score, mom_iq, kwargs...) = begin
    @stan begin
        @parameters begin
            beta::vector[2]
            sigma::real(lower=0)
        end
        @model begin
            sigma ~ cauchy(0, 2.5);
            kid_score ~ normal(@broadcasted(beta[1] + beta[2] * mom_iq), sigma);
        end
    end
end
# posteriordb `kidscore_momhs`: linear regression of kid's score on mother's high-school indicator.
julia_implementation(::Val{:kidscore_momhs}; N, kid_score, mom_hs, kwargs...) = begin
    @stan begin
        @parameters begin
            beta::vector[2]
            sigma::real(lower=0)
        end
        @model begin
            sigma ~ cauchy(0, 2.5);
            kid_score ~ normal(@broadcasted(beta[1] + beta[2] * mom_hs), sigma);
        end
    end
end
# posteriordb `logearn_height`: linear regression of log-earnings on height.
julia_implementation(::Val{:logearn_height}; N, earn, height, kwargs...) = begin
    log_earn = @. log(earn)
    @stan begin
        @parameters begin
            beta::vector[2]
            sigma::real(lower=0)
        end
        @model begin
            log_earn ~ normal(@broadcasted(beta[1] + beta[2] * height), sigma);
        end
    end
end
# posteriordb `blr`: Bayesian linear regression with normal(0,10) priors on beta and sigma.
julia_implementation(::Val{:blr}; N, D, X, y, kwargs...) = begin
    @assert size(X) == (N,D)
    @stan begin
        @parameters begin
            beta::vector[D]
            sigma::real(lower=0)
        end
        @model begin
            target += normal_lpdf(beta, 0, 10);
            target += normal_lpdf(sigma, 0, 10);
            target += normal_lpdf(y, X * beta, sigma);
        end
    end
end
# posteriordb `wells_dist100_model`: logistic GLM of well-switching on distance/100.
julia_implementation(::Val{:wells_dist100_model}; N, switched, dist, kwargs...) = begin
    dist100 = @. dist / 100.
    X = reshape(dist100, (N, 1))
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[1]
        end
        @model begin
            switched ~ bernoulli_logit_glm(X, alpha, beta);
        end
    end
end
# posteriordb `Rate_3_model`: one common rate for two binomial observations.
julia_implementation(::Val{:Rate_3_model}; n1, n2, k1, k2, kwargs...) = begin
    @stan begin
        @parameters begin
            theta::real(lower=0,upper=1)
        end
        @model begin
            theta ~ beta(1, 1)
            k1 ~ binomial(n1, theta)
            k2 ~ binomial(n2, theta)
        end
    end
end
# posteriordb `logearn_height_male`: linear regression of log-earnings on height and male indicator.
julia_implementation(::Val{:logearn_height_male}; N, earn, height, male, kwargs...) = begin
    log_earn = @. log(earn)
    @stan begin
        @parameters begin
            beta::vector[3]
            sigma::real(lower=0)
        end
        @model begin
            log_earn ~ normal(@broadcasted(beta[1] + beta[2] * height + beta[3] * male), sigma);
        end
    end
end
# posteriordb `kidscore_momhsiq`: linear regression of kid's score on mother's HS indicator and IQ.
julia_implementation(::Val{:kidscore_momhsiq}; N, kid_score, mom_iq, mom_hs, kwargs...) = begin
    @stan begin
        @parameters begin
            beta::vector[3]
            sigma::real(lower=0)
        end
        @model begin
            sigma ~ cauchy(0, 2.5);
            kid_score ~ normal(@broadcasted(beta[1] + beta[2] * mom_hs + beta[3] * mom_iq), sigma);
        end
    end
end
# posteriordb `log10earn_height`: linear regression of log10-earnings on height.
julia_implementation(::Val{:log10earn_height}; N, earn, height, kwargs...) = begin
    log10_earn = @. log10(earn)
    @stan begin
        @parameters begin
            beta::vector[2]
            sigma::real(lower=0)
        end
        @model begin
            log10_earn ~ normal(@broadcasted(beta[1] + beta[2] * height), sigma);
        end
    end
end
# posteriordb `wells_dist100ars_model`: logistic GLM on distance/100 and arsenic.
julia_implementation(::Val{:wells_dist100ars_model}; N, switched, dist, arsenic, kwargs...) = begin
    dist100 = @. dist / 100.
    X = hcat(dist100, arsenic)
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[2]
        end
        @model begin
            switched ~ bernoulli_logit_glm(X, alpha, beta);
        end
    end
end
# posteriordb `low_dim_gauss_mix_collapse`: two-component Gaussian mixture with marginalized assignments.
julia_implementation(::Val{:low_dim_gauss_mix_collapse}; N, y, kwargs...) = begin
    @stan begin
        @parameters begin
            mu::vector[2]
            sigma::vector(lower=0)[2]
            theta::real(lower=0,upper=1)
        end
        @model begin
            sigma ~ normal(0, 2);
            mu ~ normal(0, 2);
            theta ~ beta(5, 5);
            for n in 1:N
                target += log_mix(theta, normal_lpdf(y[n], mu[1], sigma[1]),
                                  normal_lpdf(y[n], mu[2], sigma[2]));
            end
        end
    end
end
# posteriordb `normal_mixture_k`: K-component Gaussian mixture, marginalized via log_sum_exp.
julia_implementation(::Val{:normal_mixture_k}; K, N, y, kwargs...) = begin
    @stan begin
        @parameters begin
            theta::simplex[K]
            mu::vector[K]
            sigma::vector(lower=0.,upper=10.)[K]
        end
        @model begin
            mu ~ normal(0., 10.);
            for n in 1:N
                ps = @broadcasted(log(theta) + normal_lpdf(y[n], mu, sigma))
                target += log_sum_exp(ps);
            end
        end
    end
end
# posteriordb `low_dim_gauss_mix`: two-component Gaussian mixture with ordered means.
julia_implementation(::Val{:low_dim_gauss_mix}; N, y, kwargs...) = begin
    @stan begin
        @parameters begin
            mu::ordered[2]
            sigma::vector(lower=0)[2]
            theta::real(lower=0,upper=1)
        end
        @model begin
            sigma ~ normal(0, 2);
            mu ~ normal(0, 2);
            theta ~ beta(5, 5);
            for n in 1:N
                target += log_mix(theta, normal_lpdf(y[n], mu[1], sigma[1]),
                                  normal_lpdf(y[n], mu[2], sigma[2]));
            end
        end
    end
end
# posteriordb `radon_county`: county-level random intercepts for radon measurements.
julia_implementation(::Val{:radon_county}; N, J, county, y, kwargs...) = begin
    @stan begin
        @parameters begin
            a::vector[J]
            mu_a::real
            sigma_a::real(lower=0, upper=100)
            sigma_y::real(lower=0, upper=100)
        end
        @model @views begin
            y_hat = a[county]

            mu_a ~ normal(0, 1);
            a ~ normal(mu_a, sigma_a);
            y ~ normal(y_hat, sigma_y);
        end
    end
end
# posteriordb `logearn_logheight_male`: log-earnings regressed on log-height and male indicator.
julia_implementation(::Val{:logearn_logheight_male}; N, earn, height, male, kwargs...) = begin
    log_earn = @. log(earn)
    log_height = @. log(height)
    @stan begin
        @parameters begin
            beta::vector[3]
            sigma::real(lower=0)
        end
        @model begin
            log_earn ~ normal(@broadcasted(beta[1] + beta[2] * log_height + beta[3] * male), sigma);
        end
    end
end
# posteriordb `wells_dae_model`: logistic GLM on distance/100, arsenic, and education/4.
julia_implementation(::Val{:wells_dae_model}; N, switched, dist, arsenic, educ, kwargs...) = begin
    dist100 = @. dist / 100.
    educ4 = @. educ / 4.
    X = hcat(dist100, arsenic, educ4)
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[3]
        end
        @model begin
            switched ~ bernoulli_logit_glm(X, alpha, beta);
        end
    end
end
# posteriordb `arK`: order-K autoregressive model for a time series.
julia_implementation(::Val{:arK}; K, T, y, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[K]
            sigma::real(lower=0)
        end
        @model begin
            alpha ~ normal(0, 10);
            beta ~ normal(0, 10);
            sigma ~ cauchy(0, 2.5);
            for t in K+1:T
                mu = alpha
                for k in 1:K
                    mu += beta[k] * y[t-k]
                end
                y[t] ~ normal(mu, sigma)
            end
        end
    end
end
# posteriordb `wells_interaction_model`: logistic GLM on distance/100, arsenic, and their interaction.
julia_implementation(::Val{:wells_interaction_model}; N, switched, dist, arsenic, kwargs...) = begin
    dist100 = @. dist / 100.
    inter = @. dist100 * arsenic
    X = hcat(dist100, arsenic, inter)
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[3]
        end
        @model begin
            switched ~ bernoulli_logit_glm(X, alpha, beta);
        end
    end
end
# posteriordb `radon_pooled`: completely pooled regression of log-radon on floor measure.
julia_implementation(::Val{:radon_pooled}; N, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha::real
            beta::real
            sigma_y::real(lower=0)
        end
        @model begin
            sigma_y ~ normal(0, 1);
            alpha ~ normal(0, 10);
            beta ~ normal(0, 10);

            log_radon ~ normal(@broadcasted(alpha + beta * floor_measure), sigma_y)
        end
    end
end
# posteriordb `logearn_interaction`: log-earnings on height, male, and their interaction.
julia_implementation(::Val{:logearn_interaction}; N, earn, height, male, kwargs...) = begin
    log_earn = @. log(earn)
    inter = @. height * male
    @stan begin
        @parameters begin
            beta::vector[4]
            sigma::real(lower=0)
        end
        @model begin
            log_earn ~ normal(@broadcasted(beta[1] + beta[2] * height + beta[3] * male + beta[4] * inter), sigma);
        end
    end
end
# posteriordb `logmesquite_logvolume`: log-weight regressed on log canopy volume.
julia_implementation(::Val{:logmesquite_logvolume}; N, weight, diam1, diam2, canopy_height, kwargs...) = begin
    log_weight = @. log(weight);
    log_canopy_volume = @. log(diam1 * diam2 * canopy_height);
    @stan begin
        @parameters begin
            beta::vector[2]
            sigma::real(lower=0)
        end
        @model begin
            log_weight ~ normal(@broadcasted(beta[1] + beta[2] * log_canopy_volume), sigma);
        end
    end
end
# posteriordb `garch11`: GARCH(1,1) volatility model; sigma is recursively updated from sigma1.
julia_implementation(::Val{:garch11}; T, y, sigma1, kwargs...) = begin
    @stan begin
        @parameters begin
            mu::real
            alpha0::real(lower=0.)
            alpha1::real(lower=0., upper=1.)
            beta1::real(lower=0., upper=1. - alpha1)
        end
        @model begin
            sigma = sigma1
            y[1] ~ normal(mu, sigma)
            for t in 2:T
                sigma = sqrt(alpha0 + alpha1 * square(y[t - 1] - mu) + beta1 * square(sigma))
                y[t] ~ normal(mu, sigma)
            end
        end
    end
end
# posteriordb `eight_schools_centered`: centered hierarchical model for school effects.
julia_implementation(::Val{:eight_schools_centered}; J, y, sigma, kwargs...) = begin
    @stan begin
        @parameters begin
            theta::vector[J]
            mu::real
            tau::real(lower=0)
        end
        @model begin
            tau ~ cauchy(0, 5);
            theta ~ normal(mu, tau);
            y ~ normal(theta, sigma);
            mu ~ normal(0, 5);
        end
    end
end
# posteriordb `kidscore_interaction`: kid's score on mom_hs, mom_iq, and their interaction.
julia_implementation(::Val{:kidscore_interaction}; N, kid_score, mom_iq, mom_hs, kwargs...) = begin
    inter = @. mom_hs * mom_iq;
    @stan begin
        @parameters begin
            beta::vector[4]
            sigma::real(lower=0)
        end
        @model begin
            sigma ~ cauchy(0, 2.5);
            kid_score ~ normal(@broadcasted(beta[1] + beta[2] * mom_hs + beta[3] * mom_iq + beta[4] * inter), sigma);
        end
    end
end
# posteriordb `mesquite`: linear regression of weight on six raw predictors plus intercept.
julia_implementation(::Val{:mesquite}; N, weight, diam1, diam2, canopy_height, total_height, density, group, kwargs...) = begin
    @stan begin
        @parameters begin
            beta::vector[7]
            sigma::real(lower=0)
        end
        @model begin
            weight ~ normal(@broadcasted(beta[1] + beta[2] * diam1 + beta[3] * diam2
                + beta[4] * canopy_height + beta[5] * total_height
                + beta[6] * density + beta[7] * group), sigma);
        end
    end
end
# posteriordb `gp_regr`: GP regression with squared-exponential kernel plus observation noise on the diagonal.
julia_implementation(::Val{:gp_regr}; N, x, y, kwargs...) = begin
    @stan begin
        @parameters begin
            rho::real(lower=0)
            alpha::real(lower=0)
            sigma::real(lower=0)
        end
        @model begin
            cov = gp_exp_quad_cov(x, alpha, rho) + diag_matrix(rep_vector(sigma, N));
            # L_cov = cholesky_decompose(cov);
            rho ~ gamma(25, 4);
            alpha ~ normal(0, 2);
            sigma ~ normal(0, 1);

            y ~ multi_normal(rep_vector(0., N), cov);
            # Think about how to do this
            # y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);
        end
    end
end
# posteriordb `kidscore_mom_work`: kid's score on dummy-coded mother's work categories (2-4).
julia_implementation(::Val{:kidscore_mom_work}; N, kid_score, mom_work, kwargs...) = begin
    work2 = @. Float64(mom_work == 2)
    work3 = @. Float64(mom_work == 3)
    work4 = @. Float64(mom_work == 4)
    @stan begin
        @parameters begin
            beta::vector[4]
            sigma::real(lower=0)
        end
        @model begin
            kid_score ~ normal(@broadcasted(beta[1] + beta[2] * work2 + beta[3] * work3
                + beta[4] * work4), sigma);
        end
    end
end
# posteriordb `Rate_2_model`: two independent binomial rates; `delta` is their difference.
julia_implementation(::Val{:Rate_2_model}; n1, n2, k1, k2, kwargs...) = begin
    @stan begin
        @parameters begin
            theta1::real(lower=0,upper=1)
            theta2::real(lower=0,upper=1)
        end
        delta = theta1 - theta2
        @model begin
            theta1 ~ beta(1, 1)
            theta2 ~ beta(1, 1)
            k1 ~ binomial(n1, theta1)
            k2 ~ binomial(n2, theta2)
        end
    end
end
# posteriordb `wells_interaction_c_model`: logistic GLM on centered distance/100, centered arsenic, and their interaction.
julia_implementation(::Val{:wells_interaction_c_model}; N, switched, dist, arsenic, kwargs...) = begin
    c_dist100 = @. (dist - $mean(dist)) / 100.0;
    c_arsenic = @. arsenic - $mean(arsenic);
    inter = @. c_dist100 * c_arsenic
    X = hcat(c_dist100, c_arsenic, inter)
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[3]
        end
        @model begin
            switched ~ bernoulli_logit_glm(X, alpha, beta);
        end
    end
end
# posteriordb `radon_county_intercept`: per-county intercepts with a common slope on floor measure.
julia_implementation(::Val{:radon_county_intercept}; N, J, county_idx, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha::vector[J]
            beta::real
            sigma_y::real(lower=0)
        end
        @model begin
            sigma_y ~ normal(0, 1);
            alpha ~ normal(0, 10);
            beta ~ normal(0, 10);
            for n in 1:N
                mu = alpha[county_idx[n]] + beta * floor_measure[n];
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `kidscore_interaction_c`: interaction model with mean-centered predictors.
julia_implementation(::Val{:kidscore_interaction_c}; N, kid_score, mom_iq, mom_hs, kwargs...) = begin
    c_mom_hs = @. mom_hs - $mean(mom_hs);
    c_mom_iq = @. mom_iq - $mean(mom_iq);
    inter = @. c_mom_hs * c_mom_iq;
    @stan begin
        @parameters begin
            beta::vector[4]
            sigma::real(lower=0)
        end
        @model begin
            kid_score ~ normal(@broadcasted(beta[1] + beta[2] * c_mom_hs + beta[3] * c_mom_iq + beta[4] * inter), sigma);
        end
    end
end
# posteriordb `kidscore_interaction_c2`: interaction model with predictors centered at fixed reference values.
julia_implementation(::Val{:kidscore_interaction_c2}; N, kid_score, mom_iq, mom_hs, kwargs...) = begin
    c_mom_hs = @. mom_hs - .5;
    c_mom_iq = @. mom_iq - 100;
    inter = @. c_mom_hs * c_mom_iq;
    @stan begin
        @parameters begin
            beta::vector[4]
            sigma::real(lower=0)
        end
        @model begin
            kid_score ~ normal(@broadcasted(beta[1] + beta[2] * c_mom_hs + beta[3] * c_mom_iq + beta[4] * inter), sigma);
        end
    end
end
# posteriordb `gp_pois_regr`: latent GP with Poisson-log likelihood; non-centered via Cholesky of the jittered kernel.
julia_implementation(::Val{:gp_pois_regr}; N, x, k, kwargs...) = begin
    nugget = diag_matrix(rep_vector(1e-10, N))
    @stan begin
        @parameters begin
            rho::real(lower=0)
            alpha::real(lower=0)
            f_tilde::vector[N]
        end
        @transformed_parameters begin
            cov = gp_exp_quad_cov(x, alpha, rho)
            cov .+= nugget;
            L_cov = cholesky_decompose(cov);
            f = L_cov * f_tilde;
        end
        @model begin
            rho ~ gamma(25, 4);
            alpha ~ normal(0, 2);
            f_tilde ~ normal(0, 1);

            k ~ poisson_log(f);
        end
    end
end
# posteriordb `surgical_model`: hierarchical logit model for per-hospital mortality rates.
julia_implementation(::Val{:surgical_model}; N, r, n, kwargs...) = begin
    @stan begin
        @parameters begin
            mu::real
            sigmasq::real(lower=0)
            b::vector[N]
        end
        @transformed_parameters begin
            sigma = sqrt(sigmasq)
            p = @broadcasted inv_logit(b)
        end
        @model begin
            mu ~ normal(0.0, 1000.0);
            sigmasq ~ inv_gamma(0.001, 0.001);
            b ~ normal(mu, sigma);
            r ~ binomial_logit(n, b);
        end
    end
end
# posteriordb `wells_dae_c_model`: logistic GLM on centered distance/arsenic, their interaction, and education/4.
julia_implementation(::Val{:wells_dae_c_model}; N, switched, dist, arsenic, educ, kwargs...) = begin
    c_dist100 = @. (dist - $mean(dist)) / 100.0;
    c_arsenic = @. arsenic - $mean(arsenic);
    da_inter = @. c_dist100 * c_arsenic;
    educ4 = @. educ / 4.
    X = hcat(c_dist100, c_arsenic, da_inter, educ4)
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[4]
        end
        @model begin
            switched ~ bernoulli_logit_glm(X, alpha, beta);
        end
    end
end
# posteriordb `Rate_4_model`: binomial rate with an additional prior-only rate parameter.
julia_implementation(::Val{:Rate_4_model}; n, k, kwargs...) = begin
    @stan begin
        @parameters begin
            theta::real(lower=0,upper=1)
            thetaprior::real(lower=0,upper=1)
        end
        @model begin
            theta ~ beta(1, 1);
            thetaprior ~ beta(1, 1);
            k ~ binomial(n, theta);
        end
    end
end
# posteriordb `logearn_interaction_z`: log-earnings on z-scored height, male, and their interaction.
julia_implementation(::Val{:logearn_interaction_z}; N, earn, height, male, kwargs...) = begin
    log_earn = @. log(earn)
    z_height = @. (height - $mean(height)) / $sd(height);
    inter = @. z_height * male
    @stan begin
        @parameters begin
            beta::vector[4]
            sigma::real(lower=0)
        end
        @model begin
            log_earn ~ normal(@broadcasted(beta[1] + beta[2] * z_height + beta[3] * male + beta[4] * inter), sigma);
        end
    end
end
# posteriordb `normal_mixture`: two-component normal mixture with unit variances, marginalized.
julia_implementation(::Val{:normal_mixture}; N, y, kwargs...) = begin
    @stan begin
        @parameters begin
            theta::real(lower=0,upper=1)
            mu::vector[2]
        end
        @model begin
            theta ~ uniform(0, 1);
            for k in 1:2
                mu[k] ~ normal(0, 10);
            end
            for n in 1:N
                target += log_mix(theta, normal_lpdf(y[n], mu[1], 1.0),
                                  normal_lpdf(y[n], mu[2], 1.0));
            end
        end
    end
end
# posteriordb `radon_partially_pooled_centered`: centered partially-pooled county intercepts.
julia_implementation(::Val{:radon_partially_pooled_centered}; N, J, county_idx, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha::vector[J]
            mu_alpha::real
            sigma_alpha::real(lower=0)
            sigma_y::real(lower=0)
        end
        @model begin
            sigma_y ~ normal(0, 1);
            sigma_alpha ~ normal(0, 1);
            mu_alpha ~ normal(0, 10);

            alpha ~ normal(mu_alpha, sigma_alpha);
            for n in 1:N
                mu = alpha[county_idx[n]];
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `kidscore_interaction_z`: interaction model with predictors standardized by 2 sd.
julia_implementation(::Val{:kidscore_interaction_z}; N, kid_score, mom_iq, mom_hs, kwargs...) = begin
    c_mom_hs = @. (mom_hs - $mean(mom_hs)) / (2 * $sd(mom_hs));
    c_mom_iq = @. (mom_iq - $mean(mom_iq)) / (2 * $sd(mom_iq));
    inter = @. c_mom_hs * c_mom_iq;
    @stan begin
        @parameters begin
            beta::vector[4]
            sigma::real(lower=0)
        end
        @model begin
            kid_score ~ normal(@broadcasted(beta[1] + beta[2] * c_mom_hs + beta[3] * c_mom_iq + beta[4] * inter), sigma);
        end
    end
end
# posteriordb `kilpisjarvi`: simple linear regression with user-supplied prior means/scales.
julia_implementation(::Val{:kilpisjarvi}; N, x, y, xpred, pmualpha, psalpha, pmubeta, psbeta, kwargs...) = begin
    x_ = x
    @stan begin
        @parameters begin
            alpha::real
            beta::real
            sigma::real(lower=0)
        end
        @model begin
            alpha ~ normal(pmualpha, psalpha);
            beta ~ normal(pmubeta, psbeta);
            y ~ normal(@broadcasted(alpha + beta * x_), sigma);
        end
    end
end
# posteriordb `wells_daae_c_model`: logistic GLM adding `assoc` to the centered dist/arsenic/educ design.
julia_implementation(::Val{:wells_daae_c_model}; N, switched, dist, arsenic, assoc, educ, kwargs...) = begin
    c_dist100 = @. (dist - $mean(dist)) / 100.0;
    c_arsenic = @. arsenic - $mean(arsenic);
    da_inter = @. c_dist100 * c_arsenic;
    educ4 = @. educ / 4.
    X = hcat(c_dist100, c_arsenic, da_inter, assoc, educ4)
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[5]
        end
        @model begin
            switched ~ bernoulli_logit_glm(X, alpha, beta);
        end
    end
end
# posteriordb `eight_schools_noncentered`: non-centered parameterization; theta derived from theta_trans.
julia_implementation(::Val{:eight_schools_noncentered}; J, y, sigma, kwargs...) = begin
    @stan begin
        @parameters begin
            theta_trans::vector[J]
            mu::real
            tau::real(lower=0)
        end
        theta = @broadcasted(theta_trans * tau + mu);
        @model begin
            theta_trans ~ normal(0, 1);
            y ~ normal(theta, sigma);
            mu ~ normal(0, 5);
            tau ~ cauchy(0, 5);
        end
    end
end
# posteriordb `Rate_5_model`: one shared rate for two binomial observations.
julia_implementation(::Val{:Rate_5_model}; n1, n2, k1, k2, kwargs...) = begin
    @stan begin
        @parameters begin
            theta::real(lower=0,upper=1)
        end
        @model begin
            theta ~ beta(1, 1);
            k1 ~ binomial(n1, theta);
            k2 ~ binomial(n2, theta);
        end
    end
end
# posteriordb `dugongs_model`: nonlinear growth curve Y = alpha - beta * lambda^x with gamma prior on precision.
julia_implementation(::Val{:dugongs_model}; N, x, Y, kwargs...) = begin
    x_ = x
    @stan begin
        @parameters begin
            alpha::real
            beta::real
            lambda::real(lower=.5,upper=1.)
            tau::real(lower=0.)
        end
        @transformed_parameters begin
            sigma = 1. / sqrt(tau);
            U3 = logit(lambda);
        end
        @model begin
            for i in 1:N
                m = alpha - beta * pow(lambda, x_[i]);
                Y[i] ~ normal(m, sigma);
            end
            alpha ~ normal(0.0, 1000.);
            beta ~ normal(0.0, 1000.);
            lambda ~ uniform(.5, 1.);
            tau ~ gamma(.0001, .0001);
        end
    end
end
# posteriordb `irt_2pl`: two-parameter logistic IRT with hierarchical priors on ability, discrimination, difficulty.
julia_implementation(::Val{:irt_2pl}; I, J, y, kwargs...) = begin
    @stan begin
        @parameters begin
            sigma_theta::real(lower=0);
            theta::vector[J];

            sigma_a::real(lower=0);
            a::vector(lower=0)[I];

            mu_b::real;
            sigma_b::real(lower=0);
            b::vector[I];
        end
        @model begin
            sigma_theta ~ cauchy(0, 2);
            theta ~ normal(0, sigma_theta);

            sigma_a ~ cauchy(0, 2);
            a ~ lognormal(0, sigma_a);

            mu_b ~ normal(0, 5);
            sigma_b ~ cauchy(0, 2);
            b ~ normal(mu_b, sigma_b);

            for i in 1:I
                y[i,:] ~ bernoulli_logit(@broadcasted(a[i] * (theta - b[i])));
            end
        end
    end
end
# posteriordb `logmesquite_logva`: log-weight on log canopy volume, log canopy area, and group.
julia_implementation(::Val{:logmesquite_logva}; N, weight, diam1, diam2, canopy_height, group, kwargs...) = begin
    log_weight = @. log(weight);
    log_canopy_volume = @. log(diam1 * diam2 * canopy_height);
    log_canopy_area = @. log(diam1 * diam2)
    @stan begin
        @parameters begin
            beta::vector[4]
            sigma::real(lower=0)
        end
        @model begin
            log_weight ~ normal(@broadcasted(beta[1] + beta[2] * log_canopy_volume
                + beta[3] * log_canopy_area + beta[4] * group), sigma);
        end
    end
end
# posteriordb `radon_variable_slope_centered`: common intercept, centered per-county slopes.
julia_implementation(::Val{:radon_variable_slope_centered}; J, N, county_idx, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[J]
            mu_beta::real
            sigma_beta::real(lower=0)
            sigma_y::real(lower=0)
        end
        @model begin
            alpha ~ normal(0, 10);
            sigma_y ~ normal(0, 1);
            sigma_beta ~ normal(0, 1);
            mu_beta ~ normal(0, 10);

            beta ~ normal(mu_beta, sigma_beta);
            for n in 1:N
                mu = alpha + floor_measure[n] * beta[county_idx[n]];
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `radon_variable_intercept_centered`: centered per-county intercepts, common slope.
julia_implementation(::Val{:radon_variable_intercept_centered}; J, N, county_idx, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha::vector[J]
            beta::real
            mu_alpha::real
            sigma_alpha::real(lower=0)
            sigma_y::real(lower=0)
        end
        @model begin
            sigma_y ~ normal(0, 1);
            sigma_alpha ~ normal(0, 1);
            mu_alpha ~ normal(0, 10);
            beta ~ normal(0, 10);

            alpha ~ normal(mu_alpha, sigma_alpha);
            for n in 1:N
                mu = alpha[county_idx[n]] + floor_measure[n] * beta;
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `seeds_stanified_model`: binomial-logit seeds model with standard-normal priors and random effects b.
julia_implementation(::Val{:seeds_stanified_model}; I, n, N, x1, x2, kwargs...) = begin
    x1x2 = @. x1 * x2;
    @stan begin
        @parameters begin
            alpha0::real;
            alpha1::real;
            alpha12::real;
            alpha2::real;
            b::vector[I];
            sigma::real(lower=0);
        end
        @model begin
            alpha0 ~ normal(0.0, 1.0);
            alpha1 ~ normal(0.0, 1.0);
            alpha2 ~ normal(0.0, 1.0);
            alpha12 ~ normal(0.0, 1.0);
            sigma ~ cauchy(0, 1);

            b ~ normal(0.0, sigma);
            n ~ binomial_logit(N,
                @broadcasted(alpha0 + alpha1 * x1 + alpha2 * x2 + alpha12 * x1x2 + b));
        end
    end
end
# posteriordb `state_space_stochastic_level_stochastic_seasonal`: local level + stochastic seasonal
# state-space model with regression on x and w; sigma components are positive-ordered.
julia_implementation(::Val{:state_space_stochastic_level_stochastic_seasonal}; n, y, x, w, kwargs...) = begin
    x_ = x
    mu_lower = mean(y) - 3 * sd(y)
    mu_upper = mean(y) + 3 * sd(y)
    @stan begin
        @parameters begin
            mu::vector(lower=mu_lower, upper=mu_upper)[n]
            seasonal::vector[n]
            beta::real
            lambda::real
            sigma::positive_ordered[3]
        end
        @model @views begin
            for t in 12:n
                seasonal[t] ~ normal(-sum(seasonal[t-11:t-1]), sigma[1]);
            end
            for t in 2:n
                mu[t] ~ normal(mu[t - 1], sigma[2]);
            end
            y ~ normal(@broadcasted(mu + beta * x_ + lambda * w + seasonal), sigma[3]);

            sigma ~ student_t(4, 0, 1);
        end
    end
end
# posteriordb `radon_partially_pooled_noncentered`: non-centered county intercepts via alpha_raw.
julia_implementation(::Val{:radon_partially_pooled_noncentered}; N, J, county_idx, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha_raw::vector[J]
            mu_alpha::real
            sigma_alpha::real(lower=0)
            sigma_y::real(lower=0)
        end
        alpha = @.(mu_alpha + sigma_alpha * alpha_raw);
        @model begin
            sigma_y ~ normal(0, 1);
            sigma_alpha ~ normal(0, 1);
            mu_alpha ~ normal(0, 10);
            alpha_raw ~ normal(0, 1);

            for n in 1:N
                mu = alpha[county_idx[n]];
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `seeds_model`: seeds binomial-logit model with vague priors and precision parameter tau.
julia_implementation(::Val{:seeds_model}; I, n, N, x1, x2, kwargs...) = begin
    x1x2 = @. x1 * x2;
    @stan begin
        @parameters begin
            alpha0::real;
            alpha1::real;
            alpha12::real;
            alpha2::real;
            tau::real(lower=0);
            b::vector[I];
        end
        sigma = 1.0 / sqrt(tau);
        @model begin
            alpha0 ~ normal(0.0, 1.0E3);
            alpha1 ~ normal(0.0, 1.0E3);
            alpha2 ~ normal(0.0, 1.0E3);
            alpha12 ~ normal(0.0, 1.0E3);
            tau ~ gamma(1.0E-3, 1.0E-3);

            b ~ normal(0.0, sigma);
            n ~ binomial_logit(N,
                @broadcasted(alpha0 + alpha1 * x1 + alpha2 * x2 + alpha12 * x1x2 + b));
        end
    end
end
# posteriordb `arma11`: ARMA(1,1) model; errors computed recursively from one-step predictions.
julia_implementation(::Val{:arma11}; T, y, kwargs...) = begin
    @stan begin
        @parameters begin
            mu::real
            phi::real
            theta::real
            sigma::real(lower=0)
        end
        @model begin
            mu ~ normal(0, 10);
            phi ~ normal(0, 2);
            theta ~ normal(0, 2);
            sigma ~ cauchy(0, 2.5);
            nu = mu + phi * mu
            err = y[1] - nu
            err ~ normal(0, sigma);
            for t in 2:T
                nu = mu + phi * y[t-1] + theta * err
                err = y[t] - nu
                err ~ normal(0, sigma);
            end
        end
    end
end
# posteriordb `radon_hierarchical_intercept_centered`: county intercepts regressed on uranium, centered.
julia_implementation(::Val{:radon_hierarchical_intercept_centered}; J, N, county_idx, log_uppm, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha::vector[J]
            beta::vector[2]
            mu_alpha::real
            sigma_alpha::real(lower=0)
            sigma_y::real(lower=0)
        end
        @model begin
            sigma_alpha ~ normal(0, 1);
            sigma_y ~ normal(0, 1);
            mu_alpha ~ normal(0, 10);
            beta ~ normal(0, 10);

            alpha ~ normal(mu_alpha, sigma_alpha);
            for n in 1:N
                muj = alpha[county_idx[n]] + log_uppm[n] * beta[1]
                mu = muj + floor_measure[n] * beta[2];
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `seeds_centered_model`: seeds model with random effects b defined as mean-centered c.
julia_implementation(::Val{:seeds_centered_model}; I, n, N, x1, x2, kwargs...) = begin
    x1x2 = @. x1 * x2;
    @stan begin
        @parameters begin
            alpha0::real;
            alpha1::real;
            alpha12::real;
            alpha2::real;
            c::vector[I];
            sigma::real(lower=0);
        end
        b = @. c - $mean(c);
        @model begin
            alpha0 ~ normal(0.0, 1.0);
            alpha1 ~ normal(0.0, 1.0);
            alpha2 ~ normal(0.0, 1.0);
            alpha12 ~ normal(0.0, 1.0);
            sigma ~ cauchy(0, 1);

            c ~ normal(0.0, sigma);
            n ~ binomial_logit(N,
                @broadcasted(alpha0 + alpha1 * x1 + alpha2 * x2 + alpha12 * x1x2 + b));
        end
    end
end
# posteriordb `pilots`: crossed random effects for group and scenario with scaled means.
julia_implementation(::Val{:pilots}; N, n_groups, n_scenarios, group_id, scenario_id, y, kwargs...) = begin
    @stan begin
        @parameters begin
            a::vector[n_groups];
            b::vector[n_scenarios];
            mu_a::real;
            mu_b::real;
            sigma_a::real(lower=0, upper=100);
            sigma_b::real(lower=0, upper=100);
            sigma_y::real(lower=0, upper=100);
        end
        y_hat = @broadcasted(a[group_id] + b[scenario_id]);
        @model begin
            mu_a ~ normal(0, 1);
            a ~ normal(10 * mu_a, sigma_a);

            mu_b ~ normal(0, 1);
            b ~ normal(10 * mu_b, sigma_b);

            y ~ normal(y_hat, sigma_y);
        end
    end
end
# posteriordb `wells_dae_inter_model`: logistic GLM with centered predictors and all pairwise interactions.
julia_implementation(::Val{:wells_dae_inter_model}; N, switched, dist, arsenic, educ, kwargs...) = begin
    c_dist100 = @. (dist - $mean(dist)) / 100.0;
    c_arsenic = @. arsenic - $mean(arsenic);
    c_educ4 = @. (educ - $mean(educ)) / 4.
    da_inter = @. c_dist100 * c_arsenic;
    de_inter = @. c_dist100 * c_educ4;
    ae_inter = @. c_arsenic * c_educ4;
    X = hcat(c_dist100, c_arsenic, c_educ4, da_inter, de_inter, ae_inter, )
    @stan begin
        @parameters begin
            alpha::real
            beta::vector[6]
        end
        @model begin
            switched ~ bernoulli_logit_glm(X, alpha, beta);
        end
    end
end
# posteriordb `radon_variable_slope_noncentered`: common intercept, non-centered per-county slopes.
julia_implementation(::Val{:radon_variable_slope_noncentered}; J, N, county_idx, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha::real
            beta_raw::vector[J]
            mu_beta::real
            sigma_beta::real(lower=0)
            sigma_y::real(lower=0)
        end
        beta = @. mu_beta + sigma_beta * beta_raw;
        @model begin
            alpha ~ normal(0, 10);
            sigma_y ~ normal(0, 1);
            sigma_beta ~ normal(0, 1);
            mu_beta ~ normal(0, 10);
            beta_raw ~ normal(0, 1);

            for n in 1:N
                mu = alpha + floor_measure[n] * beta[county_idx[n]];
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `radon_variable_intercept_slope_centered`: centered per-county intercepts AND slopes.
julia_implementation(::Val{:radon_variable_intercept_slope_centered}; J, N, county_idx, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            sigma_y::real(lower=0)
            sigma_alpha::real(lower=0)
            sigma_beta::real(lower=0)
            alpha::vector[J]
            beta::vector[J]
            mu_alpha::real
            mu_beta::real
        end
        @model begin
            sigma_y ~ normal(0, 1);
            sigma_beta ~ normal(0, 1);
            sigma_alpha ~ normal(0, 1);
            mu_alpha ~ normal(0, 10);
            mu_beta ~ normal(0, 10);

            alpha ~ normal(mu_alpha, sigma_alpha);
            beta ~ normal(mu_beta, sigma_beta);
            for n in 1:N
                mu = alpha[county_idx[n]] + floor_measure[n] * beta[county_idx[n]];
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `radon_variable_intercept_noncentered`: non-centered county intercepts, common slope.
julia_implementation(::Val{:radon_variable_intercept_noncentered}; J, N, county_idx, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha_raw::vector[J]
            beta::real
            mu_alpha::real
            sigma_alpha::real(lower=0)
            sigma_y::real(lower=0)
        end
        alpha = @. mu_alpha + sigma_alpha * alpha_raw;
        @model begin
            sigma_y ~ normal(0, 1);
            sigma_alpha ~ normal(0, 1);
            mu_alpha ~ normal(0, 10);
            beta ~ normal(0, 10);
            alpha_raw ~ normal(0, 1);

            for n in 1:N
                mu = alpha[county_idx[n]] + floor_measure[n] * beta;
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `GLM_Poisson_model`: Poisson GLM with cubic polynomial in year (bounded coefficients).
julia_implementation(::Val{:GLM_Poisson_model}; n, C, year, kwargs...) = begin
    year_squared = year .^ 2
    year_cubed = year .^ 3
    @stan begin
        @parameters begin
            alpha::real(lower=-20, upper=+20)
            beta1::real(lower=-10, upper=+10)
            beta2::real(lower=-10, upper=+10)
            beta3::real(lower=-10, upper=+10)
        end
        log_lambda = @broadcasted(alpha + beta1 * year + beta2 * year_squared + beta3 * year_cubed)
        @model begin
            C ~ poisson_log(log_lambda);
        end
    end
end
# posteriordb `ldaK5`: latent Dirichlet allocation with 5 topics; topic assignment marginalized.
julia_implementation(::Val{:ldaK5}; V, M, N, w, doc, alpha, beta, kwargs...) = begin
    @stan begin
        @parameters begin
            theta::simplex[M,5];
            phi::simplex[5,V];
        end
        @model @views begin
            for m in 1:M
                theta[m,:] ~ dirichlet(alpha);
            end
            for k in 1:5
                phi[k,:] ~ dirichlet(beta);
            end
            for n in 1:N
                gamma = @broadcasted(log(theta[doc[n], :]) + log(phi[:, w[n]]))
                target += log_sum_exp(gamma);
            end
        end
    end
end
# posteriordb `dogs`: avoidance-learning logit model; counts of prior avoidances/shocks drive each trial.
julia_implementation(::Val{:dogs}; n_dogs, n_trials, y, kwargs...) = begin
    @stan begin
        @parameters begin
            beta::vector[3];
        end
        @model begin
            beta ~ normal(0, 100);
            for i in 1:n_dogs
                n_avoid = 0
                n_shock = 0
                for j in 1:n_trials
                    p = beta[1] + beta[2] * n_avoid + beta[3] * n_shock
                    y[i, j] ~ bernoulli_logit(p);
                    n_avoid += 1 - y[i,j]
                    n_shock += y[i,j]
                end
            end
        end
    end
end
# posteriordb `nes`: linear regression of party id on ideology, race, age dummies, education, gender, income.
julia_implementation(::Val{:nes}; N, partyid7, real_ideo, race_adj, educ1, gender, income, age_discrete, kwargs...) = begin
    age30_44 = @. Float64(age_discrete == 2);
    age45_64 = @. Float64(age_discrete == 3);
    age65up = @. Float64(age_discrete == 4);
    @stan begin
        @parameters begin
            beta::vector[9]
            sigma::real(lower=0)
        end
        @model begin
            partyid7 ~ normal(@broadcasted(beta[1] + beta[2] * real_ideo + beta[3] * race_adj
                + beta[4] * age30_44 + beta[5] * age45_64
                + beta[6] * age65up + beta[7] * educ1 + beta[8] * gender
                + beta[9] * income), sigma);
        end
    end
end
# posteriordb `dogs_log`: avoidance-learning model on the probability scale with uniform-bounded coefficients.
julia_implementation(::Val{:dogs_log}; n_dogs, n_trials, y, kwargs...) = begin
    @assert size(y) == (n_dogs, n_trials)
    @stan begin
        @parameters begin
            beta::vector[2];
        end
        @model begin
            beta[1] ~ uniform(-100, 0);
            beta[2] ~ uniform(0, 100);
            for i in 1:n_dogs
                n_avoid = 0
                n_shock = 0
                for j in 1:n_trials
                    p = inv_logit(beta[1] * n_avoid + beta[2] * n_shock)
                    y[i, j] ~ bernoulli(p);
                    n_avoid += 1 - y[i,j]
                    n_shock += y[i,j]
                end
            end
        end
    end
end
# posteriordb `GLM_Binomial_model`: binomial-logit GLM with quadratic polynomial in year.
julia_implementation(::Val{:GLM_Binomial_model};nyears, C, N, year, kwargs...) = begin
    year_squared = year .^ 2
    @stan begin
        @parameters begin
            alpha::real
            beta1::real
            beta2::real
        end
        logit_p = @broadcasted alpha + beta1 * year + beta2 * year_squared;
        @model begin
            alpha ~ normal(0, 100)
            beta1 ~ normal(0, 100)
            beta2 ~ normal(0, 100)
            C ~ binomial_logit(N, logit_p)
        end
    end
end
# posteriordb `radon_hierarchical_intercept_noncentered`: non-centered county intercepts with uranium predictor.
julia_implementation(::Val{:radon_hierarchical_intercept_noncentered}; J, N, county_idx, log_uppm, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha_raw::vector[J]
            beta::vector[2]
            mu_alpha::real
            sigma_alpha::real(lower=0)
            sigma_y::real(lower=0)
        end
        alpha = @. mu_alpha + sigma_alpha * alpha_raw;
        @model begin
            sigma_alpha ~ normal(0, 1);
            sigma_y ~ normal(0, 1);
            mu_alpha ~ normal(0, 10);
            beta ~ normal(0, 10);
            alpha_raw ~ normal(0, 1);

            for n in 1:N
                muj = alpha[county_idx[n]] + log_uppm[n] * beta[1]
                mu = muj + floor_measure[n] * beta[2];
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# posteriordb `logmesquite_logvash`: log-weight on log volume, area, shape, total height, and group.
julia_implementation(::Val{:logmesquite_logvash}; N, weight, diam1, diam2, canopy_height, total_height, group, kwargs...) = begin
    log_weight = @. log(weight);
    log_canopy_volume = @. log(diam1 * diam2 * canopy_height);
    log_canopy_area = @. log(diam1 * diam2)
    log_canopy_shape = @. log(diam1 / diam2);
    log_total_height = @. log(total_height);
    @stan begin
        @parameters begin
            beta::vector[6]
            sigma::real(lower=0)
        end
        @model begin
            log_weight ~ normal(@broadcasted(beta[1] + beta[2] * log_canopy_volume
                + beta[3] * log_canopy_area
                + beta[4] * log_canopy_shape
                + beta[5] * log_total_height + beta[6] * group), sigma);
        end
    end
end
# posteriordb `ldaK2`: LDA with K=2 topics and unit Dirichlet hyperparameters built in data prep.
julia_implementation(::Val{:ldaK2}; V, M, N, w, doc, kwargs...) = begin
    K = 2
    alpha = fill(1, K)
    beta = fill(1, V)
    @stan begin
        @parameters begin
            theta::simplex[M,K];
            phi::simplex[K,V];
        end
        @model @views begin
            for m in 1:M
                theta[m,:] ~ dirichlet(alpha);
            end
            for k in 1:K
                phi[k,:] ~ dirichlet(beta);
            end
            for n in 1:N
                gamma = @broadcasted log(theta[doc[n], :]) + log(phi[:, w[n]])
                target += log_sum_exp(gamma);
            end
        end
    end
end
# posteriordb `logmesquite`: log-weight on all six log-transformed predictors plus group.
julia_implementation(::Val{:logmesquite}; N, weight, diam1, diam2, canopy_height, total_height, density, group, kwargs...) = begin
    log_weight = @. log(weight);
    log_diam1 = @. log(diam1);
    log_diam2 = @. log(diam2);
    log_canopy_height = @. log(canopy_height);
    log_total_height = @. log(total_height);
    log_density = @. log(density);
    @stan begin
        @parameters begin
            beta::vector[7]
            sigma::real(lower=0)
        end
        @model begin
            log_weight ~ normal(@broadcasted(beta[1] + beta[2] * log_diam1 + beta[3] * log_diam2
                + beta[4] * log_canopy_height
                + beta[5] * log_total_height + beta[6] * log_density
                + beta[7] * group), sigma);
        end
    end
end
# Classic rats growth-curve hierarchical model (per-rat intercept and slope).
# Reconstructed from line-mangled source.
julia_implementation(::Val{:rats_model}; N, Npts, rat, x, y, xbar, kwargs...) = begin
    x_ = x
    @stan begin
        @parameters begin
            alpha::vector[N];
            beta::vector[N];

            mu_alpha::real;
            mu_beta::real;
            sigma_y::real(lower=0);
            sigma_alpha::real(lower=0);
            sigma_beta::real(lower=0);
        end
        @model begin
            mu_alpha ~ normal(0, 100);
            mu_beta ~ normal(0, 100);
            alpha ~ normal(mu_alpha, sigma_alpha);
            beta ~ normal(mu_beta, sigma_beta);
            for n in 1:Npts
                irat = rat[n];
                y[n] ~ normal(alpha[irat] + beta[irat] * (x_[n] - xbar), sigma_y);
            end
        end
    end
end
# Log-log mesquite regression using volume/area/shape plus density.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:logmesquite_logvas}; N, weight, diam1, diam2, canopy_height, total_height, density, group, kwargs...) = begin
    log_weight = @. log(weight);
    log_canopy_volume = @. log(diam1 * diam2 * canopy_height);
    log_canopy_area = @. log(diam1 * diam2)
    log_canopy_shape = @. log(diam1 / diam2);
    log_total_height = @. log(total_height);
    log_density = @. log(density);
    @stan begin
        @parameters begin
            beta::vector[7]
            sigma::real(lower=0)
        end
        @model begin
            log_weight ~ normal(@broadcasted(beta[1] + beta[2] * log_canopy_volume
                + beta[3] * log_canopy_area
                + beta[4] * log_canopy_shape
                + beta[5] * log_total_height + beta[6] * log_density
                + beta[7] * group), sigma);
        end
    end
end
# LSAT Rasch-style item response model; expands cumulative counts into a response matrix.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:lsat_model}; N, R, T, culm, response, kwargs...) = begin
    r = zeros(Int64, (T, N))
    for j in 1:culm[1], k in 1:T
        r[k, j] = response[1, k];
    end
    for i in 2:R
        for j in (culm[i-1]+1):culm[i], k in 1:T
            r[k, j] = response[i, k];
        end
    end
    ones = fill(1., N)
    @stan begin
        @parameters begin
            alpha::vector[T];
            theta::vector[N];
            beta::real(lower=0);
        end
        @model @views begin
            alpha ~ normal(0, 100.);
            theta ~ normal(0, 1);
            beta ~ normal(0.0, 100.);
            for k in 1:T
                r[k, :] ~ bernoulli_logit(beta * theta - alpha[k] * ones);
            end
        end
    end
end
# Radon varying-intercept, varying-slope model, non-centered parameterization.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:radon_variable_intercept_slope_noncentered}; J, N, county_idx, floor_measure, log_radon, kwargs...) = begin
    @stan begin
        @parameters begin
            sigma_y::real(lower=0)
            sigma_alpha::real(lower=0)
            sigma_beta::real(lower=0)
            alpha_raw::vector[J]
            beta_raw::vector[J]
            mu_alpha::real
            mu_beta::real
        end
        alpha = @. mu_alpha + sigma_alpha * alpha_raw;
        beta = @. mu_beta + sigma_beta * beta_raw;
        @model begin
            sigma_y ~ normal(0, 1);
            sigma_beta ~ normal(0, 1);
            sigma_alpha ~ normal(0, 1);
            mu_alpha ~ normal(0, 10);
            mu_beta ~ normal(0, 10);
            alpha_raw ~ normal(0, 1);
            beta_raw ~ normal(0, 1);

            for n in 1:N
                mu = alpha[county_idx[n]] + floor_measure[n] * beta[county_idx[n]];
                target += normal_lpdf(log_radon[n], mu, sigma_y);
            end
        end
    end
end
# Poisson GLMM with cubic year trend and observation-level random effect eps.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:GLMM_Poisson_model}; n, C, year) = begin
    year_squared = year .^ 2
    year_cubed = year .^ 3
    @stan begin
        @parameters begin
            alpha::real(lower=-20, upper=+20)
            beta1::real(lower=-10, upper=+10)
            beta2::real(lower=-10, upper=+20)
            beta3::real(lower=-10, upper=+10)
            eps::vector[n]
            sigma::real(lower=0, upper=5)
        end
        log_lambda = @broadcasted alpha + beta1 * year + beta2 * year_squared + beta3 * year_cubed + eps
        @model begin
            C ~ poisson_log(log_lambda)
            eps ~ normal(0, sigma)
        end
    end
end
# Simple site-level Poisson GLMM; the year dimension is commented out in the original.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:GLMM1_model}; nsite, nobs, obs, obsyear, obssite, misyear, missite, kwargs...) = begin
    @stan begin
        @parameters begin
            alpha::vector[nsite]
            mu_alpha::real
            sd_alpha::real(lower=0,upper=5)
        end
        # log_lambda = rep_matrix(alpha', nyear);
        @model begin
            alpha ~ normal(mu_alpha, sd_alpha)
            mu_alpha ~ normal(0, 10)
            for i in 1:nobs
                # obs[i] ~ poisson_log(log_lambda[obsyear[i], obssite[i]])
                obs[i] ~ poisson_log(alpha[obssite[i]])
            end
        end
    end
end
# Hierarchical two-parameter logistic IRT model with correlated item parameters.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:hier_2pl}; I, J, N, ii, jj, y, kwargs...) = begin
    @stan begin
        @parameters begin
            theta::vector[J];
            xi1::vector[I];
            xi2::vector[I];
            mu::vector[2];
            tau::vector(lower=0)[2];
            L_Omega::cholesky_factor_corr[2]
        end
        xi = hcat(xi1, xi2)
        alpha = @. exp(xi1);   # discrimination on log scale
        beta = xi2;            # difficulty
        @model begin
            L_Sigma = diag_pre_multiply(tau, L_Omega);
            for i in 1:I
                target += multi_normal_cholesky_lpdf(xi[i, :], mu, L_Sigma);
            end
            theta ~ normal(0, 1);
            L_Omega ~ lkj_corr_cholesky(4);
            mu[1] ~ normal(0, 1);
            tau[1] ~ exponential(.1);
            mu[2] ~ normal(0, 5);
            tau[2] ~ exponential(.1);
            y ~ bernoulli_logit(alpha[ii] .* (theta[jj] - beta[ii]));
        end
    end
end
# Dogs avoidance-learning model; builds running counts of prior shocks/avoidances.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:dogs_hierarchical}; n_dogs, n_trials, y, kwargs...) = begin
    J = n_dogs;
    T = n_trials;
    prev_shock = zeros((J,T));
    prev_avoid = zeros((J,T));

    for j in 1:J
        prev_shock[j, 1] = 0;
        prev_avoid[j, 1] = 0;
        for t in 2:T
            prev_shock[j, t] = prev_shock[j, t - 1] + y[j, t - 1];
            prev_avoid[j, t] = prev_avoid[j, t - 1] + 1 - y[j, t - 1];
        end
    end
    @stan begin
        @parameters begin
            a::real(lower=0, upper=1);
            b::real(lower=0, upper=1);
        end
        @model begin
            y ~ bernoulli(@broadcasted(a ^ prev_shock * b ^ prev_avoid));
        end
    end
end
# Dogs model with per-dog (a, b) drawn from a correlated logit-scale population.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:dogs_nonhierarchical}; n_dogs, n_trials, y, kwargs...) = begin
    J = n_dogs;
    T = n_trials;
    prev_shock = zeros((J,T));
    prev_avoid = zeros((J,T));

    for j in 1:J
        prev_shock[j, 1] = 0;
        prev_avoid[j, 1] = 0;
        for t in 2:T
            prev_shock[j, t] = prev_shock[j, t - 1] + y[j, t - 1];
            prev_avoid[j, t] = prev_avoid[j, t - 1] + 1 - y[j, t - 1];
        end
    end
    @stan begin
        @parameters begin
            mu_logit_ab::vector[2]
            sigma_logit_ab::vector(lower=0)[2]
            L_logit_ab::cholesky_factor_corr[2]
            z::matrix[J, 2]
        end
        @model @views begin
            logit_ab = rep_vector(1, J) * mu_logit_ab' + z * diag_pre_multiply(sigma_logit_ab, L_logit_ab);
            a = inv_logit.(logit_ab[ : , 1]);
            b = inv_logit.(logit_ab[ : , 2]);
            y ~ bernoulli(@broadcasted(a ^ prev_shock * b ^ prev_avoid));
            mu_logit_ab ~ logistic(0, 1);
            sigma_logit_ab ~ normal(0, 1);
            L_logit_ab ~ lkj_corr_cholesky(2);
            (z) ~ normal(0, 1);
        end
    end
end
# Capture-recapture M0 model: constant detection, zero-inflated via omega.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:M0_model}; M, T, y, kwargs...) = begin
    @assert size(y) == (M, T)
    C = 0
    s = zeros(Int64, M)
    for i in 1:M
        s[i] = sum(y[i, :])
        if s[i] > 0
            C += 1
        end
    end
    @stan begin
        @parameters begin
            omega::real(lower=0,upper=1)
            p::real(lower=0,upper=1)
        end
        @model begin
            for i in 1:M
                if s[i] > 0
                    target += bernoulli_lpmf(1, omega) + binomial_lpmf(s[i], T, p)
                else
                    # marginalize over "present but never detected" vs "absent"
                    target += log_sum_exp(bernoulli_lpmf(1, omega)
                        + binomial_lpmf(0, T, p),
                        bernoulli_lpmf(0, omega))
                end
            end
        end
    end
end
# brms-style linear regression on centered predictors (diamonds dataset).
# Reconstructed from line-mangled source.
julia_implementation(::Val{:diamonds}; N, Y, K, X, prior_only, kwargs...) = begin
    Kc = K - 1;
    Xc = zeros((N, Kc))
    means_X = zeros(Kc)
    for i in 2:K
        means_X[i - 1] = mean(X[ : , i]);
        @. Xc[ : , i - 1] = X[ : , i] - means_X[i - 1];
    end
    @stan begin
        @parameters begin
            b::vector[Kc];
            Intercept::real;
            sigma::real(lower=0.)
        end
        @model begin
            target += normal_lpdf(b, 0., 1.);
            target += student_t_lpdf(Intercept, 3., 8., 10.);
            target += student_t_lpdf(sigma, 3., 0., 10.)# - 1 * student_t_lccdf(0, 3, 0, 10);
            if !(prior_only == 1)
                target += normal_id_glm_lpdf(Y, Xc, Intercept, b, sigma);
            end
        end
    end
end
# Capture-recapture Mt model: time-varying detection probabilities.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:Mt_model}; M, T, y, kwargs...) = begin
    @assert size(y) == (M, T)
    C = 0
    s = zeros(Int64, M)
    for i in 1:M
        s[i] = sum(y[i, :])
        if s[i] > 0
            C += 1
        end
    end
    @stan begin
        @parameters begin
            omega::real(lower=0,upper=1)
            p::vector(lower=0,upper=1)[T]
        end
        @model @views begin
            for i in 1:M
                if s[i] > 0
                    target += bernoulli_lpmf(1, omega) + bernoulli_lpmf(y[i,:], p)
                else
                    target += log_sum_exp(bernoulli_lpmf(1, omega)
                        + bernoulli_lpmf(y[i,:], p),
                        bernoulli_lpmf(0, omega))
                end
            end
        end
    end
end
# Election88 multilevel logistic regression with crossed random effects.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:election88_full};
    N,
    n_age,
    n_age_edu,
    n_edu,
    n_region_full,
    n_state,
    age,
    age_edu,
    black,
    edu,
    female,
    region_full,
    state,
    v_prev_full,
    y, kwargs...) = begin
    @stan begin
        @parameters begin
            a::vector[n_age];
            b::vector[n_edu];
            c::vector[n_age_edu];
            d::vector[n_state];
            e::vector[n_region_full];
            beta::vector[5];
            sigma_a::real(lower=0, upper=100);
            sigma_b::real(lower=0, upper=100);
            sigma_c::real(lower=0, upper=100);
            sigma_d::real(lower=0, upper=100);
            sigma_e::real(lower=0, upper=100);
        end
        @model @views begin
            y_hat = @broadcasted (beta[1] + beta[2] * black + beta[3] * female
                + beta[5] * female * black + beta[4] * v_prev_full
                + a[age] + b[edu] + c[age_edu] + d[state]
                + e[region_full])
            a ~ normal(0, sigma_a);
            b ~ normal(0, sigma_b);
            c ~ normal(0, sigma_c);
            d ~ normal(0, sigma_d);
            e ~ normal(0, sigma_e);
            beta ~ normal(0, 100);
            y ~ bernoulli_logit(y_hat);
        end
    end
end
# One-hidden-layer neural-network classifier (J = 10 hidden units).
# Reconstructed from line-mangled source. NOTE: the original returns an empty
# model early (`return @stan begin end`), leaving the full model unreachable —
# preserved here; remove the early return to enable the model.
julia_implementation(::Val{:nn_rbm1bJ10}; N, M, x, K, y, kwargs...) = begin
    J = 10
    nu_alpha = 0.5;
    s2_0_alpha = (0.05 / M ^ (1 / nu_alpha)) ^ 2;
    nu_beta = 0.5;
    s2_0_beta = (0.05 / J ^ (1 / nu_beta)) ^ 2;

    ones = rep_vector(1., N);
    x1 = append_col(ones, x);

    return @stan begin end
    @stan begin
        @parameters begin
            sigma2_alpha::real(lower=0);
            sigma2_beta::real(lower=0);
            alpha::matrix[M, J];
            beta::matrix[J, K - 1];
            alpha1::row_vector[J];
            beta1::row_vector[K - 1];
        end
        @model @views begin
            v = append_col(
                ones,
                append_col(
                    ones,
                    tanh.(x1 * append_row(alpha1, alpha))
                ) * append_row(beta1, beta)
            );
            alpha1 ~ normal(0, 1);
            beta1 ~ normal(0, 1);
            sigma2_alpha ~ inv_gamma(nu_alpha / 2, nu_alpha * s2_0_alpha / 2);
            sigma2_beta ~ inv_gamma(nu_beta / 2, nu_beta * s2_0_beta / 2);

            (alpha) ~ normal(0, sqrt(sigma2_alpha));
            (beta) ~ normal(0, sqrt(sigma2_beta));
            for n in 1:N
                y[n] ~ categorical_logit(v[n, :]);
            end
        end
    end
end
# One-hidden-layer neural-network classifier (J = 100 hidden units).
# Reconstructed from line-mangled source. NOTE: original early-returns an empty
# model (`return @stan begin end`), preserved here.
julia_implementation(::Val{:nn_rbm1bJ100}; N, M, x, K, y, kwargs...) = begin
    J = 100
    nu_alpha = 0.5;
    s2_0_alpha = (0.05 / M ^ (1 / nu_alpha)) ^ 2;
    nu_beta = 0.5;
    s2_0_beta = (0.05 / J ^ (1 / nu_beta)) ^ 2;

    ones = rep_vector(1., N);
    x1 = append_col(ones, x);
    return @stan begin end
    @stan begin
        @parameters begin
            sigma2_alpha::real(lower=0);
            sigma2_beta::real(lower=0);
            alpha::matrix[M, J];
            beta::matrix[J, K - 1];
            alpha1::row_vector[J];
            beta1::row_vector[K - 1];
        end
        @model @views begin
            v = append_col(
                ones,
                append_col(
                    ones,
                    tanh.(x1 * append_row(alpha1, alpha))
                ) * append_row(beta1, beta)
            );
            alpha1 ~ normal(0, 1);
            beta1 ~ normal(0, 1);
            sigma2_alpha ~ inv_gamma(nu_alpha / 2, nu_alpha * s2_0_alpha / 2);
            sigma2_beta ~ inv_gamma(nu_beta / 2, nu_beta * s2_0_beta / 2);

            (alpha) ~ normal(0, sqrt(sigma2_alpha));
            (beta) ~ normal(0, sqrt(sigma2_beta));
            for n in 1:N
                y[n] ~ categorical_logit(v[n, :]);
            end
        end
    end
end
# Survey sample-size model: marginalizes the unknown population size n over 1:nmax.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:Survey_model}; nmax, m, k) = begin
    nmin = maximum(k)
    @stan begin
        @parameters begin
            theta::real(lower=0, upper=1)
        end
        @model begin
            # n < nmin is impossible (-Inf); otherwise uniform prior over n
            target += log_sum_exp([
                n < nmin ? log(1.0 / nmax) - Inf : log(1.0 / nmax) + binomial_lpmf(k, n, theta)
                for n in 1:nmax
            ])
        end
    end
end
# SIR-type epidemic ODE model fit to stoichiometric counts and bacteria levels.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:sir}; N_t, t, y0, stoi_hat, B_hat) = begin
    y0 = collect(Float64, y0)
    simple_SIR(t, y, theta, x_r, x_i) = begin
        dydt = zero(y)

        dydt[1] = -theta[1] * y[4] / (y[4] + theta[2]) * y[1];
        dydt[2] = theta[1] * y[4] / (y[4] + theta[2]) * y[1] - theta[3] * y[2];
        dydt[3] = theta[3] * y[2];
        dydt[4] = theta[4] * y[2] - theta[5] * y[4];

        return dydt;
    end
    t0 = 0.
    kappa = 1000000.
    @stan begin
        @parameters begin
            beta::real(lower=0)
            gamma::real(lower=0)
            xi::real(lower=0)
            delta::real(lower=0)
        end
        @model @views begin
            y = integrate_ode_rk45(simple_SIR, y0, t0, t, [beta, kappa, gamma, xi, delta]);

            beta ~ cauchy(0, 2.5);
            gamma ~ cauchy(0, 1);
            xi ~ cauchy(0, 25);
            delta ~ cauchy(0, 1);

            stoi_hat[1] ~ poisson(y0[1] - y[1, 1]);
            for n in 2:N_t
                stoi_hat[n] ~ poisson(max(1e-16, y[n - 1, 1] - y[n, 1]));
            end
            B_hat ~ lognormal(@broadcasted(nothrow_log(y[:, 4])), 0.15);
        end
    end
end
# Lotka-Volterra predator-prey ODE model with lognormal observation noise.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:lotka_volterra}; N, ts, y_init, y) = begin
    dz_dt(t, z, theta, x_r, x_i) = begin
        u, v = z
        alpha, beta, gamma, delta = theta
        du_dt = (alpha - beta * v) * u
        dv_dt = (-gamma + delta * u) * v
        [du_dt, dv_dt]
    end
    @stan begin
        @parameters begin
            theta::real(lower=0)[4]
            z_init::real(lower=0)[2]
            sigma::real(lower=0)[2]
        end
        @model @views begin
            z = integrate_ode_rk45(dz_dt, z_init, 0, ts, theta,
                missing, missing, 1e-5, 1e-3, 5e2);
            theta[[1,3]] ~ normal(1, 0.5);
            theta[[2,4]] ~ normal(0.05, 0.05);
            sigma ~ lognormal(-1, 1);
            z_init ~ lognormal(log(10), 1);
            y_init ~ lognormal(@broadcasted(log(z_init)), sigma);
            y ~ lognormal(@broadcasted(nothrow_log(z)), sigma');
        end
    end
end
# Two-pool soil-carbon feedback ODE model fit to evolved CO2 measurements.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:soil_incubation}; totalC_t0, t0, N_t, ts, eCO2mean, kwargs...) = begin
    two_pool_feedback(t, C, theta, x_r, x_i) = begin
        k1, k2, alpha21, alpha12 = theta
        [-k1 * C[1] + alpha12 * k2 * C[2]
         -k2 * C[2] + alpha21 * k1 * C[1]]
    end
    evolved_CO2(N_t, t0, ts, gamma, totalC_t0, k1, k2, alpha21, alpha12) = begin
        C_t0 = [gamma * totalC_t0, (1 - gamma) * totalC_t0]
        theta = k1, k2, alpha21, alpha12
        C_hat = integrate_ode_rk45(two_pool_feedback, C_t0, t0, ts, theta)
        # evolved CO2 = initial total carbon minus remaining carbon in both pools
        totalC_t0 .- sum.(eachrow(C_hat))
    end
    @stan begin
        @parameters begin
            k1::real(lower=0)
            k2::real(lower=0)
            alpha21::real(lower=0)
            alpha12::real(lower=0)
            gamma::real(lower=0, upper=1)
            sigma::real(lower=0)
        end
        @model @views begin
            eCO2_hat = evolved_CO2(N_t, t0, ts, gamma, totalC_t0, k1, k2, alpha21, alpha12)
            gamma ~ beta(10, 1);
            k1 ~ normal(0, 1);
            k2 ~ normal(0, 1);
            alpha21 ~ normal(0, 1);
            alpha12 ~ normal(0, 1);
            sigma ~ cauchy(0, 1);
            eCO2mean ~ normal(eCO2_hat, sigma);
        end
    end
end
# One-compartment PK model with Michaelis-Menten elimination and first-order absorption.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:one_comp_mm_elim_abs}; t0, D, V, N_t, times, C_hat) = begin
    one_comp_mm_elim_abs(t, y, theta, x_r, x_i) = begin
        k_a, K_m, V_m = theta
        D, V = x_r
        dose = 0.
        elim = (V_m / V) * y[1] / (K_m + y[1]);
        if t > 0
            dose = exp(-k_a * t) * D * k_a / V;
        end
        [dose - elim]
    end
    C0 = [0.]
    x_r = [D,V]
    x_i = missing
    @stan begin
        @parameters begin
            k_a::real(lower=0)
            K_m::real(lower=0)
            V_m::real(lower=0)
            sigma::real(lower=0)
        end
        @model @views begin
            theta = [k_a, K_m, V_m]
            C = integrate_ode_bdf(one_comp_mm_elim_abs, C0, t0, times, theta, x_r, x_i)
            k_a ~ cauchy(0, 1);
            K_m ~ cauchy(0, 1);
            V_m ~ cauchy(0, 1);
            sigma ~ cauchy(0, 1);

            C_hat ~ lognormal(@broadcasted(nothrow_log(C[:, 1])), sigma);
        end
    end
end
# BYM2 spatial Poisson model (offset only): mixes ICAR and iid effects via rho.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:bym2_offset_only}; N, N_edges, node1, node2, y, E, scaling_factor, kwargs...) = begin
    log_E = @. log(E)
    @stan begin
        @parameters begin
            beta0::real;
            sigma::real(lower=0);
            rho::real(lower=0, upper=1);
            theta::vector[N];
            phi::vector[N];
        end
        convolved_re = @broadcasted(sqrt(1 - rho) * theta + sqrt(rho / scaling_factor) * phi)
        @model @views begin
            y ~ poisson_log(@broadcasted(log_E + beta0 + convolved_re * sigma));

            # ICAR pairwise-difference prior on phi
            target += -0.5 * dot_self(phi[node1] - phi[node2]);

            beta0 ~ normal(0, 1);
            theta ~ normal(0, 1);
            sigma ~ normal(0, 1);
            rho ~ beta(0.5, 0.5);
            # soft sum-to-zero constraint on phi
            sum(phi) ~ normal(0, 0.001 * N);
        end
    end
end
# Bones ordered-categorical grading model (graded response via cumulative logits).
# Reconstructed from line-mangled source.
julia_implementation(::Val{:bones_model}; nChild, nInd, gamma, delta, ncat, grade, kwargs...) = begin
    # error(ncat)
    @stan begin
        @parameters begin
            theta::real[nChild]
        end
        @model @views begin
            theta ~ normal(0.0, 36.);
            p = zeros((nChild, nInd, 5))
            Q = zeros((nChild, nInd, 4))
            for i in 1:nChild
                for j in 1:nInd
                    for k in 1:(ncat[j]-1)
                        Q[i,j,k] = inv_logit(delta[j] * (theta[i] - gamma[j, k]))
                    end
                    p[i,j,1] = 1 - Q[i,j,1]
                    for k in 2:(ncat[j]-1)
                        p[i, j, k] = Q[i, j, k - 1] - Q[i, j, k];
                    end
                    p[i, j, ncat[j]] = Q[i, j, ncat[j] - 1];
                    if grade[i, j] != -1   # -1 marks a missing grade
                        target += log(p[i, j, grade[i, j]]);
                    end
                end
            end
        end
    end
end
# Logistic regression with regularized horseshoe (RHS) prior on coefficients.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:logistic_regression_rhs}; n, d, y, x, scale_icept,
    scale_global,
    nu_global,
    nu_local,
    slab_scale,
    slab_df, kwargs...) = begin
    x = Matrix{Float64}(x)
    @stan begin
        @parameters begin
            beta0::real;
            z::vector[d];
            tau::real(lower=0.);
            lambda::vector(lower=0.)[d];
            caux::real(lower=0.);
        end
        c = slab_scale * sqrt(caux);
        lambda_tilde = @broadcasted sqrt(c ^ 2 * square(lambda) / (c ^ 2 + tau ^ 2 * square(lambda)));
        beta = @. z * lambda_tilde * tau;
        @model begin
            z ~ std_normal();
            lambda ~ student_t(nu_local, 0., 1.);
            tau ~ student_t(nu_global, 0, 2. * scale_global);
            caux ~ inv_gamma(0.5 * slab_df, 0.5 * slab_df);
            beta0 ~ normal(0., scale_icept);

            y ~ bernoulli_logit_glm(x, beta0, beta);
        end
    end
end
# Two-state Gaussian HMM; forward algorithm in log space.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:hmm_example}; N, K, y, kwargs...) = begin
    @stan begin
        @parameters begin
            theta1::simplex[K]
            theta2::simplex[K]
            mu::positive_ordered[K]
        end
        theta = hcat(theta1, theta2)'
        @model @views begin
            target += normal_lpdf(mu[1], 3, 1);
            target += normal_lpdf(mu[2], 10, 1);
            acc = zeros(K)
            gamma = zeros((K,N))'
            gamma[1, :] .= @. normal_lpdf(y[1], mu, 1)
            for t in 2:N
                for k in 1:K
                    for j in 1:K
                        acc[j] = gamma[t - 1, j] + log(theta[j, k]) + normal_lpdf(y[t], mu[k], 1);
                    end
                    gamma[t, k] = log_sum_exp(acc);
                end
            end
            target += log_sum_exp(gamma[N, :])
        end
    end
end
# Capture-recapture Mb model: behavioral response (different p after first capture).
# Reconstructed from line-mangled source.
julia_implementation(::Val{:Mb_model}; M, T, y, kwargs...) = begin
    @assert size(y) == (M, T)
    C = 0
    s = zeros(Int64, M)
    for i in 1:M
        s[i] = sum(y[i, :])
        if s[i] > 0
            C += 1
        end
    end
    @stan begin
        @parameters begin
            omega::real(lower=0.,upper=1.)
            p::real(lower=0.,upper=1.)
            c::real(lower=0.,upper=1.)
        end
        @model @views begin
            # first occasion uses p; later occasions use c if captured previously
            p_eff = hcat(
                fill(p, M),
                @. ((1 - y[:, 1:end-1]) * p + y[:, 1:end-1] * c)
            )
            for i in 1:M
                if s[i] > 0
                    target += bernoulli_lpmf(1, omega) + bernoulli_lpmf(y[i, :], p_eff[i, :])
                else
                    target += log_sum_exp(bernoulli_lpmf(1, omega)
                        + bernoulli_lpmf(y[i, :], p_eff[i, :]),
                        bernoulli_lpmf(0, omega))
                end
            end
        end
    end
end
# Capture-recapture Mh model: individual heterogeneity in detection (logit-normal).
# Reconstructed from line-mangled source.
julia_implementation(::Val{:Mh_model}; M, T, y, kwargs...) = begin
    @assert size(y) == (M, )
    C = 0
    for i in 1:M
        if y[i] > 0
            C += 1
        end
    end
    @stan begin
        @parameters begin
            omega::real(lower=0,upper=1)
            mean_p::real(lower=0,upper=1)
            sigma::real(lower=0,upper=5)
            eps_raw::vector[M]
        end
        eps = @. logit(mean_p) + sigma * eps_raw
        @model begin
            eps_raw ~ normal(0, 1)
            for i in 1:M
                if y[i] > 0
                    target += bernoulli_lpmf(1, omega) + binomial_logit_lpmf(y[i], T, eps[i])
                else
                    target += log_sum_exp(bernoulli_lpmf(1, omega)
                        + binomial_logit_lpmf(0, T, eps[i]),
                        bernoulli_lpmf(0, omega))
                end
            end
        end
    end
end
# Capture-recapture Mth model: time-varying detection plus individual heterogeneity.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:Mth_model}; M, T, y, kwargs...) = begin
    @assert size(y) == (M, T)
    C = 0
    s = zeros(Int64, M)
    for i in 1:M
        s[i] = sum(y[i, :])
        if s[i] > 0
            C += 1
        end
    end
    @stan begin
        @parameters begin
            omega::real(lower=0.,upper=1.)
            mean_p::vector(lower=0.,upper=1.)[T]
            sigma::real(lower=0., upper=5.)
            eps_raw::vector[M]
        end
        @model @views begin
            # M x T matrix: per-occasion logit detection plus individual effect
            logit_p = @. logit(mean_p)' .+ sigma * eps_raw
            eps_raw ~ normal(0., 1.)
            for i in 1:M
                if s[i] > 0
                    target += bernoulli_lpmf(1, omega) + bernoulli_logit_lpmf(y[i, :], logit_p[i, :])
                else
                    target += log_sum_exp(bernoulli_lpmf(1, omega)
                        + bernoulli_logit_lpmf(y[i, :], logit_p[i, :]),
                        bernoulli_lpmf(0, omega))
                end
            end
        end
    end
end
# 2PL IRT model with latent regression on covariates W (adjusted for scale).
# Reconstructed from line-mangled source.
julia_implementation(::Val{Symbol("2pl_latent_reg_irt")}; I, J, N, ii, jj, y, K, W, kwargs...) = begin
    # Compute per-column centering/scaling adjustments for the covariate matrix.
    obtain_adjustments(W) = begin
        local M, K = size(W)
        adj = zeros((2, K))
        adj[1,1] = 0
        adj[2, 1] = 1
        for k in 2:K
            min_w = minimum(W[:,k])
            max_w = maximum(W[:,k])
            minmax_count = sum(w->w in (min_w, max_w), W[:, k])
            if minmax_count == M   # binary-like column
                adj[1, k] = mean(W[:, k]);
                adj[2, k] = max_w - min_w;
            else
                adj[1, k] = mean(W[:, k]);
                adj[2, k] = sd(W[:, k]) * 2;
            end
        end
        return adj
    end
    adj = obtain_adjustments(W);
    W_adj = @. ((W - adj[1:1,:])/adj[2:2,:])
    @stan begin
        @parameters begin
            alpha::vector(lower=0)[I];
            beta_free::vector[I - 1];
            theta::vector[J];
            lambda_adj::vector[K];
        end
        # sum-to-zero constraint on difficulties
        beta = vcat(beta_free, -sum(beta_free))
        @model @views begin
            alpha ~ lognormal(1, 1);
            target += normal_lpdf(beta, 0, 3);
            lambda_adj ~ student_t(3, 0, 1);
            theta ~ normal(W_adj * lambda_adj, 1);
            y ~ bernoulli_logit(@broadcasted(alpha[ii] * theta[jj] - beta[ii]));
        end
    end
end
# Capture-recapture Mtbh model: time + behavior + heterogeneity effects.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:Mtbh_model}; M, T, y, kwargs...) = begin
    @assert size(y) == (M, T)
    C = 0
    s = zeros(Int64, M)
    for i in 1:M
        s[i] = sum(y[i, :])
        if s[i] > 0
            C += 1
        end
    end
    @stan begin
        @parameters begin
            omega::real(lower=0,upper=1)
            mean_p::vector(lower=0,upper=1)[T]
            gamma::real
            sigma::real(lower=0, upper=3)
            eps_raw::vector[M]
        end
        @model @views begin
            eps = @. sigma * eps_raw
            alpha = @. logit(mean_p)
            # behavioral effect gamma kicks in after a previous capture
            logit_p = hcat(
                @.(alpha[1] + eps),
                @.(alpha[2:end]' + eps + gamma * y[:, 1:end-1])
            )
            gamma ~ normal(0, 10)
            eps_raw ~ normal(0, 1)
            for i in 1:M
                if s[i] > 0
                    target += bernoulli_lpmf(1, omega) + bernoulli_logit_lpmf(y[i, :], logit_p[i, :])
                else
                    target += log_sum_exp(bernoulli_lpmf(1, omega)
                        + bernoulli_logit_lpmf(y[i, :], logit_p[i, :]),
                        bernoulli_lpmf(0, omega))
                end
            end
        end
    end
end
# Multi-species occupancy model with correlated species-level effects (u, v).
# Reconstructed from line-mangled source.
julia_implementation(::Val{:multi_occupancy}; J, K, n, X, S, kwargs...) = begin
    cov_matrix_2d(sigma, rho) = begin
        rv12 = sigma[1] * sigma[2] * rho

        [square(sigma[1]) rv12
         rv12 square(sigma[2])
        ]
    end
    lp_observed(X, K, logit_psi, logit_theta) = log_inv_logit(logit_psi) + binomial_logit_lpmf(X, K, logit_theta);
    lp_unobserved(K, logit_psi, logit_theta) = log_sum_exp(
        lp_observed(0, K, logit_psi, logit_theta),
        log1m_inv_logit(logit_psi)
    );
    lp_never_observed(J, K, logit_psi, logit_theta, Omega) = begin
        lp_unavailable = bernoulli_lpmf(0, Omega);
        lp_available = bernoulli_lpmf(1, Omega) + J * lp_unobserved(K, logit_psi, logit_theta);
        return log_sum_exp(lp_unavailable, lp_available);
    end
    @stan begin
        @parameters begin
            alpha::real
            beta::real
            Omega::real(lower=0, upper=+1)
            rho_uv::real(lower=-1, upper=+1)
            sigma_uv::vector(lower=0,upper=+Inf)[2]
            uv1::vector[S]
            uv2::vector[S]
        end
        @transformed_parameters begin
            uv = hcat(uv1, uv2)
            logit_psi = @. uv1 + alpha
            logit_theta = @. uv2 + beta
        end
        @model begin
            alpha ~ cauchy(0, 2.5);
            beta ~ cauchy(0, 2.5);
            sigma_uv ~ cauchy(0, 2.5);
            ((rho_uv + 1) / 2) ~ beta(2, 2);
            target += multi_normal_lpdf(uv, rep_vector(0., 2), cov_matrix_2d(sigma_uv, rho_uv));
            Omega ~ beta(2, 2);

            for i in 1:n
                1 ~ bernoulli(Omega);   # observed species are certainly available
                for j in 1:J
                    if X[i, j] > 0
                        target += lp_observed(X[i, j], K, logit_psi[i], logit_theta[i]);
                    else
                        target += lp_unobserved(K, logit_psi[i], logit_theta[i]);
                    end
                end
            end
            for i in (n + 1):S
                target += lp_never_observed(J, K, logit_psi[i], logit_theta[i], Omega);
            end
        end
    end
end
# Insurance loss-curve model with Weibull or log-logistic growth factor.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:losscurve_sislob};
    growthmodel_id,
    n_data,
    n_time,
    n_cohort,
    cohort_id,
    t_idx,
    cohort_maxtime,
    t_value,
    premium,
    loss, kwargs...) = begin
    growth_factor_weibull(t, omega, theta) = begin
        return 1 - exp(-(t / theta) ^ omega);
    end
    growth_factor_loglogistic(t, omega, theta) = begin
        pow_t_omega = t ^ omega;
        return pow_t_omega / (pow_t_omega + theta ^ omega);
    end
    @stan begin
        @parameters begin
            omega::real(lower=0);
            theta::real(lower=0);

            LR::vector(lower=0)[n_cohort];

            mu_LR::real;
            sd_LR::real(lower=0);

            loss_sd::real(lower=0);
        end
        gf = if growthmodel_id == 1
            @. growth_factor_weibull(t_value, omega, theta)
        else
            @. growth_factor_loglogistic(t_value, omega, theta)
        end
        @model @views begin
            mu_LR ~ normal(0, 0.5);
            sd_LR ~ lognormal(0, 0.5);

            LR ~ lognormal(mu_LR, sd_LR);

            loss_sd ~ lognormal(0, 0.7);

            omega ~ lognormal(0, 0.5);
            theta ~ lognormal(0, 0.5);

            loss ~ normal(@broadcasted(LR[cohort_id] * premium[cohort_id] * gf[t_idx]), (loss_sd * premium)[cohort_id]);
        end
    end
end
# brms-style spline regression with a spline model for both the mean and log-sd.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:accel_splines}; N,Y,Ks,Xs,knots_1,Zs_1_1, Ks_sigma, Xs_sigma,knots_sigma_1,Zs_sigma_1_1,prior_only, kwargs...) = begin
    @stan begin
        @parameters begin
            Intercept::real;
            bs::vector[Ks];
            zs_1_1::vector[knots_1];
            sds_1_1::real(lower=0);
            Intercept_sigma::real;
            bs_sigma::vector[Ks_sigma];
            zs_sigma_1_1::vector[knots_sigma_1];
            sds_sigma_1_1::real(lower=0);
        end
        s_1_1 = @. sds_1_1 * zs_1_1
        s_sigma_1_1 = @. sds_sigma_1_1 * zs_sigma_1_1;
        @model begin
            mu = Intercept .+ Xs * bs + Zs_1_1 * s_1_1;
            sigma = exp.(Intercept_sigma .+ Xs_sigma * bs_sigma
                + Zs_sigma_1_1 * s_sigma_1_1);
            target += student_t_lpdf(Intercept, 3, -13, 36);
            target += normal_lpdf(zs_1_1, 0, 1);
            target += student_t_lpdf(sds_1_1, 3, 0, 36)
            # - 1 * student_t_lccdf(0, 3, 0, 36);
            target += student_t_lpdf(Intercept_sigma, 3, 0, 10);
            target += normal_lpdf(zs_sigma_1_1, 0, 1);
            target += student_t_lpdf(sds_sigma_1_1, 3, 0, 36)
            # - 1 * student_t_lccdf(0, 3, 0, 36);
            if !(prior_only == 1)
                target += normal_lpdf(Y, mu, sigma);
            end
        end
    end
end
# Generalized rating-scale IRT model with latent regression on covariates W.
# Reconstructed from line-mangled source.
julia_implementation(::Val{Symbol("grsm_latent_reg_irt")}; I, J, N, ii, jj, y, K, W, kwargs...) = begin
    # Rating-scale model log-probability of one response.
    rsm(y, theta, beta, kappa) = begin
        unsummed = vcat(0, theta .- beta .- kappa);
        probs = softmax(cumulative_sum(unsummed));
        return categorical_lpmf(y + 1, probs);
    end
    # Per-column centering/scaling of the covariate matrix (same as 2pl_latent_reg_irt).
    obtain_adjustments(W) = begin
        local M, K = size(W)
        adj = zeros((2, K))
        adj[1,1] = 0
        adj[2, 1] = 1
        for k in 2:K
            min_w = minimum(W[:,k])
            max_w = maximum(W[:,k])
            minmax_count = sum(w->w in (min_w, max_w), W[:, k])
            if minmax_count == M
                adj[1, k] = mean(W[:, k]);
                adj[2, k] = max_w - min_w;
            else
                adj[1, k] = mean(W[:, k]);
                adj[2, k] = sd(W[:, k]) * 2;
            end
        end
        return adj
    end
    m = maximum(y)
    adj = obtain_adjustments(W);
    W_adj = @. ((W - adj[1:1,:])/adj[2:2,:])
    @stan begin
        @parameters begin
            alpha::vector(lower=0)[I];
            beta_free::vector[I - 1];
            kappa_free::vector[m - 1];
            theta::vector[J];
            lambda_adj::vector[K];
        end
        # sum-to-zero constraints
        beta = vcat(beta_free, -sum(beta_free))
        kappa = vcat(kappa_free, -sum(kappa_free))
        @model @views begin
            alpha ~ lognormal(1, 1);
            target += normal_lpdf(beta, 0, 3);
            target += normal_lpdf(kappa, 0, 3);
            theta ~ normal(W_adj * lambda_adj, 1);
            lambda_adj ~ student_t(3, 0, 1);
            for n in 1:N
                target += rsm(y[n], theta[jj[n]] .* alpha[ii[n]], beta[ii[n]], kappa)
            end
        end
    end
end
# Prophet time-series model: piecewise linear/logistic trend plus regressors.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:prophet};
    T,
    K,
    t,
    cap,
    y,
    S,
    t_change,
    X,
    sigmas,
    tau,
    trend_indicator,
    s_a,
    s_m, kwargs...) = begin
    # Indicator matrix: A[i, s] = 1 iff changepoint s is active at time t[i].
    get_changepoint_matrix(t, t_change, T, S) = begin
        local A = rep_matrix(0, T, S);
        a_row = rep_row_vector(0, S);
        cp_idx = 1;

        for i in 1:T
            while ((cp_idx <= S) && (t[i] >= t_change[cp_idx]))
                a_row[cp_idx] = 1;
                cp_idx = cp_idx + 1;
            end
            A[i, :] = a_row;
        end
        return A;
    end
    # Offsets keeping the logistic trend continuous across changepoints.
    logistic_gamma(k, m, delta, t_change, S) = begin
        local gamma = zeros(S)
        k_s = append_row(k, k + cumulative_sum(delta));

        m_pr = m;
        for i in 1:S
            gamma[i] = (t_change[i] - m_pr) * (1 - k_s[i] / k_s[i + 1]);
            m_pr = m_pr + gamma[i];
        end
        return gamma;
    end
    logistic_trend(k, m, delta, t, cap, A, t_change, S) = begin
        local gamma = logistic_gamma(k, m, delta, t_change, S);
        return cap .* inv_logit.((k .+ A * delta) .* (t .- m .- A * gamma));
    end
    linear_trend(k, m, delta, t, A, t_change) = begin
        return (k .+ A * delta) .* t .+ (m .+ A * (-t_change .* delta));
    end
    A = get_changepoint_matrix(t, t_change, T, S)
    @stan begin
        @parameters begin
            k::real
            m::real
            delta::vector[S]
            sigma_obs::real(lower=0)
            beta::vector[K]
        end
        @model begin
            k ~ normal(0, 5);
            m ~ normal(0, 5);
            delta ~ double_exponential(0, tau);
            sigma_obs ~ normal(0, 0.5);
            beta ~ normal(0, sigmas);

            if trend_indicator == 0
                y ~ normal(linear_trend(k, m, delta, t, A, t_change)
                    .* (1 .+ X * (beta .* s_m)) + X * (beta .* s_a), sigma_obs);
            elseif trend_indicator == 1
                y ~ normal(logistic_trend(k, m, delta, t, cap, A, t_change, S)
                    .* (1 .+ X * (beta .* s_m)) + X * (beta .* s_a), sigma_obs);
            end
        end
    end
end
# Gaussian-emission HMM; forward algorithm in log space.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:hmm_gaussian}; T, K, y, kwargs...) = begin
    @stan begin
        @parameters begin
            pi1::simplex[K]
            A::simplex[K,K]
            mu::ordered[K]
            sigma::vector(lower=0)[K]
        end
        @model @views begin
            logalpha = zeros((K,T))'
            accumulator = zeros(K)
            logalpha[1,:] .= log.(pi1) .+ normal_lpdf(y[1], mu, sigma);
            for t in 2 : T
                for j in 1:K
                    for i in 1:K
                        accumulator[i] = logalpha[t - 1, i] + log(A[i, j]) + normal_lpdf(y[t], mu[j], sigma[j]);
                    end
                    logalpha[t, j] = log_sum_exp(accumulator);
                end
            end
            target += log_sum_exp(logalpha[T,:]);
        end
    end
end
# Two-state HMM with exponential emissions for (u, v) driving data.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:hmm_drive_0}; K, N, u, v, alpha, kwargs...) = begin
    @stan begin
        @parameters begin
            theta1::simplex[K]
            theta2::simplex[K]
            phi::positive_ordered[K]
            lambda::positive_ordered[K]
        end
        theta = hcat(theta1, theta2)'
        @model @views begin
            for k in 1:K
                target += dirichlet_lpdf(theta[k, :], alpha[k, :]);
            end
            target += normal_lpdf(phi[1], 0, 1);
            target += normal_lpdf(phi[2], 3, 1);
            target += normal_lpdf(lambda[1], 0, 1);
            target += normal_lpdf(lambda[2], 3, 1);

            acc = zeros(K)
            gamma = zeros((K,N))'
            gamma[1,:] .= @.(exponential_lpdf(u[1], phi) + exponential_lpdf(v[1], lambda))
            for t in 2:N
                for k in 1:K
                    for j in 1:K
                        acc[j] = gamma[t - 1, j] + log(theta[j, k]) + exponential_lpdf(u[t], phi[k]) + exponential_lpdf(v[t], lambda[k]);
                    end
                    gamma[t, k] = log_sum_exp(acc);
                end
            end
            target += log_sum_exp(gamma[N, :])
        end
    end
end
# Two-state HMM with normal emissions (scales tau, rho) for (u, v) driving data.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:hmm_drive_1}; K, N, u, v, alpha, tau, rho, kwargs...) = begin
    @stan begin
        @parameters begin
            theta1::simplex[K]
            theta2::simplex[K]
            phi::ordered[K]
            lambda::ordered[K]
        end
        theta = hcat(theta1, theta2)'
        @model @views begin
            for k in 1:K
                target += dirichlet_lpdf(theta[k, :], alpha[k, :]);
            end
            target += normal_lpdf(phi[1], 0, 1);
            target += normal_lpdf(phi[2], 3, 1);
            target += normal_lpdf(lambda[1], 0, 1);
            target += normal_lpdf(lambda[2], 3, 1);

            acc = zeros(K)
            gamma = zeros((K,N))'
            gamma[1,:] .= @.(normal_lpdf(u[1], phi, tau) + normal_lpdf(v[1], lambda, rho))
            for t in 2:N
                for k in 1:K
                    for j in 1:K
                        acc[j] = gamma[t - 1, j] + log(theta[j, k]) + normal_lpdf(u[t], phi[k], tau) + normal_lpdf(v[t], lambda[k], rho);
                    end
                    gamma[t, k] = log_sum_exp(acc);
                end
            end
            target += log_sum_exp(gamma[N, :])
        end
    end
end
# Input-output HMM with regression emissions; forward algorithm in log space.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:iohmm_reg}; T, K, M, y, u, kwargs...) = begin
    @inline normalize(x) = x ./ sum(x)
    @stan begin
        @parameters begin
            pi1::simplex[K]
            w::vector[K,M]
            b::vector[K,M]
            sigma::vector(lower=0)[K]
        end
        @model @views begin
            # input-dependent (unnormalized) transition scores; row 1 is pi1
            unA = hcat(pi1, w * u'[:, 2:end])'
            A = copy(unA)
            for t in 2:T
                A[t, :] .= softmax(unA[t, :])
            end
            logA = log.(A)
            logoblik = normal_lpdf.(y, u * b', sigma')
            accumulator = zeros(K)
            logalpha = zeros((K,T))'
            logalpha[1,:] .= @.(log(pi1) + logoblik[1,:])
            for t in 2:T
                for j in 1:K
                    for i in 1:K
                        accumulator[i] = logalpha[t - 1, i] + logA[t,i] + logoblik[t,j];
                    end
                    logalpha[t, j] = log_sum_exp(accumulator);
                end
            end
            w ~ normal(0, 5);
            b ~ normal(0, 5);
            sigma ~ normal(0, 3);
            target += log_sum_exp(logalpha[T,:]);
        end
    end
end
# Approximate-GP regression (basis-function spectral approximation) for mean and log-sd.
# Reconstructed from line-mangled source.
julia_implementation(::Val{:accel_gp}; N, Y, Kgp_1, Dgp_1, NBgp_1, Xgp_1, slambda_1, Kgp_sigma_1, Dgp_sigma_1, NBgp_sigma_1, Xgp_sigma_1, slambda_sigma_1, prior_only, kwargs...) = begin
    # Square root of the spectral density of an exponentiated-quadratic kernel.
    @inline sqrt_spd_cov_exp_quad(slambda, sdgp, lscale) = begin
        NB, D = size(slambda)
        Dls = length(lscale)
        if Dls == 1
            constant = (sdgp) * (sqrt(2 * pi) * lscale[1]) ^ (D/2)
            neg_half_lscale2 = -0.25 * square(lscale[1])
            @.(constant * exp(neg_half_lscale2 * dot_self($eachrow(slambda))))
        else
            error()
        end
    end
    @inline gpa(X, sdgp, lscale, zgp, slambda) = X * (sqrt_spd_cov_exp_quad(slambda, sdgp, lscale) .* zgp)
    @stan begin
        @parameters begin
            Intercept::real
            sdgp_1::real(lower=0)
            lscale_1::real(lower=0)
            zgp_1::vector[NBgp_1]
            Intercept_sigma::real
            sdgp_sigma_1::real(lower=0)
            lscale_sigma_1::real(lower=0)
            zgp_sigma_1::vector[NBgp_sigma_1]
        end
        vsdgp_1 = fill(sdgp_1, 1)
        vlscale_1 = fill(lscale_1, (1, 1))
        vsdgp_sigma_1 = fill(sdgp_sigma_1, 1)
        vlscale_sigma_1 = fill(lscale_sigma_1, (1, 1))
        @model @views begin
            mu = Intercept .+ gpa(Xgp_1, vsdgp_1[1], vlscale_1[1,:], zgp_1, slambda_1);
            sigma = exp.(Intercept_sigma .+ gpa(Xgp_sigma_1, vsdgp_sigma_1[1], vlscale_sigma_1[1,:], zgp_sigma_1, slambda_sigma_1));
            target += student_t_lpdf(Intercept, 3, -13, 36);
            target += student_t_lpdf(vsdgp_1, 3, 0, 36)
            # - 1 * student_t_lccdf(0, 3, 0, 36);
            target += normal_lpdf(zgp_1, 0, 1);
            target += inv_gamma_lpdf(vlscale_1[1,:], 1.124909, 0.0177);
            target += student_t_lpdf(Intercept_sigma, 3, 0, 10);
            target += student_t_lpdf(vsdgp_sigma_1, 3, 0, 36)
            # - 1 * student_t_lccdf(0, 3, 0, 36);
            target += normal_lpdf(zgp_sigma_1, 0, 1);
            target += inv_gamma_lpdf(vlscale_sigma_1[1,:], 1.124909, 0.0177);
            if prior_only == 0
                target += normal_lpdf(Y, mu, sigma);
            end
        end
    end
end
# Hierarchical GP model: region/state GPs over years plus iid year/state/region effects,
# with variance decomposed via a 17-simplex. Reconstructed from line-mangled source.
julia_implementation(::Val{:hierarchical_gp};
    N,
    N_states,
    N_regions,
    N_years_obs,
    N_years,
    state_region_ind,
    state_ind,
    region_ind,
    year_ind,
    y, kwargs...) = begin
    @stan begin
        years = 1:N_years
        counts = fill(2, 17)
        @parameters begin
            GP_region_std::matrix[N_years, N_regions]
            GP_state_std::matrix[N_years, N_states]
            year_std::vector[N_years_obs]
            state_std::vector[N_states]
            region_std::vector[N_regions]
            tot_var::real(lower=0)
            prop_var::simplex[17]
            mu::real
            length_GP_region_long::real(lower=0)
            length_GP_state_long::real(lower=0)
            length_GP_region_short::real(lower=0)
            length_GP_state_short::real(lower=0)
        end
        # split total variance across the 17 components
        vars = 17 * prop_var * tot_var;
        sigma_year = sqrt(vars[1]);
        sigma_region = sqrt(vars[2]);
        sigma_state = @.(sqrt(vars[3:end]))

        sigma_GP_region_long = sqrt(vars[13]);
        sigma_GP_state_long = sqrt(vars[14]);
        sigma_GP_region_short = sqrt(vars[15]);
        sigma_GP_state_short = sqrt(vars[16]);
        sigma_error_state_2 = sqrt(vars[17]);

        region_re = sigma_region * region_std;
        year_re = sigma_year * year_std;
        state_re = sigma_state[state_region_ind] .* state_std;

        begin
            cov_region = gp_exp_quad_cov(years, sigma_GP_region_long,
                length_GP_region_long) + gp_exp_quad_cov(years, sigma_GP_region_short,
                length_GP_region_short);
            cov_state = gp_exp_quad_cov(years, sigma_GP_state_long,
                length_GP_state_long) + gp_exp_quad_cov(years, sigma_GP_state_short,
                length_GP_state_short);
            for year in 1 : N_years
                # jitter for numerical stability of the Cholesky factorization
                cov_region[year, year] = cov_region[year, year] + 1e-6;
                cov_state[year, year] = cov_state[year, year] + 1e-6;
            end
            L_cov_region = cholesky_decompose(cov_region);
            L_cov_state = cholesky_decompose(cov_state);
            GP_region = L_cov_region * GP_region_std;
            GP_state = L_cov_state * GP_state_std;
        end
        @model begin
            obs_mu = zeros(N)
            for n in 1 : N
                obs_mu[n] = mu + year_re[year_ind[n]] + state_re[state_ind[n]] +
                    region_re[region_ind[n]] +
                    GP_region[year_ind[n], region_ind[n]] +
                    GP_state[year_ind[n], state_ind[n]];
            end
            y ~ normal(obs_mu, sigma_error_state_2);

            (GP_region_std) ~ normal(0, 1);
            (GP_state_std) ~ normal(0, 1);
            year_std ~ normal(0, 1);
            state_std ~ normal(0, 1);
            region_std ~ normal(0, 1);
            mu ~ normal(.5, .5);
            tot_var ~ gamma(3, 3);
            prop_var ~ dirichlet(counts);
            length_GP_region_long ~ weibull(30, 8);
            length_GP_state_long ~ weibull(30, 8);
            length_GP_region_short ~ weibull(30, 3);
            length_GP_state_short ~ weibull(30, 3);
        end
    end
end
# posteriordb model `kronecker_gp`: GP with a Kronecker-structured covariance —
# a squared-exponential kernel over x1 (Sigma1) crossed with a correlation
# matrix Lambda built from a Cholesky-factor-corr parameter. The likelihood is
# evaluated via the eigendecompositions of the two factors.
# NOTE(review): this chunk arrived line-mangled; the statements below are the
# faithful reconstruction (LHS tokens had been displaced to following lines).
julia_implementation(::Val{:kronecker_gp}; n1, n2, x1, y, kwargs...) = begin
    # (A ⊗ B) * vec-matrix product without forming the Kronecker product.
    kron_mvprod(A, B, V) = adjoint(A * adjoint(B * V))
    # Outer sum of the two eigenvalue sets plus the noise variance.
    calculate_eigenvalues(A, B, sigma2) = A .* B' .+ sigma2
    xd = -(x1 .- x1') .^ 2  # negative squared distances, reused in the kernel
    # NOTE(review): reconstructed verbatim from the source — this early return of an
    # empty @stan model makes the block below unreachable (implementation appears
    # deliberately disabled/stubbed). TODO confirm against upstream StanBlocks.jl.
    return @stan begin end
    @stan begin
        @parameters begin
            var1::real(lower=0)
            bw1::real(lower=0)
            L::cholesky_factor_corr[n2]
            sigma1::real(lower=.00001)
        end
        Lambda = multiply_lower_tri_self_transpose(L)
        # Squared-exponential kernel with jitter; Symmetric guards eigen().
        Sigma1 = Symmetric(var1 .* exp.(xd .* bw1) + .00001 * I);
        R1, Q1 = eigen(Sigma1);
        R2, Q2 = eigen(Lambda);
        eigenvalues = calculate_eigenvalues(R2, R1, sigma1);
        @model @views begin
            var1 ~ lognormal(0, 1);
            bw1 ~ cauchy(0, 2.5);
            sigma1 ~ lognormal(0, 1);
            L ~ lkj_corr_cholesky(2);
            # Gaussian log-likelihood in the eigenbasis: quadratic form plus
            # log-determinant. NOTE(review): `log(eigenvalues)` on a matrix is
            # reconstructed verbatim — presumably rewritten elementwise inside
            # @model; confirm against StanBlocks' model-macro semantics.
            target += -0.5 * sum(y .* kron_mvprod(Q1, Q2, kron_mvprod(transpose(Q1), transpose(Q2), y) ./ eigenvalues)) - 0.5 * sum(log(eigenvalues))
        end
    end
end
# posteriordb model `covid19imperial_v2` (Imperial College COVID-19 model):
# renewal-equation epidemic model across M countries. Infections are seeded for
# the first N0 days, propagated via a discretized serial interval SI, modulated
# by covariate effects X * alpha on the reproduction number, and mapped to
# expected deaths through the infection-fatality distribution f; deaths follow a
# negative-binomial observation model.
# NOTE(review): this chunk arrived line-mangled (assignment LHS tokens displaced
# to the following lines); reconstructed below and cross-checked against the
# Imperial `base.stan` model in posteriordb.
julia_implementation(::Val{:covid19imperial_v2}; M, P, N0, N, N2, cases, deaths, f, X, EpidemicStart, pop, SI, kwargs...) = begin
    SI_rev = reverse(SI)                                 # serial interval, reversed for the convolution
    f_rev = mapreduce(reverse, hcat, eachcol(f))'        # row m = reversed fatality dist. for country m
    @stan begin
        @parameters begin
            mu::real(lower=0)[M]
            alpha_hier::real(lower=0)[P]
            kappa::real(lower=0)
            y::real(lower=0)[M]
            phi::real(lower=0)
            tau::real(lower=0)
            ifr_noise::real(lower=0)[M]
        end
        @model @views begin
            prediction = rep_matrix(0., N2, M);
            E_deaths = rep_matrix(0., N2, M);
            Rt = rep_matrix(0., N2, M);
            Rt_adj = copy(Rt);
            cumm_sum = rep_matrix(0., N2, M);
            # Centered covariate effect, as in the Imperial model.
            alpha = alpha_hier .- (log(1.05) / 6.)
            for m in 1:M
                # Seed the first N0 days with y[m] infections per day.
                prediction[1:N0, m] .= y[m]
                cumm_sum[2:N0, m] .= cumulative_sum(prediction[2:N0, m]);
                Rt[:, m] .= mu[m] * exp.(-X[m,:,:] * alpha);
                Rt_adj[1:N0, m] .= Rt[1:N0, m];
                for i in (N0+1):N2
                    # Renewal equation: convolve past infections with the serial interval.
                    convolution = dot_product(sub_col(prediction, 1, m, i - 1), stan_tail(SI_rev, i - 1))
                    cumm_sum[i, m] = cumm_sum[i - 1, m] + prediction[i - 1, m];
                    # Deplete the susceptible pool.
                    Rt_adj[i, m] = ((pop[m] - cumm_sum[i, m]) / pop[m]) * Rt[i, m];
                    prediction[i, m] = Rt_adj[i, m] * convolution;
                end
                E_deaths[1, m] = 1e-15 * prediction[1, m];
                for i in 2:N2
                    E_deaths[i, m] = ifr_noise[m] * dot_product(sub_col(prediction, 1, m, i - 1), stan_tail(f_rev[m, :], i - 1));
                end
            end
            tau ~ exponential(0.03);
            y ~ exponential(1 / tau)
            phi ~ normal(0, 5);
            kappa ~ normal(0, 0.5);
            mu ~ normal(3.28, kappa);
            alpha_hier ~ gamma(.1667, 1);
            ifr_noise ~ normal(1, 0.1);
            for m in 1:M
                # Only days from each country's epidemic start contribute.
                deaths[EpidemicStart[m] : N[m], m] ~ neg_binomial_2(E_deaths[EpidemicStart[m] : N[m], m], phi);
            end
        end
    end
end
julia_implementation(::Val{:covid19imperial_v3}; kwargs...) = julia_implementation(Val{:covid19imperial_v2}(); kwargs...)
# posteriordb model `gpcm_latent_reg_irt`: generalized partial credit model with a
# latent regression of ability on covariates W. Keyword data: I items, J persons,
# N responses, item/person indices ii/jj, responses y, K covariates, design W.
# NOTE(review): this chunk arrived line-mangled (LHS tokens displaced to the
# following lines); reconstructed below — confirm against upstream StanBlocks.jl.
julia_implementation(::Val{:gpcm_latent_reg_irt}; I, J, N, ii, jj, y, K, W, kwargs...) = begin
    # Partial-credit log-probability of response y (0-based) given ability theta
    # and the item's step parameters beta.
    pcm(y, theta, beta) = begin
        unsummed = append_row(rep_vector(0.0, 1), theta .- beta)
        probs = softmax(cumulative_sum(unsummed))
        categorical_lpmf(y + 1, probs)
    end
    # Per-column centering/scaling adjustments for the covariate matrix: row 1 is
    # the shift, row 2 the scale. Column 1 (presumably an intercept) is left as-is.
    obtain_adjustments(W) = begin
        local M, K = size(W)  # shadows the outer I/K on purpose
        adj = zeros((2, K))
        adj[1,1] = 0
        adj[2, 1] = 1
        for k in 2:K
            min_w = minimum(W[:,k])
            max_w = maximum(W[:,k])
            minmax_count = sum(w->w in (min_w, max_w), W[:, k])
            if minmax_count == M
                # Binary-style column: scale by its range.
                adj[1, k] = mean(W[:, k]);
                adj[2, k] = max_w - min_w;
            else
                # Continuous column: scale by two standard deviations.
                adj[1, k] = mean(W[:, k]);
                adj[2, k] = sd(W[:, k]) * 2;
            end
        end
        return adj
    end
    # m[i]: highest observed category for item i; pos[i]: offset of item i's step
    # parameters within the flattened beta vector.
    m = fill(0, I)
    pos = fill(1, I)
    for n in 1:N
        if y[n] > m[ii[n]]
            m[ii[n]] = y[n]
        end
    end
    for i in 2:I
        pos[i] = m[i - 1] + pos[i - 1]
    end
    adj = obtain_adjustments(W);
    W_adj = @. ((W - adj[1:1,:])/adj[2:2,:])
    @stan begin
        @parameters begin
            alpha::real(lower=0)[I]
            beta_free::vector[sum(m) - 1]
            theta::vector[J]
            lambda_adj::vector[K]
        end
        # Sum-to-zero constraint: the last step parameter is the negated sum.
        beta = vcat(beta_free, -sum(beta_free))
        @model @views begin
            alpha ~ lognormal(1, 1);
            target += normal_lpdf(beta, 0, 3);
            # Latent regression of ability on the adjusted covariates.
            theta ~ normal(W_adj * lambda_adj, 1);
            lambda_adj ~ student_t(3, 0, 1);
            for n in 1:N
                target += pcm(y[n], theta[jj[n]] .* alpha[ii[n]], segment(beta, pos[ii[n]], m[ii[n]]));
            end
        end
    end
end