top of page

From: Bayesian Models for Astrophysical Data, Cambridge Univ. Press
(c) 2017,  Joseph M. Hilbe, Rafael S. de Souza and Emille E. O. Ishida 
 
You are kindly asked to include the complete citation if you use this material in a publication.

Code 7.12 Bayesian log-gamma–logit hurdle model in Python using Stan

=======================================================

import numpy as np
import pystan
import statsmodels.api as sm
from scipy.stats import uniform, gamma, bernoulli

# Data
np.random.seed(33559)                          # set seed to replicate example
nobs = 1000                                    # number of obs in model

# Generate predictors, design matrix
x1 = uniform.rvs(loc=0, scale=4, size=nobs)    # single continuous predictor on [0, 4)
xc = -1 + 0.75 * x1                            # linear predictor of the gamma component (log link)
exc = np.exp(xc)                               # gamma shape parameter, exp(xc)
phi = 0.066                                    # rate parameter matched by the Stan model
r = 1.0/phi                                    # numpy's gamma is parameterized by scale = 1/rate
y = np.random.gamma(shape=exc, scale=r)        # strictly positive gamma outcomes

# Construct filter: Bernoulli mask producing the structural zeros
xb = -2 + 1.5 * x1                             # linear predictor of the zero process (logit link)
pi = 1 / (1 + np.exp(-xb))                     # probability of a structural zero
bern = bernoulli.rvs(1 - pi)                   # 1 = keep gamma draw, 0 = structural zero
gy = y * bern                                  # vectorized; replaces the original per-element loop

# Design matrix: intercept column followed by x1.
# The original `np.transpose(x1)` was a no-op on a 1-D array and has been
# removed; the constant column is prepended directly instead of via
# sm.add_constant, yielding an identical (nobs, 2) float array.
X = np.column_stack((np.ones(nobs), x1))

# Fit
mydata = {}                                    # build data dictionary for Stan
mydata['Y'] = gy                               # response variable (zeros + positive gamma values)
mydata['N'] = nobs                             # sample size
mydata['Xb'] = X                               # predictors for the binary (hurdle) component
mydata['Xc'] = X                               # predictors for the positive (gamma) component
mydata['Kb'] = X.shape[1]                      # number of coefficients per component
mydata['Kc'] = X.shape[1]

# Stan program: logit hurdle for the zeros, gamma (shape-rate) for the positives.
# The string below is passed verbatim to Stan and must not be altered here.
# NOTE(review): the indicator statement `(Y[i] == 0) ~ bernoulli(Pi[i])`
# contributes log(Pi[i]) for zeros and log1m(Pi[i]) for positives, which is
# the standard Stan hurdle construction; the gamma likelihood is evaluated
# only when Y[i] > 0. The `T[0,]` truncation appears redundant, since the
# gamma density already has support on (0, inf) — confirm against the Stan
# manual. No priors are declared, so Stan's implicit flat priors are used.
stan_code = """
data{
    int N;
    int Kb;
    int Kc;
    matrix[N, Kb] Xb;
    matrix[N, Kc] Xc;
    real<lower=0> Y[N];
}
parameters{
    vector[Kc] beta;
    vector[Kb] gamma;
    real<lower=0> phi;
}
model{
    vector[N] mu;
    vector[N] Pi;


    mu = exp(Xc  *  beta);

    for (i in 1:N) Pi[i] = inv_logit(Xb[i] * gamma);

    for (i in 1:N) {
        (Y[i] == 0) ~ bernoulli(Pi[i]);
        if (Y[i] > 0) Y[i] ~ gamma(mu[i], phi) T[0,];
    }
}
"""

# Run MCMC: 3 chains of 6000 iterations each, the first 4000 of every chain
# discarded as warmup, sampled across 3 parallel jobs.
sampler_args = dict(
    model_code=stan_code,
    data=mydata,
    iter=6000,
    warmup=4000,
    chains=3,
    n_jobs=3,
)
fit = pystan.stan(**sampler_args)

# Output: posterior summary table
print(fit)

=========================================================

Output on screen:

Inference for Stan model: anon_model_530ffe5cbc409d8d3e9f94f71362b8d5.
3 chains, each with iter=6000; warmup=4000; thin=1; 
post-warmup draws per chain=2000, total post-warmup draws=6000.

                       mean    se_mean           sd        2.5%         25%        50%        75%      97.5%         n_eff      Rhat
beta[0]            -0.97        1.3e-3        0.08       -1.12        -1.02       -0.97       -0.92         -0.82         3396         1.0
beta[1]               0.8        6.8e-4        0.04         0.72        0.77           0.8        0.82           0.87         3348         1.0
gamma[0]         -1.9        2.9e-3        0.16       -2.23        -2.01       -1.89       -1.79         -1.58         3096         1.0
gamma[1]        1.49         1.7e-3       0.09         1.31         1.43         1.49        1.56          1.68         3118         1.0
phi                   0.07         1.0e-4     6.2e-3        0.06         0.07         0.07         0.08         0.09         3649         1.0
lp__               -1603            0.03        1.49      -1607       -1604      -1603       -1602       -1601        2679         1.0

Samples were drawn using NUTS at Sun Apr 30 23:51:09 2017.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at 
convergence, Rhat=1).

 

bottom of page