In [1]:
using Turing
using DataFrames
using CSV
using Random
using StatisticalRethinking
using StatisticalRethinking: link
using StatisticalRethinkingPlots
using ParetoSmooth
using StatsPlots
using StatsBase
using FreqTables
using Logging

default(label=false)
Logging.disable_logging(Logging.Warn);

11.1 Binomial regression

Code 11.1

In [2]:
d = DataFrame(CSV.File("data/chimpanzees.csv", delim=';'));

Code 11.2

In [3]:
d[!,:treatment] = 1 .+ d.prosoc_left .+ 2*d.condition;

Code 11.3

In [4]:
println(freqtable(d, :treatment, :prosoc_left, subset=d.condition .== 0))
println(freqtable(d, :treatment, :prosoc_left, subset=d.condition .== 1))
2×2 Named Matrix{Int64}
treatment ╲ prosoc_left │   0    1
────────────────────────┼─────────
1                       │ 126    0
2                       │   0  126
2×2 Named Matrix{Int64}
treatment ╲ prosoc_left │   0    1
────────────────────────┼─────────
3                       │ 126    0
4                       │   0  126

Code 11.4

In [5]:
@model function m11_1(pulled_left)
    a ~ Normal(0, 10)
    p = logistic(a)     # inverse of the `logit` function
    pulled_left ~ Binomial(1, p)
end
Out[5]:
m11_1 (generic function with 2 methods)

Code 11.5

In [6]:
Random.seed!(1999)
prior_chain = sample(m11_1(d.pulled_left), Prior(), 10000)
prior = DataFrame(prior_chain);

Code 11.6

In [7]:
p = logistic.(prior.a)
density(p, bandwidth=0.01)
Out[7]:

Code 11.7

In [8]:
@model function m11_2(pulled_left, treatment)
    a ~ Normal(0, 1.5)
    treat_levels = length(levels(d.treatment))
    b ~ MvNormal(zeros(treat_levels), 10)
    
    p = @. logistic(a + b[treatment])
    for i  eachindex(pulled_left)
        pulled_left[i] ~ Binomial(1, p[i])
    end
end

Random.seed!(1999)
prior_chain = sample(m11_2(d.pulled_left, d.treatment), Prior(), 10000)
prior = DataFrame(prior_chain);

f = i -> @. logistic(prior.a + prior[!,"b[$i]"])
p = map(f, 1:4);

Code 11.8

In [9]:
density(abs.(p[1] .- p[2]), bandwidth=0.01)
Out[9]:

Code 11.9

In [10]:
@model function m11_3(pulled_left, treatment)
    a ~ Normal(0, 1.5)
    treat_levels = length(levels(treatment))
    b ~ MvNormal(zeros(treat_levels), 0.5)
    
    p = @. logistic(a + b[treatment])
    for i  eachindex(pulled_left)
        pulled_left[i] ~ Binomial(1, p[i])
    end
end

Random.seed!(1999)
prior_chain = sample(m11_3(d.pulled_left, d.treatment), Prior(), 10000)
prior = DataFrame(prior_chain);

f = i -> @. logistic(prior.a + prior[!,"b[$i]"])
p = map(f, 1:4);

mean(abs.(p[1] .- p[2]))
Out[10]:
0.09924607142428958

Code 11.10

In [11]:
dat_list = d[!,[:pulled_left, :actor, :treatment]];

Code 11.11

In [12]:
@model function m11_4(actor, treatment, pulled_left)
    act_levels = length(levels(actor))
    a ~ MvNormal(zeros(act_levels), 1.5)
    treat_levels = length(levels(treatment))
    b ~ MvNormal(zeros(treat_levels), 0.5)
    
    p = @. logistic(a[actor] + b[treatment])
    for i  eachindex(pulled_left)
        pulled_left[i] ~ Binomial(1, p[i])
    end
end

m11_4_chain = sample(m11_4(dat_list.actor, dat_list.treatment, dat_list.pulled_left), NUTS(), 1000)
m11_4_df = DataFrame(m11_4_chain)
precis(m11_4_df)
┌───────┬────────────────────────────────────────────────────────────┐
│ param     mean     std     5.5%      50%    94.5%       histogram │
├───────┼────────────────────────────────────────────────────────────┤
│  a[1] │  -0.457  0.3381  -0.9885  -0.4566   0.0777  ▁▁▁▁▂▄▇█▇▆▂▁▁▁ │
│  a[2] │  3.8942   0.775   2.7576   3.8283   5.1935   ▁▁▄▇█▇▅▂▁▁▁▁▁ │
│  a[3] │  -0.752  0.3455  -1.2995  -0.7384  -0.2267    ▁▁▁▃▆███▅▂▁▁ │
│  a[4] │ -0.7477  0.3435   -1.303  -0.7393  -0.2006    ▁▁▁▃▅██▆▄▂▁▁ │
│  a[5] │ -0.4456  0.3427  -0.9981  -0.4388   0.0985    ▁▁▁▂▄▅█▇▆▂▁▁ │
│  a[6] │  0.4896  0.3515  -0.0806   0.4863    1.042   ▁▁▁▃▄███▄▂▁▁▁ │
│  a[7] │  1.9586  0.4169   1.3324   1.9373   2.6741   ▁▁▃▆███▇▄▃▁▁▁ │
│  b[1] │ -0.0421  0.2943  -0.4934  -0.0546   0.4495     ▁▁▄▆█▇▄▂▁▁▁ │
│  b[2] │  0.4819  0.2986   0.0119   0.4772    0.949      ▁▂▅▇█▇▄▂▁▁ │
│  b[3] │ -0.3816  0.2945  -0.8235  -0.3971   0.1029     ▁▁▁▂▆██▅▃▁▁ │
│  b[4] │  0.3715  0.2882  -0.0681   0.3632   0.8412     ▁▁▃▆██▅▂▁▁▁ │
└───────┴────────────────────────────────────────────────────────────┘

Code 11.12

In [13]:
p_left = DataFrame(map(i -> "V$i" => logistic.(m11_4_df[:,"a[$i]"]), 1:7)...);
coeftab_plot(p_left)
Out[13]:

Code 11.13

In [14]:
names = ["R/N", "L/N", "R/P", "L/P"]
labs = DataFrame(map(i -> names[i] => m11_4_df[:,"b[$i]"], 1:4)...)
coeftab_plot(labs)
Out[14]:

Code 11.14

In [15]:
diffs = DataFrame(
    db13=m11_4_df."b[1]" .- m11_4_df."b[3]",
    db24=m11_4_df."b[2]" .- m11_4_df."b[4]",
)
coeftab_plot(diffs)
Out[15]:

Code 11.15

In [16]:
gd = groupby(d, [:actor, :treatment])
c = combine(gd, :pulled_left => mean => :val)
pl = unstack(c, :actor, :treatment, :val)
pl[1,:]
Out[16]:

DataFrameRow (5 columns)

actor1234
Int64Float64?Float64?Float64?Float64?
110.3333330.50.2777780.555556

Code 11.16

In [17]:
names = ["R/N", "L/N", "R/P", "L/P"]
p = plot(ylims=(0, 1.1), ylab="proportion left lever", showaxis=:y, xticks=false)
hline!([0.5], c=:gray, s=:dash)
for actor in 1:7
    ofs = (actor-1)*4
    actor > 1 && vline!([ofs+0.5], c=:gray)
    plot!([ofs+1,ofs+3], collect(pl[actor,["1","3"]]), lw=2, m=:o, c=:blue)
    plot!([ofs+2,ofs+4], collect(pl[actor,["2","4"]]), lw=2, m=:o, c=:blue)
    anns = [
        (ofs+idx, pl[actor,string(idx)]+.04, (name, 8))
        for (idx,name)  enumerate(names)
    ]
    actor != 2 && annotate!(anns)
end

annotate!([
    (2.5 + (idx-1)*4, 1.1, ("actor $idx", 8))
    for idx  1:7
])
p
Out[17]:

Code 11.17

In [18]:
l_fun = (r, (ai, bi)) -> logistic(get(r, "a[$ai]", missing) + get(r, "b[$bi]", missing))
p_post = link(m11_4_df, l_fun, Iterators.product(1:7, 1:4))
p_μ = map(mean, p_post)
p_ci = map(PI, p_post);
In [19]:
# visualize mean and cred intervals of posterior distribution

# compute relative intervals 
rel_ci = map(idx -> (p_μ[idx]-p_ci[idx][1], p_ci[idx][2]-p_μ[idx]), CartesianIndices(p_ci))

names = ["R/N", "L/N", "R/P", "L/P"]
p = plot(ylims=(0, 1.1), ylab="proportion left lever", title="Posterior", showaxis=:y, xticks=false)
hline!([0.5], c=:gray, s=:dash)
for actor in 1:7
    ofs = (actor-1)*4
    actor > 1 && vline!([ofs+0.5], c=:gray)
    err = [rel_ci[actor,1], rel_ci[actor,3]]
    plot!([ofs+1,ofs+3], collect(p_μ[actor,[1,3]]), err=err, lw=2, m=:o, c=:blue)
    err = [rel_ci[actor,2], rel_ci[actor,4]]    
    plot!([ofs+2,ofs+4], collect(p_μ[actor,[2,4]]), err=err, lw=2, m=:o, c=:blue)
    anns = [
        (ofs+idx, p_μ[actor,idx]+.04, (name, 8))
        for (idx,name)  enumerate(names)
    ]
    actor != 2 && annotate!(anns)
end

annotate!([
    (2.5 + (idx-1)*4, 1.1, ("actor $idx", 8))
    for idx  1:7
])
p
Out[19]:

Code 11.18

In [20]:
d.side = d.prosoc_left .+ 1
d.cond = d.condition .+ 1;

Code 11.19

In [21]:
@model function m11_5(actor, side, cond, pulled_left)
    act_levels = length(levels(actor))
    a ~ MvNormal(zeros(act_levels), 1.5)
    side_levels = length(levels(side))
    bs ~ MvNormal(zeros(side_levels), 0.5)    
    cond_levels = length(levels(cond))
    bc ~ MvNormal(zeros(cond_levels), 0.5)
    
    p = @. logistic(a[actor] + bs[side] + bc[cond])
    for i  eachindex(pulled_left)
        pulled_left[i] ~ Binomial(1, p[i])
    end
end

m11_5_chain = sample(m11_5(d.actor, d.side, d.cond, d.pulled_left), NUTS(), 1000)
m11_5_df = DataFrame(m11_5_chain);

Code 11.20

In [22]:
l_fun = (r, (ai, bi, pull_left)) -> begin
    p = logistic(get(r, "a[$ai]", 0) + get(r, "b[$bi]", 0))
    binomlogpdf(1, p, pull_left)
end

m11_4_ll = link(m11_4_df, l_fun, zip(d.actor, d.treatment, d.pulled_left))
m11_4_ll = hcat(m11_4_ll...);

l_fun = (r, (ai, si, ci, pull_left)) -> begin
    p = logistic(get(r, "a[$ai]", 0) + get(r, "bs[$si]", 0) + get(r, "bc[$ci]", 0))
    binomlogpdf(1, p, pull_left)
end

m11_5_ll = link(m11_5_df, l_fun, zip(d.actor, d.side, d.cond, d.pulled_left))
m11_5_ll = hcat(m11_5_ll...);

compare([m11_5_ll, m11_4_ll], :psis, mnames=["m5", "m4"])
Out[22]:

2 rows × 8 columns

modelsPSISlppdSEdPSISdSEpPSISweight
StringFloat64Float64Float64Float64Float64Float64Float64
1m5529.2515.3518.950.00.07.420.73
2m4531.2515.2918.912.01.268.530.27

Code 11.21 and 11.22 were omitted, as they are stan-specific

Code 11.23

In [23]:
mean(@. exp(m11_4_df."b[4]" - m11_4_df."b[2]"))
Out[23]:
0.929122563241465

Code 11.24

In [24]:
gb = groupby(d, [:treatment, :actor, :side, :cond])
d_aggregated = combine(gb, :pulled_left => sum => :left_pulls)
first(d_aggregated, 8)
Out[24]:

8 rows × 5 columns

treatmentactorsidecondleft_pulls
Int64Int64Int64Int64Int64
111116
2121118
313115
414116
515116
6161114
7171114
821219

Code 11.25

In [25]:
@model function m11_6(actor, treatment, left_pulls)
    act_levels = length(levels(actor))
    a ~ MvNormal(zeros(act_levels), 1.5)
    treat_levels = length(levels(treatment))
    b ~ MvNormal(zeros(treat_levels), 0.5)
    
    p = @. logistic(a[actor] + b[treatment])
    for i  eachindex(left_pulls)
        left_pulls[i] ~ Binomial(18, p[i])
    end
end

m11_6_chain = sample(m11_6(d_aggregated.actor, d_aggregated.treatment, d_aggregated.left_pulls), NUTS(), 1000)
m11_6_df = DataFrame(m11_6_chain);

Code 11.26

In [26]:
l_fun = (r, (ai, bi, left_pulls)) -> begin
    p = logistic(get(r, "a[$ai]", 0) + get(r, "b[$bi]", 0))
    binomlogpdf(18, p, left_pulls)
end

m11_6_ll = link(m11_6_df, l_fun, zip(d_aggregated.actor, d_aggregated.treatment, d_aggregated.left_pulls))
m11_6_ll = hcat(m11_6_ll...)

try
    compare([m11_6_ll, m11_4_ll], :psis, mnames=["m6", "m4"])
catch e
    println(e)
end
DimensionMismatch("arrays could not be broadcast to a common size; got a dimension with lengths 28 and 504")

Code 11.27

In [27]:
(
    -2*binomlogpdf(9, 0.2, 6),
    -2*sum(binomlogpdf.(1, 0.2, [1,1,1,1,1,1,0,0,0]))
)
Out[27]:
(11.79048265940783, 20.652116257094466)

Code 11.28

In [28]:
d = DataFrame(CSV.File("data/UCBadmit.csv", skipto=2, 
        header=[:id, :dept, :gender, :admit, :reject, :applications]))
Out[28]:

12 rows × 6 columns

iddeptgenderadmitrejectapplications
Int64String1String7Int64Int64Int64
11Amale512313825
22Afemale8919108
33Bmale353207560
44Bfemale17825
55Cmale120205325
66Cfemale202391593
77Dmale138279417
88Dfemale131244375
99Emale53138191
1010Efemale94299393
1111Fmale22351373
1212Ffemale24317341

Code 11.29

In [29]:
dat = d[!, [:admit, :applications]]
dat.gid = @. ifelse(d.gender == "male", 1, 2)

@model function m11_7(admit, applications, gid)
    a ~ MvNormal([0, 0], 1.5)
    p = @. logistic(a[gid])
    for i  eachindex(applications)
        admit[i] ~ Binomial(applications[i], p[i])
    end
end

m11_7_chain = sample(m11_7(dat.admit, dat.applications, dat.gid), NUTS(), 1000)
m11_7_df = DataFrame(m11_7_chain)
precis(m11_7_df)
┌───────┬──────────────────────────────────────────────────────────┐
│ param     mean     std     5.5%      50%    94.5%     histogram │
├───────┼──────────────────────────────────────────────────────────┤
│  a[1] │ -0.2216  0.0383  -0.2832  -0.2213  -0.1563  ▁▁▂▄▇██▇▄▂▁▁ │
│  a[2] │ -0.8306  0.0508  -0.9126  -0.8313  -0.7488       ▁▂▆█▅▂▁ │
└───────┴──────────────────────────────────────────────────────────┘

Code 11.30

In [30]:
diff_a = m11_7_df."a[1]" .- m11_7_df."a[2]"
diff_p = @. logistic(m11_7_df."a[1]") - logistic(m11_7_df."a[2]")

precis(DataFrame(diff_a=diff_a, diff_p=diff_p))
┌────────┬────────────────────────────────────────────────────┐
│  param    mean     std    5.5%     50%   94.5%   histogram │
├────────┼────────────────────────────────────────────────────┤
│ diff_a │  0.609  0.0635  0.5079  0.6094   0.711   ▁▁▂▄██▆▂▁ │
│ diff_p │ 0.1412  0.0143  0.1186  0.1412  0.1641  ▁▁▂▅██▆▂▁▁ │
└────────┴────────────────────────────────────────────────────┘

Code 11.31

In [31]:
Random.seed!(1)

fun = (r, (apps, gid)) -> begin
    p = logistic(get(r, "a[$gid]", 0))
    rand(Binomial(apps, p))
end

admit_pred = link(m11_7_df, fun, zip(dat.applications, dat.gid))
admit_rate = @. admit_pred ./ dat.applications

# plot
xrange = 1:12
p = plot(yrange=(0, 1), title="Posterior validation check", xlab="case", ylab="admit")
scatter!(xrange, mean.(admit_rate), yerr=std.(admit_rate))
pi_rate = PI.(admit_rate)
scatter!(xrange, first.(pi_rate), shape=:cross, c=:black)
scatter!(xrange, last.(pi_rate), shape=:cross, c=:black)

dat_rates = dat.admit ./ dat.applications

for idx in 1:2:12
    r = [dat_rates[idx], dat_rates[idx+1]]
    plot!([idx, idx+1], r, mark=:o, lw=2, c=:blue)
    annotate!([(idx+0.5, mean(r)+0.03, (d.dept[idx], 8))])
end
p
Out[31]:

Code 11.32

In [32]:
dat.dept_id = reshape(hcat(1:6, 1:6)', 12)

@model function m11_8(admit, applications, gid, dept_id)
    a ~ MvNormal([0, 0], 1.5)
    delta ~ MvNormal(zeros(6), 1.5)
    p = @. logistic(a[gid] + delta[dept_id])
    for i  eachindex(applications)
        admit[i] ~ Binomial(applications[i], p[i])
    end
end

m11_8_chain = sample(m11_8(dat.admit, dat.applications, dat.gid, dat.dept_id), NUTS(), 1000)
m11_8_df = DataFrame(m11_8_chain)
precis(m11_8_df)
┌──────────┬───────────────────────────────────────────────────────┐
│    param     mean     std     5.5%      50%    94.5%  histogram │
├──────────┼───────────────────────────────────────────────────────┤
│     a[1] │ -0.5904  0.4753  -1.4541  -0.5667   0.0759    ▁▂▃██▂▁ │
│     a[2] │ -0.4937  0.4728  -1.3508   -0.464   0.1564    ▁▁▃▇█▃▁ │
│ delta[1] │  1.1706  0.4761   0.4834   1.1412   2.0253    ▁▂▇█▄▂▁ │
│ delta[2] │  1.1285  0.4827   0.4516   1.0966   1.9931    ▁▂██▄▂▁ │
│ delta[3] │ -0.0858   0.477  -0.7715  -0.1229   0.7755    ▁▄█▆▂▁▁ │
│ delta[4] │  -0.121  0.4755  -0.7791  -0.1486   0.7583     ▁▄█▅▂▁ │
│ delta[5] │ -0.5609   0.479  -1.2411  -0.5933   0.3125     ▁▄█▇▂▁ │
│ delta[6] │  -2.125  0.4952  -2.8541  -2.1313  -1.2774     ▁▅█▆▂▁ │
└──────────┴───────────────────────────────────────────────────────┘

Code 11.33

In [33]:
diff_a = m11_8_df."a[1]" .- m11_8_df."a[2]"
diff_p = logistic.(m11_8_df."a[1]") .- logistic.(m11_8_df."a[2]")
precis(DataFrame(diff_a=diff_a, diff_p=diff_p))
┌────────┬─────────────────────────────────────────────────────────┐
│  param     mean     std     5.5%      50%   94.5%     histogram │
├────────┼─────────────────────────────────────────────────────────┤
│ diff_a │ -0.0967  0.0761  -0.2167  -0.0965  0.0309    ▁▁▂▆██▆▃▁▁ │
│ diff_p │ -0.0216  0.0175  -0.0495  -0.0214  0.0065  ▁▁▂▄▆██▆▃▂▁▁ │
└────────┴─────────────────────────────────────────────────────────┘

Code 11.34

In [34]:
gb = groupby(d, :dept)
fun = (as,gs) -> [(gender=g, a_ratio=a/sum(as)) for (a,g) in zip(as,gs)]
c = combine(gb, [:applications, :gender] => fun => AsTable)
unstack(c, :gender, :dept, :a_ratio)
Out[34]:

2 rows × 7 columns

genderABCDEF
String7Float64?Float64?Float64?Float64?Float64?Float64?
1male0.8842440.9572650.3540310.5265150.3270550.522409
2female0.1157560.0427350.6459690.4734850.6729450.477591

11.2 Poisson regression

Code 11.35

In [35]:
Random.seed!(1)
y = rand(Binomial(1000, 1/1000), 10^5)
mean(y), var(y)
Out[35]:
(0.99378, 0.9968312799127984)

Code 11.36

In [36]:
d = DataFrame(CSV.File("data/Kline.csv"))
Out[36]:

10 rows × 5 columns

culturepopulationcontacttotal_toolsmean_TU
String15Int64String7Int64Float64
1Malekula1100low133.2
2Tikopia1500low224.7
3Santa Cruz3600low244.0
4Yap4791high435.0
5Lau Fiji7400high335.0
6Trobriand8000high194.0
7Chuuk9200high403.8
8Manus13000low286.6
9Tonga17500high555.4
10Hawaii275000low716.6

Code 11.37

In [37]:
d.P = standardize(ZScoreTransform, log.(d.population))
d.contact_id = ifelse.(d.contact .== "high", 2, 1);

Code 11.38

In [38]:
p = plot(xlims=(-1, 100), ylims=(0, 0.08), 
        xlab="mean number of tools", ylab="Density")
r = 0:0.5:100
plot!(r, LogNormal(0, 10), label=L"a \sim \ln \mathcal{N}(0, 10)")
plot!(r, LogNormal(3, 0.5), label=L"a \sim \ln \mathcal{N}(3, 0.5)")
Out[38]:

Code 11.39

In [39]:
a = rand(Normal(0, 10), 10^4)
λ = exp.(a)
mean(λ)
Out[39]:
6.39321350768853e11

Code 11.40 - joined with 11.38

Code 11.41

In [40]:
Random.seed!(1)
N = 100
a = rand(Normal(3, 0.5), N)
b = rand(Normal(0, 10), N)
p = plot(xlims=(-2, 2), ylims=(0, 100))
r = -2:0.05:2

for (aᵢ, bᵢ)  zip(a, b)
    plot!(r, x -> exp(aᵢ + bᵢ * x), c=:gray)
end
p
Out[40]:

Code 11.42

In [41]:
Random.seed!(1)
N = 100
a = rand(Normal(3, 0.5), N)
b = rand(Normal(0, .2), N)
p = plot(xlims=(-2, 2), ylims=(0, 100))
r = -2:0.05:2

for (aᵢ, bᵢ)  zip(a, b)
    plot!(r, x -> exp(aᵢ + bᵢ * x), c=:gray)
end
p
Out[41]:

Code 11.43

In [42]:
x_seq = range(log(100), log(200_000), length=100)
λ = map(x -> (@. exp(a + b * x)), x_seq)
λ = hcat(λ...)
p = plot(xlims=extrema(x_seq), ylims=(0, 500), xlab="log population", ylab="total tools")
for λᵢ  eachrow(λ)
    plot!(x_seq, λᵢ, c=:gray)
end
p
Out[42]:

Code 11.44

In [43]:
p = plot(xlims=extrema(exp.(x_seq)), ylims=(0, 500), 
    xlab="population", ylab="total tools")
for λᵢ  eachrow(λ)
    plot!(exp.(x_seq), λᵢ, c=:gray)
end
p
Out[43]:

Code 11.45

In [44]:
# intercept only
@model function m11_9(T)
    a ~ Normal(3, 0.5)
    λ = exp(a)
    T ~ Poisson(λ)
end

m11_9_ch = sample(m11_9(d.total_tools), NUTS(), 1000)
m11_9_df = DataFrame(m11_9_ch)

# interaction model
@model function m11_10(T, cid, P)
    a ~ MvNormal([3, 3], 0.5)
    b ~ MvNormal([0, 0], 0.2)
    λ = @. exp(a[cid] + b[cid]*P)
    for i  eachindex(T)
        T[i] ~ Poisson(λ[i])
    end
end

m11_10_ch = sample(m11_10(d.total_tools, d.contact_id, d.P), NUTS(), 1000)
m11_10_df = DataFrame(m11_10_ch);

Code 11.46

In [45]:
f = (r, T) -> poislogpdf(exp(r.a), T)
ll_m9 = link(m11_9_df, f, d.total_tools)
ll_m9 = hcat(ll_m9...);

f = (r, (T, cid, P)) -> poislogpdf(exp(get(r, "a[$cid]", 0) + get(r, "b[$cid]", 0)*P), T)
ll_m10 = link(m11_10_df, f, zip(d.total_tools, d.contact_id, d.P))
ll_m10 = hcat(ll_m10...);

compare([ll_m9, ll_m10], :psis, mnames=["m9", "m10"])
Out[45]:

2 rows × 8 columns

modelsPSISlppdSEdPSISdSEpPSISweight
StringFloat64Float64Float64Float64Float64Float64Float64
1m1082.971.3912.20.00.05.971.0
2m9141.0124.6931.6158.131.838.940.0

Code 11.47

In [46]:
# get psis K values
t = ll_m10'
m10_t = collect(reshape(t, size(t)..., 1))
PSIS_m10 = psis_loo(m10_t)
k = PSIS_m10.pointwise(:pareto_k);

# plot
mark_size = standardize(ZScoreTransform, k).+5
mark_color = ifelse.(d.contact_id .== 1, :white, :blue)
scatter(d.P, d.total_tools, xlab="log population (std)", ylab="total tools", ylim=(0, 75), 
    ms=mark_size, mc=mark_color, msw=2)

ns = 100
P_seq = range(-1.4, 3, length=ns)

# cid=1 predictions
f = (r,p) -> exp(r."a[1]" + r."b[1]"*p)
λ = link(m11_10_df, f, P_seq)
λ = hcat(λ...);
λ1_mean = mean.(eachcol(λ))
λ1_ci = PI.(eachcol(λ))
λ1_ci = vcat(λ1_ci'...)

plot!(P_seq, [λ1_mean λ1_mean], fillrange=λ1_ci, fillalpha=0.2, ls=:dash, c=:black, lw=1.5)

# cid=2 predictions
f = (r,p) -> exp(r."a[2]" + r."b[2]"*p)
λ = link(m11_10_df, f, P_seq)
λ = hcat(λ...);
λ2_mean = mean.(eachcol(λ))
λ2_ci = PI.(eachcol(λ))
λ2_ci = vcat(λ2_ci'...)

plot!(P_seq, [λ2_mean λ2_mean], fillrange=λ2_ci, fillalpha=0.2, c=:black, lw=1.5)
Out[46]:

Code 11.48

In [47]:
scatter(d.population, d.total_tools, xlab="population", ylab="total tools", ylim=(0, 75), 
    ms=mark_size, mc=mark_color, msw=2)

P_seq = range(-5, 3, length=ns)
# 1.53 is sd of log(population)
# 9 is mean of log(population)
pop_seq = @. exp(P_seq * 1.53 + 9)

plot!(pop_seq, [λ1_mean λ1_mean], fillrange=λ1_ci, fillalpha=0.2, ls=:dash, c=:black, lw=1.5)
plot!(pop_seq, [λ2_mean λ2_mean], fillrange=λ2_ci, fillalpha=0.2, c=:black, lw=1.5)
Out[47]:

Code 11.49

In [48]:
@model function m11_11(T, P, cid)
    a ~ MvNormal([1,1], 1)
    b₁ ~ Exponential(1)
    b₂ ~ Exponential(1)
    b = [b₁, b₂]
    g ~ Exponential(1)
    λ = @. exp(a[cid]) * P ^ b[cid] / g
    for i  eachindex(T)
        T[i] ~ Poisson(λ[i])
    end
end

m11_11_ch = sample(m11_11(d.total_tools, d.population, d.contact_id), NUTS(), 1000)
m11_11_df = DataFrame(m11_11_ch)
precis(m11_11_df)
┌───────┬───────────────────────────────────────────────────────┐
│ param    mean     std     5.5%     50%   94.5%     histogram │
├───────┼───────────────────────────────────────────────────────┤
│  a[1] │ 0.8656  0.6512  -0.2068   0.864  1.8687   ▁▁▁▁▂▅█▇▄▁▁ │
│  a[2] │ 0.8979  0.8581   -0.473  0.8861  2.3729    ▁▂▄▇██▅▃▂▁ │
│    b₁ │ 0.2588  0.0341   0.2052  0.2591   0.313  ▁▁▁▁▃▇██▆▄▂▁ │
│    b₂ │ 0.2869  0.1039   0.1273  0.2828  0.4552  ▁▁▄▆▇██▆▄▂▂▁ │
│     g │ 1.0763  0.7044   0.3216  0.8842  2.5334     ▅█▅▃▂▁▁▁▁ │
└───────┴───────────────────────────────────────────────────────┘

Code 11.50

In [49]:
Random.seed!(1)
num_days = 30
y = rand(Poisson(1.5), num_days);

Code 11.51

In [50]:
Random.seed!(2)
num_weeks = 4
y_new = rand(Poisson(0.5*7), num_weeks);

Code 11.52

In [51]:
y_all = [y; y_new]
exposure = [repeat([1], 30); repeat([7], 4)]
monastery = [repeat([0], 30); repeat([1], 4)]
d = DataFrame(y=y_all, days=exposure, monastery=monastery);

Code 11.53

In [52]:
Random.seed!(1)
d.log_days = log.(d.days)

@model function m11_12(y, log_days, monastery)
    a ~ Normal()
    b ~ Normal()
    λ = @. exp(log_days + a + b*monastery)
    @. y ~ Poisson(λ)
end

m11_12_chain = sample(m11_12(d.y, d.log_days, d.monastery), NUTS(), 1000)
m11_12_df = DataFrame(m11_12_chain);

Code 11.54

Note that values are slightly different from the book. This is due to non-Normal distributions (you can check this yourself with optimize)

In [53]:
λ_old = exp.(m11_12_df.a)
λ_new = @. exp(m11_12_df.a + m11_12_df.b)
precis(DataFrame(λ_old=λ_old, λ_new=λ_new))
┌───────┬──────────────────────────────────────────────────────┐
│ param    mean     std    5.5%     50%   94.5%     histogram │
├───────┼──────────────────────────────────────────────────────┤
│ λ_old │ 1.4935  0.2284  1.1472  1.4783  1.8972      ▁▃▇█▅▂▁▁ │
│ λ_new │ 0.7648  0.1635   0.513    0.76  1.0218  ▁▁▂▄▇█▇▄▂▁▁▁ │
└───────┴──────────────────────────────────────────────────────┘

11.3 Multinomial and categorical models

Code 11.55

In [54]:
# simulate career choice among 500 individuals
N = 500
income = [1,2,5]
c_score = 0.5*income
p = softmax(c_score)

# simulate choice
Random.seed!(34302)
w = Weights(p)
career = [sample(w) for _ in 1:N];

Code 11.56

In [55]:
@model function m11_13(career, income)
    a ~ MvNormal([0, 0], 1)
    b ~ TruncatedNormal(0, 0.5, 0, Inf)
    p = softmax([a[1] + b*income[1], a[2] + b*income[2], 0])
    career ~ Categorical(p)
end
Out[55]:
m11_13 (generic function with 2 methods)

Code 11.57

In [56]:
Random.seed!(121)
m11_13_chain = sample(m11_13(career, income), NUTS(), 5000, n_chains=4)
m11_13_df = DataFrame(m11_13_chain)
precis(m11_13_df)
┌───────┬─────────────────────────────────────────────────────────┐
│ param     mean     std     5.5%      50%    94.5%    histogram │
├───────┼─────────────────────────────────────────────────────────┤
│  a[1] │ -1.8621  0.1702  -2.1531  -1.8514  -1.6067  ▁▁▂▅▇█▇▅▂▁▁ │
│  a[2] │ -1.7931  0.2471  -2.2586  -1.7554   -1.457     ▁▂▃▆█▆▁▁ │
│     b │  0.1391  0.1134   0.0096   0.1104   0.3681  █▇▆▄▃▂▂▁▁▁▁ │
└───────┴─────────────────────────────────────────────────────────┘

Code 11.58

In [57]:
# logit scores
s1 = m11_13_df."a[1]" + m11_13_df.b * income[1]
s2_orig = m11_13_df."a[2]" + m11_13_df.b * income[2]
s2_new = m11_13_df."a[2]" + m11_13_df.b * income[2] * 2

# compute probabilities for original and counterfactual
p_orig = softmax.(eachrow(hcat(s1, s2_orig, zeros(length(s1)))))
p_orig = hcat(p_orig...)'
p_new = softmax.(eachrow(hcat(s1, s2_new, zeros(length(s1)))))
p_new = hcat(p_new...)'

p_diff = p_new[:, 2] - p_orig[:, 2]
precis(DataFrame(p_diff=p_diff))
┌────────┬─────────────────────────────────────────────────────┐
│  param    mean     std    5.5%     50%   94.5%    histogram │
├────────┼─────────────────────────────────────────────────────┤
│ p_diff │ 0.0432  0.0394  0.0026  0.0315  0.1278  █▆▄▃▂▁▁▁▁▁▁ │
└────────┴─────────────────────────────────────────────────────┘

Code 11.59

In [58]:
Random.seed!(1)

N = 500
family_income = rand(Uniform(), N)
b = [-2, 0, 2]
career = [
    begin
        score = (0.5 * 1:3) + b * inc
        p = softmax(score)
        sample(Weights(p))
    end
    for inc in family_income
]

@model function m11_14(career, family_income)
    a ~ MvNormal([0, 0], 1.5)
    b ~ MvNormal([0, 0], 1)
    
    for i  eachindex(career)
        income = family_income[i]
        s = [a[1] + b[1] * income, a[2] + b[2] * income, 0]
        p = softmax(s)
        career[i] ~ Categorical(p)
    end
end

m11_14_chain = sample(m11_14(career, family_income), NUTS(), 1000)
m11_14_df = DataFrame(m11_14_chain)
precis(m11_14_df)
┌───────┬────────────────────────────────────────────────────────────┐
│ param     mean     std     5.5%      50%    94.5%       histogram │
├───────┼────────────────────────────────────────────────────────────┤
│  a[1] │ -2.7538    0.39  -3.3927  -2.7355  -2.1699  ▁▁▁▂▃▆██▇▅▂▁▁▁ │
│  a[2] │ -1.2474  0.2198  -1.6024  -1.2415   -0.896   ▁▁▁▂▄▆███▆▄▂▁ │
│  b[1] │ -1.6594  0.7035  -2.7403   -1.658  -0.5104      ▁▁▃▇█▇▄▂▁▁ │
│  b[2] │ -1.0656  0.4021  -1.7315  -1.0556  -0.4297  ▁▁▂▃▅▇██▆▄▂▁▁▁ │
└───────┴────────────────────────────────────────────────────────────┘

Code 11.60

In [60]:
d = DataFrame(CSV.File("data/UCBadmit.csv", skipto=2, 
        header=[:id, :dept, :gender, :admit, :reject, :applications]))
Out[60]:

12 rows × 6 columns

iddeptgenderadmitrejectapplications
Int64String1String7Int64Int64Int64
11Amale512313825
22Afemale8919108
33Bmale353207560
44Bfemale17825
55Cmale120205325
66Cfemale202391593
77Dmale138279417
88Dfemale131244375
99Emale53138191
1010Efemale94299393
1111Fmale22351373
1212Ffemale24317341

Code 11.61

In [61]:
@model function m_binom(admit, applications)
    a ~ Normal(0, 1.5)
    p = logistic(a)
    @. admit ~ Binomial(applications, p)
end

m_binom_chain = sample(m_binom(d.admit, d.applications), NUTS(), 1000)
m_binom_df = DataFrame(m_binom_chain);

@model function m_pois(admit, reject)
    a ~ MvNormal([0, 0], 1.5)
    λ = exp.(a)
    admit ~ Poisson(λ[1])
    reject ~ Poisson(λ[2])
end

m_pois_chain = sample(m_pois(d.admit, d.reject), NUTS(), 1000)
m_pois_df = DataFrame(m_pois_chain);

Code 11.62

In [62]:
m_binom_df.a .|> logistic |> mean
Out[62]:
0.387568484041908

Code 11.63

In [63]:
a1 = m_pois_df."a[1]" |> mean
a2 = m_pois_df."a[2]" |> mean
exp(a1)/(exp(a1)+exp(a2))
Out[63]:
0.38722562179640474
In [ ]: