This repository has been archived by the owner on Nov 1, 2024. It is now read-only.

Commit

feat(turing): juliaformatter blue style
storopoli committed Dec 30, 2023
1 parent 30281e0 commit 62db2e8
Showing 20 changed files with 100 additions and 71 deletions.
21 changes: 21 additions & 0 deletions flake.nix
@@ -58,6 +58,27 @@
files = "\\.typ$";
language = "rust";
};
julia-formatter = {
enable = true;
name = "format julia code";
entry = ''
${pkgs.julia-bin}/bin/julia -e '
using Pkg
Pkg.activate(".")
using JuliaFormatter
format(ARGS)
out = Cmd(`git diff --name-only`) |> read |> String
if out == ""
exit(0)
else
@error "Some files have been formatted !!!"
write(stdout, out)
exit(1)
end'
'';
files = "\\.jl$";
language = "system";
};
};
settings = {
treefmt.package = treefmtEval.config.build.wrapper;
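For context, the `julia-formatter` hook added above shells out to Julia, formats the staged `.jl` files, and fails the commit if formatting changed anything. A minimal standalone sketch of the same check (assuming JuliaFormatter is declared in the project's environment; the script name `check_format.jl` is hypothetical):

```julia
# check_format.jl — run as: julia check_format.jl path/to/file.jl ...
using Pkg
Pkg.activate(".")    # assumes JuliaFormatter is listed in this project's Project.toml
using JuliaFormatter

# Format the files passed on the command line, in place (same call as the hook's entry).
format(ARGS)

# If `git diff` reports modified files, formatting changed something: fail like the hook does.
out = read(`git diff --name-only`, String)
if isempty(out)
    exit(0)
else
    @error "Some files have been formatted !!!"
    write(stdout, out)
    exit(1)
end
```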
1 change: 1 addition & 0 deletions turing/.JuliaFormatter.toml
@@ -0,0 +1 @@
style = "blue"
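With this file in place, `format` calls on the `turing/` directory pick up the Blue style automatically; JuliaFormatter looks for a `.JuliaFormatter.toml` next to (or above) the files it formats. A small sketch of the equivalence (assuming a JuliaFormatter version that exports `BlueStyle`):

```julia
using JuliaFormatter

# Both calls should produce the same result for files under turing/:
format("turing")                     # reads turing/.JuliaFormatter.toml, i.e. style = "blue"
format("turing"; style=BlueStyle())  # passes the Blue style explicitly
```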
12 changes: 8 additions & 4 deletions turing/02-linear_regression-kidiq.jl
@@ -13,11 +13,11 @@ seed!(123)
kidiq = CSV.read("datasets/kidiq.csv", DataFrame)

# define data matrix X and standardize
X = select(kidiq, Not(:kid_score)) |> Matrix
X = Matrix(select(kidiq, Not(:kid_score)))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y and standardize
y = kidiq[:, :kid_score] |> float
y = float(kidiq[:, :kid_score])
y = standardize(ZScoreTransform, y; dims=1)

# define the model
@@ -61,8 +61,12 @@ model_qr = linear_regression(Q_ast, y)
chn_qr = sample(model_qr, NUTS(1_000, 0.8), MCMCThreads(), 1_000, 4)

# reconstruct β back from the Q_ast scale into X scale
betas = mapslices(x -> R_ast^-1 * x, chn_qr[:, namesingroup(chn_qr, :β), :].value.data, dims=[2])
chain_beta = setrange(Chains(betas, ["real_β[$i]" for i in 1:size(Q_ast, 2)]), 1_001:1:2_000)
betas = mapslices(
x -> R_ast^-1 * x, chn_qr[:, namesingroup(chn_qr, :β), :].value.data; dims=[2]
)
chain_beta = setrange(
Chains(betas, ["real_β[$i]" for i in 1:size(Q_ast, 2)]), 1_001:1:2_000
)
chn_qr_reconstructed = hcat(chain_beta, chn_qr)
println(DataFrame(summarystats(chn_qr_reconstructed)))

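The β reconstruction above undoes the thin-QR reparametrization used for sampling. The `Q_ast` and `R_ast` definitions sit earlier in the file, outside this hunk; a sketch of the assumed setup, using the standard `sqrt(n - 1)` scaling:

```julia
using LinearAlgebra

# Thin QR of the standardized design matrix, rescaled as in the usual Stan/Turing QR trick
# (an assumption here — the exact definitions are not part of the shown diff):
Q, R = qr(X)
Q_ast = Matrix(Q) * sqrt(size(X, 1) - 1)
R_ast = R / sqrt(size(X, 1) - 1)

# The model samples coefficients in the Q_ast basis; coefficients on the original X scale
# are recovered per posterior draw as R_ast^-1 * β_tilde, which is exactly what the
# mapslices(x -> R_ast^-1 * x, ...) call above applies to every sample.
```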
2 changes: 1 addition & 1 deletion turing/03-logistic_regression-wells.jl
@@ -14,7 +14,7 @@ seed!(123)
wells = CSV.read("datasets/wells.csv", DataFrame)

# define data matrix X and standardize
X = select(wells, Not(:switch)) |> Matrix
X = Matrix(select(wells, Not(:switch)))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y
2 changes: 1 addition & 1 deletion turing/04-ordinal_regression-esoph.jl
@@ -25,7 +25,7 @@ transform!(
x -> categorical(x; levels=["0-39g/day", "40-79", "80-119", "120+"], ordered=true),
:tobgp =>
x -> categorical(x; levels=["0-9g/day", "10-19", "20-29", "30+"], ordered=true);
renamecols=false
renamecols=false,
)
transform!(esoph, [:agegp, :alcgp, :tobgp] .=> ByRow(levelcode); renamecols=false)

2 changes: 1 addition & 1 deletion turing/05-poisson_regression-roaches.jl
@@ -14,7 +14,7 @@ seed!(123)
roaches = CSV.read("datasets/roaches.csv", DataFrame)

# define data matrix X and standardize
X = select(roaches, Not(:y)) |> Matrix
X = Matrix(select(roaches, Not(:y)))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y
4 changes: 2 additions & 2 deletions turing/06-robust_linear_regression-duncan.jl
@@ -13,11 +13,11 @@ seed!(123)
duncan = CSV.read("datasets/duncan.csv", DataFrame)

# define data matrix X and standardize
X = select(duncan, [:income, :education]) |> Matrix |> float
X = float(Matrix(select(duncan, [:income, :education])))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y and standardize
y = duncan[:, :prestige] |> float
y = float(duncan[:, :prestige])
y = standardize(ZScoreTransform, y; dims=1)

# define the model
2 changes: 1 addition & 1 deletion turing/07-robust_beta_binomial_regression-wells.jl
@@ -17,7 +17,7 @@ seed!(123)
wells = CSV.read("datasets/wells.csv", DataFrame)

# define data matrix X and standardize
X = select(wells, Not(:switch)) |> Matrix
X = Matrix(select(wells, Not(:switch)))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y
2 changes: 1 addition & 1 deletion turing/08-robust_robit_regression-wells.jl
@@ -14,7 +14,7 @@ seed!(123)
wells = CSV.read("datasets/wells.csv", DataFrame)

# define data matrix X and standardize
X = select(wells, Not(:switch)) |> Matrix
X = Matrix(select(wells, Not(:switch)))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y
2 changes: 1 addition & 1 deletion turing/09-robust_negative_binomial_regression-roaches.jl
@@ -14,7 +14,7 @@ seed!(123)
roaches = CSV.read("datasets/roaches.csv", DataFrame)

# define data matrix X and standardize
X = select(roaches, Not(:y)) |> Matrix
X = Matrix(select(roaches, Not(:y)))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y
turing/10-robust_zero_inflated_regression-negative_binomial-roaches.jl
@@ -13,7 +13,7 @@ seed!(123)
roaches = CSV.read("datasets/roaches.csv", DataFrame)

# define data matrix X and standardize
X = select(roaches, Not(:y)) |> Matrix
X = Matrix(select(roaches, Not(:y)))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y
@@ -28,7 +28,9 @@ function NegativeBinomial2(μ, ϕ)
end

# define the model
@model function zero_inflated_negative_binomial_regression(X, y; predictors=size(X, 2), N=size(X, 1))
@model function zero_inflated_negative_binomial_regression(
X, y; predictors=size(X, 2), N=size(X, 1)
)
# priors
α ~ TDist(3) * 2.5
β ~ filldist(TDist(3) * 2.5, predictors)
@@ -40,11 +42,11 @@ end
for n in 1:N
if y[n] == 0
Turing.@addlogprob! logpdf(Bernoulli(γ), 0) +
logpdf(Bernoulli(γ), 1) +
logpdf(NegativeBinomial2(exp(α + X[n, :] ⋅ β), ϕ), y[n])
logpdf(Bernoulli(γ), 1) +
logpdf(NegativeBinomial2(exp(α + X[n, :] ⋅ β), ϕ), y[n])
else
Turing.@addlogprob! logpdf(Bernoulli(γ), 0) +
logpdf(NegativeBinomial2(exp(α + X[n, :] ⋅ β), ϕ), y[n])
logpdf(NegativeBinomial2(exp(α + X[n, :] ⋅ β), ϕ), y[n])
end
end
return (; y, α, β, γ, ϕ)
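The hunks above edit code inside `NegativeBinomial2(μ, ϕ)`, the mean/overdispersion parameterization used by these scripts. Its definition sits outside the shown hunks; a sketch of what such a helper typically looks like:

```julia
using Distributions

# Mean–overdispersion parameterization: mean μ, variance μ + μ²/ϕ (an assumed sketch,
# not the file's exact definition).
function NegativeBinomial2(μ, ϕ)
    p = 1 / (1 + μ / ϕ)    # success probability of the standard parameterization
    r = ϕ                  # "number of successes" parameter
    return NegativeBinomial(r, p)
end

# Example: a draw with mean 5 and overdispersion 2
rand(NegativeBinomial2(5.0, 2.0))
```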
8 changes: 4 additions & 4 deletions turing/10-robust_zero_inflated_regression-poisson-roaches.jl
@@ -13,7 +13,7 @@ seed!(123)
roaches = CSV.read("datasets/roaches.csv", DataFrame)

# define data matrix X and standardize
X = select(roaches, Not(:y)) |> Matrix
X = Matrix(select(roaches, Not(:y)))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y
@@ -30,11 +30,11 @@ y = roaches[:, :y]
for n in 1:N
if y[n] == 0
Turing.@addlogprob! logpdf(Bernoulli(γ), 0) +
logpdf(Bernoulli(γ), 1) +
logpdf(LogPoisson(α + X[n, :] ⋅ β), y[n])
logpdf(Bernoulli(γ), 1) +
logpdf(LogPoisson(α + X[n, :] ⋅ β), y[n])
else
Turing.@addlogprob! logpdf(Bernoulli(γ), 0) +
logpdf(LogPoisson(α + X[n, :] ⋅ β), y[n])
logpdf(LogPoisson(α + X[n, :] ⋅ β), y[n])
end
end
return (; y, α, β, γ)
4 changes: 2 additions & 2 deletions turing/11-sparse_horseshoe.jl
@@ -13,11 +13,11 @@ seed!(123)
df = CSV.read("datasets/sparse_regression.csv", DataFrame)

# define data matrix X and standardize
X = select(df, Not(:y)) |> Matrix |> float
X = float(Matrix(select(df, Not(:y))))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y and standardize
y = df[:, :y] |> float
y = float(df[:, :y])
y = standardize(ZScoreTransform, y; dims=1)

# define the model
4 changes: 2 additions & 2 deletions turing/12-sparse_horseshoe_p.jl
@@ -13,11 +13,11 @@ seed!(123)
df = CSV.read("datasets/sparse_regression.csv", DataFrame)

# define data matrix X and standardize
X = select(df, Not(:y)) |> Matrix |> float
X = float(Matrix(select(df, Not(:y))))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y and standardize
y = df[:, :y] |> float
y = float(df[:, :y])
y = standardize(ZScoreTransform, y; dims=1)

# define the model
13 changes: 3 additions & 10 deletions turing/13-sparse_finnish_horseshoe.jl
@@ -13,23 +13,16 @@ seed!(123)
df = CSV.read("datasets/sparse_regression.csv", DataFrame)

# define data matrix X and standardize
X = select(df, Not(:y)) |> Matrix |> float
X = float(Matrix(select(df, Not(:y))))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y and standardize
y = df[:, :y] |> float
y = float(df[:, :y])
y = standardize(ZScoreTransform, y; dims=1)

# define the model
@model function sparse_finnish_horseshoe_regression(
X,
y;
predictors=size(X, 2),
τ₀=3,
ν_local=1,
ν_global=1,
slab_df=4,
slab_scale=2,
X, y; predictors=size(X, 2), τ₀=3, ν_local=1, ν_global=1, slab_df=4, slab_scale=2
)
# priors
α ~ TDist(3) * 2.5
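The `α ~ TDist(3) * 2.5` prior that recurs in these models relies on Distributions.jl's affine transformation of a univariate distribution. A minimal sketch of what that expression produces (not part of the diff):

```julia
using Distributions

# Multiplying a distribution by a scalar yields a location–scale (affine) wrapper:
# here a Student-t with ν = 3 scaled by 2.5, equivalent to 2.5 * TDist(3).
d = TDist(3) * 2.5
rand(d)         # draw from the scaled t prior
logpdf(d, 0.0)  # evaluate its log density, as Turing does during sampling
```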
11 changes: 3 additions & 8 deletions turing/14-sparse_r2d2.jl
@@ -13,21 +13,16 @@ seed!(123)
df = CSV.read("datasets/sparse_regression.csv", DataFrame)

# define data matrix X and standardize
X = select(df, Not(:y)) |> Matrix |> float
X = float(Matrix(select(df, Not(:y))))
X = standardize(ZScoreTransform, X; dims=1)

# define dependent variable y and standardize
y = df[:, :y] |> float
y = float(df[:, :y])
y = standardize(ZScoreTransform, y; dims=1)

# define the model
@model function sparse_r2d2_regression(
X,
y;
predictors=size(X, 2),
mean_R²=0.5,
prec_R²=2,
cons_D2=1,
X, y; predictors=size(X, 2), mean_R²=0.5, prec_R²=2, cons_D2=1
)
# priors
α ~ TDist(3) * 2.5
19 changes: 12 additions & 7 deletions turing/15-hierarchical_varying_intercept-cheese.jl
@@ -19,24 +19,29 @@ end

# create int idx
cheese[:, :background_int] = map(cheese[:, :background]) do b
b == "urban" ? 1 :
b == "rural" ? 2 : missing
if b == "urban"
1
elseif b == "rural"
2
else
missing
end
end

# define data matrix X
X = select(cheese, Between(:cheese_A, :cheese_D)) |> Matrix
X = Matrix(select(cheese, Between(:cheese_A, :cheese_D)))

# define dependent variable y and standardize
y = cheese[:, :y] |> float
y = float(cheese[:, :y])
y = standardize(ZScoreTransform, y; dims=1)

# define vector of group memberships idx
idx = cheese[:, :background_int]

# define the model
@model function varying_intercept_regression(X, idx, y;
predictors=size(X, 2),
n_gr=length(unique(idx)))
@model function varying_intercept_regression(
X, idx, y; predictors=size(X, 2), n_gr=length(unique(idx))
)
# priors
α ~ TDist(3) * 2.5
β ~ filldist(TDist(3) * 2.5, predictors)
19 changes: 12 additions & 7 deletions turing/15-hierarchical_varying_intercept-non_centered-cheese.jl
@@ -19,24 +19,29 @@ end

# create int idx
cheese[:, :background_int] = map(cheese[:, :background]) do b
b == "urban" ? 1 :
b == "rural" ? 2 : missing
if b == "urban"
1
elseif b == "rural"
2
else
missing
end
end

# define data matrix X
X = select(cheese, Between(:cheese_A, :cheese_D)) |> Matrix
X = Matrix(select(cheese, Between(:cheese_A, :cheese_D)))

# define dependent variable y and standardize
y = cheese[:, :y] |> float
y = float(cheese[:, :y])
y = standardize(ZScoreTransform, y; dims=1)

# define vector of group memberships idx
idx = cheese[:, :background_int]

# define the model
@model function varying_intercept_ncp_regression(X, idx, y;
predictors=size(X, 2),
n_gr=length(unique(idx)))
@model function varying_intercept_ncp_regression(
X, idx, y; predictors=size(X, 2), n_gr=length(unique(idx))
)
# priors
α ~ TDist(3) * 2.5
β ~ filldist(TDist(3) * 2.5, predictors)
20 changes: 12 additions & 8 deletions turing/16-hierarchical_varying_intercept_slope-cheese.jl
@@ -19,28 +19,32 @@ end

# create int idx
cheese[:, :background_int] = map(cheese[:, :background]) do b
b == "urban" ? 1 :
b == "rural" ? 2 : missing
if b == "urban"
1
elseif b == "rural"
2
else
missing
end
end

# define data matrix X
# now we are binding a column of 1s as the first column of X
# for the correlated intercepts
insertcols!(cheese, :intercept => fill(1, nrow(cheese)))
X = select(cheese, Cols(:intercept, Between(:cheese_A, :cheese_D))) |> Matrix
X = Matrix(select(cheese, Cols(:intercept, Between(:cheese_A, :cheese_D))))

# define dependent variable y and standardize
y = cheese[:, :y] |> float
y = float(cheese[:, :y])
y = standardize(ZScoreTransform, y; dims=1)

# define vector of group memberships idx
idx = cheese[:, :background_int]

# define the model
@model function correlated_varying_intercept_slope_regression(X, idx, y;
predictors=size(X, 2),
N=size(X, 1),
n_gr=length(unique(idx)))
@model function correlated_varying_intercept_slope_regression(
X, idx, y; predictors=size(X, 2), N=size(X, 1), n_gr=length(unique(idx))
)
# priors
Ω ~ LKJCholesky(predictors, 2.0)
σ ~ Exponential(1)