
Commit 8051348

jlperla and Farhad Shahryarpoor authored
More quadrature (#365)
* Added Gauss-Hermite quadrature
* Added more Gaussian quadrature rules
* All the tasks are implemented
* Edited the ifp and career files
* Ran the formatter
* Addressed review comments

Co-authored-by: Farhad Shahryarpoor <[email protected]>
1 parent dd7bfc7 commit 8051348

File tree (4 files changed, +130 −54 lines):

* lectures/dynamic_programming/career.md
* lectures/dynamic_programming/ifp.md
* lectures/dynamic_programming/odu.md
* lectures/more_julia/general_packages.md

lectures/dynamic_programming/career.md

Lines changed: 11 additions & 10 deletions
@@ -43,7 +43,7 @@ This exposition draws on the presentation in {cite}`Ljungqvist2012`, section 6.5


 ```{code-cell} julia
-using LinearAlgebra, Statistics
+using LinearAlgebra, Statistics, NLsolve
 ```

 ## Model
@@ -154,7 +154,7 @@ using Test
 ```

 ```{code-cell} julia
-using LaTeXStrings, Plots, QuantEcon, Distributions
+using LaTeXStrings, Plots, Distributions

 n = 50
 a_vals = [0.5, 1, 100]
@@ -266,7 +266,8 @@ Here's the value function
 wp = CareerWorkerProblem()
 v_init = fill(100.0, wp.N, wp.N)
 func(x) = update_bellman(wp, x)
-v = compute_fixed_point(func, v_init, max_iter = 500, verbose = false)
+sol = fixedpoint(func, v_init)
+v = sol.zero

 plot(linetype = :surface, wp.theta, wp.epsilon, transpose(v),
      xlabel = L"\theta",
@@ -309,7 +310,7 @@ In particular, modulo randomness, reproduce the following figure (where the hori
 :width: 100%
 ```

-Hint: To generate the draws from the distributions $F$ and $G$, use the type [DiscreteRV](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/discrete_rv.jl).
+Hint: To generate the draws from the distributions $F$ and $G$, you can use the `Categorical` distribution from the `Distributions` package.

 (career_ex2)=
 ### Exercise 2
@@ -357,15 +358,16 @@ wp = CareerWorkerProblem()
 function solve_wp(wp)
     v_init = fill(100.0, wp.N, wp.N)
     func(x) = update_bellman(wp, x)
-    v = compute_fixed_point(func, v_init, max_iter = 500, verbose = false)
+    sol = fixedpoint(func, v_init)
+    v = sol.zero
     optimal_policy = get_greedy(wp, v)
     return v, optimal_policy
 end

 v, optimal_policy = solve_wp(wp)

-F = DiscreteRV(wp.F_probs)
-G = DiscreteRV(wp.G_probs)
+F = Categorical(wp.F_probs)
+G = Categorical(wp.G_probs)

 function gen_path(T = 20)
     i = j = 1
@@ -375,9 +377,9 @@ function gen_path(T = 20)
     for t in 1:T
         # do nothing if stay put
         if optimal_policy[i, j] == 2 # new job
-            j = rand(G)[1]
+            j = rand(G)
         elseif optimal_policy[i, j] == 3 # new life
-            i, j = rand(F)[1], rand(G)[1]
+            i, j = rand(F), rand(G)
         end
         push!(theta_ind, i)
         push!(epsilon_ind, j)
@@ -512,4 +514,3 @@ You will see that the region for which the worker
 will stay put has grown because the distribution for $\epsilon$
 has become more concentrated around the mean, making high-paying jobs
 less realistic.
-
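Taken together, the career.md changes swap QuantEcon's `compute_fixed_point` and `DiscreteRV` for `NLsolve.fixedpoint` and `Distributions.Categorical`. A minimal standalone sketch of that pattern (toy inputs, not the lecture's objects):

```julia
using NLsolve, Distributions

# solve v = T(v) for a toy contraction; the fixed point is v = [2, 2, 2]
T(v) = 0.5 .* v .+ 1.0
sol = fixedpoint(T, zeros(3))
v = sol.zero                # NLsolve returns the fixed point in `sol.zero`

# draw integer indices from a discrete distribution given a probability vector
probs = [0.2, 0.5, 0.3]     # placeholder probabilities
G = Categorical(probs)
j = rand(G)                 # an Int in 1:3; no trailing `[1]` needed
```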

lectures/dynamic_programming/ifp.md

Lines changed: 18 additions & 18 deletions
@@ -375,8 +375,8 @@ using Test
 ```

 ```{code-cell} julia
-using LinearAlgebra, Statistics
-using BenchmarkTools, LaTeXStrings, Optim, Plots, QuantEcon, Random
+using LinearAlgebra, Statistics, Interpolations, NLsolve
+using BenchmarkTools, LaTeXStrings, Optim, Plots, Random, QuantEcon
 using Optim: converged, maximum, maximizer, minimizer, iterations

 ```
@@ -407,8 +407,8 @@ function T!(cp, V, out; ret_policy = false)
     z_idx = 1:length(z_vals)

     # value function when the shock index is z_i
-    vf = interp(asset_grid, V)
-
+    vf = LinearInterpolation((asset_grid, z_idx), V;
+                             extrapolation_bc = Interpolations.Flat())
     opt_lb = 1e-8

     # solve for RHS of Bellman equation
@@ -444,7 +444,8 @@ function K!(cp, c, out)
     gam = R * beta

     # policy function when the shock index is z_i
-    cf = interp(asset_grid, c)
+    cf = LinearInterpolation((asset_grid, z_idx), c;
+                             extrapolation_bc = Interpolations.Flat())

     # compute lower_bound for optimization
     opt_lb = 1e-8
@@ -525,7 +526,7 @@ In the Julia console, a comparison of the operators can be made as follows

 ```{code-cell} julia
 cp = ConsumerProblem()
-v, c, = initialize(cp)
+v, c = initialize(cp)
 ```

 ```{code-cell} julia
@@ -577,10 +578,9 @@ The following figure is a 45 degree diagram showing the law of motion for assets
 m = ConsumerProblem(r = 0.03, grid_max = 4)
 v_init, c_init = initialize(m)

-c = compute_fixed_point(c -> K(m, c),
-                        c_init,
-                        max_iter = 150,
-                        verbose = false)
+sol = fixedpoint(c -> K(m, c), c_init)
+c = sol.zero
+
 a = m.asset_grid
 R, z_vals = m.R, m.z_vals

@@ -711,10 +711,9 @@ legends = []
 for r_val in r_vals
     cp = ConsumerProblem(r = r_val)
     v_init, c_init = initialize(cp)
-    c = compute_fixed_point(x -> K(cp, x),
-                            c_init,
-                            max_iter = 150,
-                            verbose = false)
+    sol = fixedpoint(x -> K(cp, x), c_init)
+    c = sol.zero
+
     traces = push!(traces, c[:, 1])
     legends = push!(legends, L"r = %$(round(r_val, digits = 3))")
 end
@@ -741,10 +740,12 @@ function compute_asset_series(cp, T = 500_000; verbose = false)
     (; Pi, z_vals, R) = cp # simplify names
     z_idx = 1:length(z_vals)
     v_init, c_init = initialize(cp)
-    c = compute_fixed_point(x -> K(cp, x), c_init,
-                            max_iter = 150, verbose = false)

-    cf = interp(cp.asset_grid, c)
+    sol = fixedpoint(x -> K(cp, x), c_init)
+    c = sol.zero
+
+    cf = LinearInterpolation((cp.asset_grid, z_idx), c;
+                             extrapolation_bc = Interpolations.Flat())

     a = zeros(T + 1)
     z_seq = simulate(MarkovChain(Pi), T)
@@ -804,4 +805,3 @@ tags: [remove-cell]
 #test ys[1] ≈ 0.0:0.0016666666666666668:0.04 atol = 1e-4
 end
 ```
-
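In ifp.md the one-dimensional `interp(asset_grid, c)` from QuantEcon is replaced by a gridded `LinearInterpolation` over (asset, shock index) with flat extrapolation. A self-contained sketch of that construction (toy grid and values, not the lecture's):

```julia
using Interpolations

asset_grid = range(0.0, 10.0, length = 6)          # placeholder asset grid
z_idx = 1:2                                        # placeholder shock indices
c = [0.1 * a + z for a in asset_grid, z in z_idx]  # values on the 2-D grid

cf = LinearInterpolation((asset_grid, z_idx), c;
                         extrapolation_bc = Interpolations.Flat())

cf(3.7, 1)     # linear interpolation between asset grid points
cf(12.0, 2)    # beyond the grid, Flat() clamps to the boundary value
```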

lectures/dynamic_programming/odu.md

Lines changed: 24 additions & 24 deletions
@@ -160,8 +160,8 @@ using Test # At the head of every lecture.
 ```

 ```{code-cell} julia
-using LinearAlgebra, Statistics
-using Distributions, LaTeXStrings, Plots, QuantEcon, Interpolations
+using LinearAlgebra, Statistics, SpecialFunctions
+using Distributions, LaTeXStrings, Plots, Interpolations, FastGaussQuadrature, NLsolve

 w_max = 2
 x = range(0, w_max, length = 200)
@@ -209,7 +209,13 @@ The code is as follows.

 (odu_vfi_code)=
 ```{code-cell} julia
-# use key word argment
+function gauss_jacobi_dist(F::Beta, N)
+    s, wj = FastGaussQuadrature.gaussjacobi(N, F.β - 1, F.α - 1)
+    x = (s .+ 1) ./ 2
+    C = 2.0^(-(F.α + F.β - 1.0)) / SpecialFunctions.beta(F.α, F.β)
+    return x, C .* wj
+end
+
 function SearchProblem(; beta = 0.95, c = 0.6, F_a = 1, F_b = 1,
                        G_a = 3, G_b = 1.2, w_max = 2.0,
                        w_grid_size = 40, pi_grid_size = 40)
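A quick sanity check for the `gauss_jacobi_dist` construction above (a standalone sketch, not part of the diff): the returned nodes and weights should reproduce Beta moments.

```julia
using Distributions, FastGaussQuadrature, SpecialFunctions, LinearAlgebra

# same change of variables as gauss_jacobi_dist, checked on E[X^2] for X ~ Beta(2, 3)
F = Beta(2.0, 3.0)
s, wj = FastGaussQuadrature.gaussjacobi(20, F.β - 1, F.α - 1)
x = (s .+ 1) ./ 2                                              # map [-1, 1] to [0, 1]
w = (2.0^(-(F.α + F.β - 1.0)) / SpecialFunctions.beta(F.α, F.β)) .* wj

dot(w, x .^ 2)         # quadrature estimate of E[X^2]
var(F) + mean(F)^2     # exact value, 0.2
```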
@@ -226,7 +232,9 @@ function SearchProblem(; beta = 0.95, c = 0.6, F_a = 1, F_b = 1,
     w_grid = range(0, w_max, length = w_grid_size)
     pi_grid = range(pi_min, pi_max, length = pi_grid_size)

-    nodes, weights = qnwlege(21, 0.0, w_max)
+    nodes, weights = gauss_jacobi_dist(F, 21) # or G; both share support [0, w_max]
+    nodes .*= w_max
+    weights .*= w_max

     return (; beta, c, F, G, f,
             g, n_w = w_grid_size, w_max,
@@ -251,21 +259,15 @@ function T!(sp, v, out;
     vf = extrapolate(interpolate((sp.w_grid, sp.pi_grid), v,
                                  Gridded(Linear())), Flat())

-    # set up quadrature nodes/weights
-    # q_nodes, q_weights = qnwlege(21, 0.0, sp.w_max)
-
     for (w_i, w) in enumerate(sp.w_grid)
         # calculate v1
         v1 = w / (1 - beta)

         for (pi_j, _pi) in enumerate(sp.pi_grid)
-            # calculate v2
-            function integrand(m)
-                [vf(m[i], q.(Ref(sp), m[i], _pi)) *
-                 (_pi * f(m[i]) + (1 - _pi) * g(m[i])) for i in 1:length(m)]
-            end
-            integral = do_quad(integrand, nodes, weights)
-            # integral = do_quad(integrand, q_nodes, q_weights)
+            vals = vf.(nodes, q.(Ref(sp), nodes, _pi))
+            density = _pi * f.(nodes) + (1 - _pi) * g.(nodes)
+            integral = dot(weights, vals .* density)
+
             v2 = c + beta * integral

             # return policy if asked for, otherwise return max of values
@@ -294,14 +296,13 @@ function res_wage_operator!(sp, phi, out)
     phi_f = LinearInterpolation(sp.pi_grid, phi, extrapolation_bc = Line())

     # set up quadrature nodes/weights
-    q_nodes, q_weights = qnwlege(7, 0.0, sp.w_max)
+    q_nodes, q_weights = sp.quad_nodes, sp.quad_weights

     for (i, _pi) in enumerate(sp.pi_grid)
-        function integrand(x)
-            max.(x, phi_f.(q.(Ref(sp), x, _pi))) .*
-            (_pi * f(x) + (1 - _pi) * g(x))
-        end
-        integral = do_quad(integrand, q_nodes, q_weights)
+        vals = max.(q_nodes, phi_f.(q.(Ref(sp), q_nodes, _pi)))
+        density = _pi * f.(q_nodes) + (1 - _pi) * g.(q_nodes)
+        integral = dot(q_weights, vals .* density)
+
        out[i] = (1 - beta) * c + beta * integral
     end
 end
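The integrand-closure plus `do_quad` pattern is replaced here by evaluating the integrand at the stored nodes and taking a dot product with the weights. A standalone sketch of that dot-product quadrature, using plain Gauss-Legendre nodes for simplicity (the lecture itself stores Jacobi-based nodes on `sp`):

```julia
using Distributions, FastGaussQuadrature, LinearAlgebra

# quadrature as a dot product: integrate h against a belief-weighted mixture density
f_dist, g_dist = Beta(1, 1), Beta(3, 1.2)
s, w = gausslegendre(21)              # nodes/weights on [-1, 1]
nodes = (s .+ 1) ./ 2                 # affine map to [0, 1]
weights = w ./ 2

h(x) = max(x, 0.5)
_pi = 0.3
density = _pi .* pdf.(f_dist, nodes) .+ (1 - _pi) .* pdf.(g_dist, nodes)
integral = dot(weights, h.(nodes) .* density)
```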
@@ -331,7 +332,7 @@ Here's the value function:
 sp = SearchProblem(; w_grid_size = 100, pi_grid_size = 100)
 v_init = fill(sp.c / (1 - sp.beta), sp.n_w, sp.n_pi)
 f(x) = T(sp, x)
-v = compute_fixed_point(f, v_init)
+v = fixedpoint(v -> T(sp, v), v_init).zero
 policy = get_greedy(sp, v)

 # Make functions for the linear interpolants of these
@@ -561,7 +562,7 @@ sp = SearchProblem(pi_grid_size = 50)

 phi_init = ones(sp.n_pi)
 f_ex1(x) = res_wage_operator(sp, x)
-w_bar = compute_fixed_point(f_ex1, phi_init)
+w_bar = fixedpoint(f_ex1, phi_init).zero

 plot(sp.pi_grid, w_bar, linewidth = 2, color = :black,
      fillrange = 0, fillalpha = 0.15, fillcolor = :blue)
@@ -592,7 +593,7 @@ Random.seed!(42)
 sp = SearchProblem(pi_grid_size = 50, F_a = 1, F_b = 1)
 phi_init = ones(sp.n_pi)
 g(x) = res_wage_operator(sp, x)
-w_bar_vals = compute_fixed_point(g, phi_init)
+w_bar_vals = fixedpoint(g, phi_init).zero
 w_bar = extrapolate(interpolate((sp.pi_grid,), w_bar_vals,
                                 Gridded(Linear())), Flat())

@@ -655,4 +656,3 @@ end
 plot(unempl_rate, linewidth = 2, label = "unemployment rate")
 vline!([change_date], color = :red, label = "")
 ```
-

lectures/more_julia/general_packages.md

Lines changed: 77 additions & 2 deletions
@@ -65,7 +65,7 @@ This is an adaptive Gauss-Kronrod integration technique that's relatively accura

 However, its adaptive implementation makes it slow and not well suited to inner loops.

-### Gaussian Quadrature
+### Gauss Legendre

 Alternatively, many integrals can be done efficiently with (non-adaptive) [Gaussian quadrature](https://en.wikipedia.org/wiki/Gaussian_quadrature).

@@ -82,6 +82,8 @@ f(x) = x^2
 With the `FastGaussQuadrature` package you may need to deal with affine transformations to the non-default domains yourself.


+### Gauss-Jacobi
+
 Another commonly used quadrature well suited to random variables with bounded support is [Gauss–Jacobi quadrature](https://en.wikipedia.org/wiki/Gauss–Jacobi_quadrature).

 It provides nodes $s_n\in[-1,1]$ and weights $\omega_n$ for
@@ -121,6 +123,79 @@ f(x) = x^2
 @show dot(w2, f.(x2)), mean(f.(rand(F2, 1000)));
 ```

+### Gauss Hermite
+
+Many expectations take the form $\mathbb{E}\left[f(X)\right]$ with $X \sim N(0,1)$ or, equivalently after a change of variables, integrals of the form $\int_{-\infty}^{\infty} f(x) \exp(-x^2)\, dx$.
+
+Gauss-Hermite quadrature provides nodes $x_n$ and weights $w_n$ such that $\mathbb{E}\left[f(X)\right] \approx \sum_{n=1}^N w_n f(x_n)$, where the `normalize = true` argument provides the appropriate rescaling for the standard normal distribution.
+
+Through a change of variables, this can also be used to calculate expectations of $N(\mu,\sigma^2)$ random variables:
+
+```{code-cell} julia
+function gauss_hermite_normal(N::Integer, mu::Real, sigma::Real)
+    s, w = FastGaussQuadrature.gausshermite(N; normalize = true)
+
+    # X ~ N(mu, sigma^2) means X = mu + sigma * Z with Z ~ N(0,1),
+    # so shift and scale the standard-normal nodes
+    x = mu .+ sigma .* s
+    return x, w
+end
+
+N = 32
+x, w = gauss_hermite_normal(N, 1.0, 0.1)
+x2, w2 = gauss_hermite_normal(N, 0.0, 0.05)
+f(x) = x^2
+@show dot(w, f.(x)), mean(f.(rand(Normal(1.0, 0.1), 1000)))
+@show dot(w2, f.(x2)), mean(f.(rand(Normal(0.0, 0.05), 1000)));
+```
+
+### Gauss Laguerre
+
+Another quadrature scheme, appropriate for integrals defined on $[0,\infty)$, is Gauss-Laguerre, which approximates integrals of the form $\int_0^{\infty} f(x) x^{\alpha} \exp(-x)\, dx \approx \sum_{n=1}^N w_n f(x_n)$.
+
+One application is to calculate expectations of exponentially distributed random variables. The PDF of an exponential distribution with parameter $\theta$ is $f(x;\theta) = \frac{1}{\theta}\exp(-x/\theta)$. With a change of variables we can use Gauss-Laguerre quadrature:
+
+```{code-cell} julia
+function gauss_laguerre_exponential(N, theta)
+    # E[f(X)] = \int_0^\infty f(x) (1/theta) e^{-x/theta} dx = \int_0^\infty f(theta*y) e^{-y} dy
+    s, w = FastGaussQuadrature.gausslaguerre(N) # alpha = 0 (default)
+    x = theta .* s
+    return x, w
+end
+
+N = 64
+theta = 0.5
+x, w = gauss_laguerre_exponential(N, theta)
+f(x) = x^2 + 1
+@show dot(w, f.(x)), mean(f.(rand(Exponential(theta), 1_000)))
+```
+
+Similarly, the Gamma distribution with shape parameter $\alpha$ and scale $\theta$ has PDF $f(x; \alpha, \theta) = \frac{x^{\alpha-1} e^{-x/\theta}}{\Gamma(\alpha) \theta^\alpha}$ for $x > 0$, with $\Gamma(\cdot)$ the gamma function.
+
+Using a change of variables and Gauss-Laguerre quadrature:
+
+```{code-cell} julia
+function gauss_laguerre_gamma(N, alpha, theta)
+    # For Gamma(shape=alpha, scale=theta) with pdf
+    #   x^{alpha-1} e^{-x/theta} / (Gamma(alpha) theta^alpha)
+    # change variables y = x/theta -> x = theta*y, dx = theta dy, so
+    #   E[f(X)] = 1/Gamma(alpha) * ∫_0^∞ f(theta*y) y^{alpha-1} e^{-y} dy
+    # FastGaussQuadrature.gausslaguerre(N, a) returns nodes/weights for
+    #   ∫_0^∞ g(y) y^a e^{-y} dy, so pass a = alpha - 1.
+
+    s, w = FastGaussQuadrature.gausslaguerre(N, alpha - 1)
+    x = theta .* s
+    w = w ./ SpecialFunctions.gamma(alpha)
+    return x, w
+end
+
+N = 256
+alpha = 7.0
+theta = 1.1
+x, w = gauss_laguerre_gamma(N, alpha, theta)
+f(x) = x^2 + 1
+@show dot(w, f.(x)), mean(f.(rand(Gamma(alpha, theta), 100_000)))
+```
+


 ## Interpolation
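For readers unsure what `normalize = true` does in `gausshermite`, here is the change of variables spelled out by hand (a sketch for comparison, not part of the lecture):

```julia
using FastGaussQuadrature, LinearAlgebra

# E[f(X)] for X ~ N(0,1) equals (1/sqrt(pi)) * sum_n w_n f(sqrt(2) * s_n),
# where (s, w) is the standard Gauss-Hermite rule with weight exp(-x^2)
f(x) = x^2
s, w = gausshermite(12)
manual = dot(w, f.(sqrt(2) .* s)) / sqrt(pi)

xn, wn = gausshermite(12; normalize = true)   # nodes/weights for the N(0,1) density
normalized = dot(wn, f.(xn))

manual ≈ normalized ≈ 1.0                     # E[X^2] = 1 for a standard normal
```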
@@ -175,7 +250,7 @@ y = log.(x) # corresponding y points

 interp = LinearInterpolation(x, y)

-xf = log.(range(1, exp(4), length = 100)) .+ 1 # finer grid
+xf = log.(range(1,exp(4), length = 100)) .+ 1 # finer grid

 plot(xf, interp.(xf), label = "linear")
 scatter!(x, y, label = "sampled data", markersize = 4, size = (800, 400))
