diff --git a/lectures/dynamic_programming/career.md b/lectures/dynamic_programming/career.md
index 1ca57060..13182bae 100644
--- a/lectures/dynamic_programming/career.md
+++ b/lectures/dynamic_programming/career.md
@@ -43,7 +43,7 @@ This exposition draws on the presentation in {cite}`Ljungqvist2012`, section 6.5
 
 ```{code-cell} julia
-using LinearAlgebra, Statistics
+using LinearAlgebra, Statistics, NLsolve
 ```
 
 ## Model
@@ -154,7 +154,7 @@ using Test
 ```
 
 ```{code-cell} julia
-using LaTeXStrings, Plots, QuantEcon, Distributions
+using LaTeXStrings, Plots, Distributions
 
 n = 50
 a_vals = [0.5, 1, 100]
@@ -266,7 +266,8 @@ Here's the value function
 wp = CareerWorkerProblem()
 v_init = fill(100.0, wp.N, wp.N)
 func(x) = update_bellman(wp, x)
-v = compute_fixed_point(func, v_init, max_iter = 500, verbose = false)
+sol = fixedpoint(func, v_init)
+v = sol.zero
 plot(linetype = :surface, wp.theta, wp.epsilon, transpose(v),
      xlabel = L"\theta",
@@ -309,7 +310,7 @@ In particular, modulo randomness, reproduce the following figure (where the hori
 :width: 100%
 ```
 
-Hint: To generate the draws from the distributions $F$ and $G$, use the type [DiscreteRV](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/discrete_rv.jl).
+Hint: To generate the draws from the distributions $F$ and $G$, you can use the `Categorical` distribution from the `Distributions` package.
 
 (career_ex2)=
 ### Exercise 2
@@ -357,15 +358,16 @@ wp = CareerWorkerProblem()
 
 function solve_wp(wp)
     v_init = fill(100.0, wp.N, wp.N)
     func(x) = update_bellman(wp, x)
-    v = compute_fixed_point(func, v_init, max_iter = 500, verbose = false)
+    sol = fixedpoint(func, v_init)
+    v = sol.zero
     optimal_policy = get_greedy(wp, v)
     return v, optimal_policy
 end
 
 v, optimal_policy = solve_wp(wp)
 
-F = DiscreteRV(wp.F_probs)
-G = DiscreteRV(wp.G_probs)
+F = Categorical(wp.F_probs)
+G = Categorical(wp.G_probs)
 
 function gen_path(T = 20)
     i = j = 1
@@ -375,9 +377,9 @@ function gen_path(T = 20)
     for t in 1:T
         # do nothing if stay put
         if optimal_policy[i, j] == 2 # new job
-            j = rand(G)[1]
+            j = rand(G)
         elseif optimal_policy[i, j] == 3 # new life
-            i, j = rand(F)[1], rand(G)[1]
+            i, j = rand(F), rand(G)
         end
         push!(theta_ind, i)
         push!(epsilon_ind, j)
@@ -512,4 +514,3 @@ You will see that the region for which the worker will stay put has grown
 because the distribution for $\epsilon$ has become more
 concentrated around the mean, making high-paying jobs less
 realistic.
-
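+
+As an aside, here is a minimal sketch of the index draws that `Categorical` produces, as used in the hint and solutions above (the probabilities below are made up purely for illustration):
+
+```{code-cell} julia
+d = Categorical([0.2, 0.5, 0.3]) # a toy distribution over the indices 1:3
+@show rand(d)    # a single index draw, as in rand(F) or rand(G) above
+@show rand(d, 5) # five independent draws
+```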
diff --git a/lectures/dynamic_programming/ifp.md b/lectures/dynamic_programming/ifp.md
index 11a5cf3c..67709126 100644
--- a/lectures/dynamic_programming/ifp.md
+++ b/lectures/dynamic_programming/ifp.md
@@ -375,8 +375,8 @@ using Test
 ```
 
 ```{code-cell} julia
-using LinearAlgebra, Statistics
-using BenchmarkTools, LaTeXStrings, Optim, Plots, QuantEcon, Random
+using LinearAlgebra, Statistics, Interpolations, NLsolve
+using BenchmarkTools, LaTeXStrings, Optim, Plots, Random, QuantEcon
 using Optim: converged, maximum, maximizer, minimizer, iterations
 ```
 
@@ -407,8 +407,8 @@ function T!(cp, V, out; ret_policy = false)
     z_idx = 1:length(z_vals)
 
     # value function when the shock index is z_i
-    vf = interp(asset_grid, V)
-
+    vf = LinearInterpolation((asset_grid, z_idx), V;
+                             extrapolation_bc = Interpolations.Flat())
     opt_lb = 1e-8
 
     # solve for RHS of Bellman equation
@@ -444,7 +444,8 @@ function K!(cp, c, out)
     gam = R * beta
 
     # policy function when the shock index is z_i
-    cf = interp(asset_grid, c)
+    cf = LinearInterpolation((asset_grid, z_idx), c;
+                             extrapolation_bc = Interpolations.Flat())
 
     # compute lower_bound for optimization
     opt_lb = 1e-8
@@ -525,7 +526,7 @@ In the Julia console, a comparison of the operators can be made as follows
 
 ```{code-cell} julia
 cp = ConsumerProblem()
-v, c, = initialize(cp)
+v, c = initialize(cp)
 ```
 
 ```{code-cell} julia
@@ -577,10 +578,9 @@ The following figure is a 45 degree diagram showing the law of motion for assets
 m = ConsumerProblem(r = 0.03, grid_max = 4)
 v_init, c_init = initialize(m)
-c = compute_fixed_point(c -> K(m, c),
-                        c_init,
-                        max_iter = 150,
-                        verbose = false)
+sol = fixedpoint(c -> K(m, c), c_init)
+c = sol.zero
+
 a = m.asset_grid
 R, z_vals = m.R, m.z_vals
@@ -711,10 +711,9 @@ legends = []
 for r_val in r_vals
     cp = ConsumerProblem(r = r_val)
     v_init, c_init = initialize(cp)
-    c = compute_fixed_point(x -> K(cp, x),
-                            c_init,
-                            max_iter = 150,
-                            verbose = false)
+    sol = fixedpoint(x -> K(cp, x), c_init)
+    c = sol.zero
+
     traces = push!(traces, c[:, 1])
     legends = push!(legends, L"r = %$(round(r_val, digits = 3))")
 end
@@ -741,10 +740,12 @@ function compute_asset_series(cp, T = 500_000; verbose = false)
     (; Pi, z_vals, R) = cp # simplify names
     z_idx = 1:length(z_vals)
     v_init, c_init = initialize(cp)
-    c = compute_fixed_point(x -> K(cp, x), c_init,
-                            max_iter = 150, verbose = false)
-    cf = interp(cp.asset_grid, c)
+    sol = fixedpoint(x -> K(cp, x), c_init)
+    c = sol.zero
+
+    cf = LinearInterpolation((cp.asset_grid, z_idx), c;
+                             extrapolation_bc = Interpolations.Flat())
 
     a = zeros(T + 1)
     z_seq = simulate(MarkovChain(Pi), T)
@@ -804,4 +805,3 @@ tags: [remove-cell]
     #test ys[1] ≈ 0.0:0.0016666666666666668:0.04 atol = 1e-4
 end
 ```
-
diff --git a/lectures/dynamic_programming/odu.md b/lectures/dynamic_programming/odu.md
index debd9cb9..f7af835a 100644
--- a/lectures/dynamic_programming/odu.md
+++ b/lectures/dynamic_programming/odu.md
@@ -160,8 +160,8 @@ using Test # At the head of every lecture.
 ```
 
 ```{code-cell} julia
-using LinearAlgebra, Statistics
-using Distributions, LaTeXStrings, Plots, QuantEcon, Interpolations
+using LinearAlgebra, Statistics, SpecialFunctions
+using Distributions, LaTeXStrings, Plots, Interpolations, FastGaussQuadrature, NLsolve
 
 w_max = 2
 x = range(0, w_max, length = 200)
@@ -209,7 +209,13 @@ The code is as follows.
 
 (odu_vfi_code)=
 ```{code-cell} julia
-# use key word argment
+function gauss_jacobi_dist(F::Beta, N)
+    s, wj = FastGaussQuadrature.gaussjacobi(N, F.β - 1, F.α - 1)
+    x = (s .+ 1) ./ 2
+    C = 2.0^(-(F.α + F.β - 1.0)) / SpecialFunctions.beta(F.α, F.β)
+    return x, C .* wj
+end
+
 function SearchProblem(; beta = 0.95, c = 0.6, F_a = 1, F_b = 1,
                        G_a = 3, G_b = 1.2, w_max = 2.0, w_grid_size = 40,
                        pi_grid_size = 40)
@@ -226,7 +232,9 @@ function SearchProblem(; beta = 0.95, c = 0.6, F_a = 1, F_b = 1,
     w_grid = range(0, w_max, length = w_grid_size)
     pi_grid = range(pi_min, pi_max, length = pi_grid_size)
 
-    nodes, weights = qnwlege(21, 0.0, w_max)
+    nodes, weights = gauss_jacobi_dist(F, 21) # uniform for default F = Beta(1, 1); f, g enter integrands below
+    nodes .*= w_max
+    weights .*= w_max
 
     return (; beta, c, F, G, f, g, n_w = w_grid_size, w_max,
@@ -251,21 +259,15 @@ function T!(sp, v, out;
     vf = extrapolate(interpolate((sp.w_grid, sp.pi_grid), v,
                                  Gridded(Linear())), Flat())
 
-    # set up quadrature nodes/weights
-    # q_nodes, q_weights = qnwlege(21, 0.0, w_max)
-
     for (w_i, w) in enumerate(sp.w_grid)
         # calculate v1
         v1 = w / (1 - beta)
 
         for (pi_j, _pi) in enumerate(sp.pi_grid)
-            # calculate v2
-            function integrand(m)
-                [vf(m[i], q.(Ref(sp), m[i], _pi)) *
-                 (_pi * f(m[i]) + (1 - _pi) * g(m[i])) for i in 1:length(m)]
-            end
-            integral = do_quad(integrand, nodes, weights)
-            # integral = do_quad(integrand, q_nodes, q_weights)
+            vals = vf.(nodes, q.(Ref(sp), nodes, _pi))
+            density = _pi * f.(nodes) + (1 - _pi) * g.(nodes)
+            integral = dot(weights, vals .* density)
+
             v2 = c + beta * integral
 
             # return policy if asked for, otherwise return max of values
@@ -294,14 +296,13 @@ function res_wage_operator!(sp, phi, out)
     phi_f = LinearInterpolation(sp.pi_grid, phi, extrapolation_bc = Line())
 
     # set up quadrature nodes/weights
-    q_nodes, q_weights = qnwlege(7, 0.0, sp.w_max)
+    q_nodes, q_weights = sp.quad_nodes, sp.quad_weights
 
     for (i, _pi) in enumerate(sp.pi_grid)
-        function integrand(x)
-            max.(x, phi_f.(q.(Ref(sp), x, _pi))) .*
-            (_pi * f(x) + (1 - _pi) * g(x))
-        end
-        integral = do_quad(integrand, q_nodes, q_weights)
+        vals = max.(q_nodes, phi_f.(q.(Ref(sp), q_nodes, _pi)))
+        density = _pi * f.(q_nodes) + (1 - _pi) * g.(q_nodes)
+        integral = dot(q_weights, vals .* density)
+
         out[i] = (1 - beta) * c + beta * integral
     end
 end
@@ -331,7 +332,7 @@ Here's the value function:
 sp = SearchProblem(; w_grid_size = 100, pi_grid_size = 100)
 v_init = fill(sp.c / (1 - sp.beta), sp.n_w, sp.n_pi)
 f(x) = T(sp, x)
-v = compute_fixed_point(f, v_init)
+v = fixedpoint(v -> T(sp, v), v_init).zero
 policy = get_greedy(sp, v)
 
 # Make functions for the linear interpolants of these
@@ -561,7 +562,7 @@ sp = SearchProblem(pi_grid_size = 50)
 phi_init = ones(sp.n_pi)
 f_ex1(x) = res_wage_operator(sp, x)
-w_bar = compute_fixed_point(f_ex1, phi_init)
+w_bar = fixedpoint(f_ex1, phi_init).zero
 
 plot(sp.pi_grid, w_bar, linewidth = 2, color = :black,
      fillrange = 0, fillalpha = 0.15, fillcolor = :blue)
@@ -592,7 +593,7 @@ Random.seed!(42)
 sp = SearchProblem(pi_grid_size = 50, F_a = 1, F_b = 1)
 phi_init = ones(sp.n_pi)
 g(x) = res_wage_operator(sp, x)
-w_bar_vals = compute_fixed_point(g, phi_init)
+w_bar_vals = fixedpoint(g, phi_init).zero
 w_bar = extrapolate(interpolate((sp.pi_grid,), w_bar_vals,
                                 Gridded(Linear())), Flat())
@@ -655,4 +656,3 @@ end
 plot(unempl_rate, linewidth = 2, label = "unemployment rate")
 vline!([change_date], color = :red, label = "")
 ```
-
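+
+As a quick sanity check of the `gauss_jacobi_dist` helper defined above (a local helper for this lecture, not a library routine), its weights should reproduce low-order Beta moments:
+
+```{code-cell} julia
+F_check = Beta(3, 1.2)
+x_check, w_check = gauss_jacobi_dist(F_check, 21)
+@show sum(w_check)          # weights integrate the Beta pdf, so ≈ 1
+@show dot(w_check, x_check) # ≈ mean(F_check) = 3 / (3 + 1.2)
+```
diff --git a/lectures/more_julia/general_packages.md b/lectures/more_julia/general_packages.md
index 50062391..060237fe 100644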
--- a/lectures/more_julia/general_packages.md
+++ b/lectures/more_julia/general_packages.md
@@ -65,7 +65,7 @@ This is an adaptive Gauss-Kronrod integration technique that's relatively accura
 
 However, its adaptive implementation makes it slow and not well suited to inner loops.
 
-### Gaussian Quadrature
+### Gauss Legendre
 
 Alternatively, many integrals can be done efficiently with (non-adaptive) [Gaussian quadrature](https://en.wikipedia.org/wiki/Gaussian_quadrature).
 
@@ -82,6 +82,8 @@ f(x) = x^2
 
 With the `FastGaussQuadrature` package you may need to deal with affine transformations to the non-default domains yourself.
 
+### Gauss Jacobi
+
 Another commonly used quadrature well suited to random variables with bounded support is [Gauss–Jacobi quadrature](https://en.wikipedia.org/wiki/Gauss–Jacobi_quadrature).
 
 It provides nodes $s_n\in[-1,1]$ and weights $\omega_n$ for
@@ -121,6 +123,79 @@ f(x) = x^2
 @show dot(w2, f.(x2)), mean(f.(rand(F2, 1000)));
 ```
 
+### Gauss Hermite
+
+Many expectations can be approximated as $\mathbb{E}\left[f(X)\right] \approx \sum_{n=1}^N w_n f(x_n)$ where $X \sim N(0,1)$; equivalently, after a change of variables, they are integrals of the form $\int_{-\infty}^{\infty} f(x) \exp(-x^2) dx$.
+
+Gauss-Hermite quadrature provides nodes and weights of this form; the `normalize = true` argument provides the appropriate rescaling for the standard normal distribution.
+
+A further change of variables extends this to expectations of $N(\mu,\sigma^2)$ random variables.
+
+```{code-cell} julia
+function gauss_hermite_normal(N::Integer, mu::Real, sigma::Real)
+    s, w = FastGaussQuadrature.gausshermite(N; normalize = true)
+
+    # X ~ N(mu, sigma^2) is distributed as mu + sigma * Z with Z ~ N(0, 1),
+    # so shift and scale the standard-normal nodes
+    x = mu .+ sigma .* s
+    return x, w
+end
+
+N = 32
+x, w = gauss_hermite_normal(N, 1.0, 0.1)
+x2, w2 = gauss_hermite_normal(N, 0.0, 0.05)
+f(x) = x^2
+@show dot(w, f.(x)), mean(f.(rand(Normal(1.0, 0.1), 1000)))
+@show dot(w2, f.(x2)), mean(f.(rand(Normal(0.0, 0.05), 1000)));
+```
+
+### Gauss Laguerre
+
+Another quadrature scheme, appropriate for integrals on $[0,\infty)$, is Gauss-Laguerre quadrature, which approximates integrals of the form $\int_0^{\infty} f(x) x^{\alpha} \exp(-x) dx \approx \sum_{n=1}^N w_n f(x_n)$.
+
+One application is to calculate expectations of exponential random variables. The PDF of an exponential distribution with scale parameter $\theta$ is $f(x;\theta) = \frac{1}{\theta}\exp(-x/\theta)$. With a change of variables we can use Gauss-Laguerre quadrature
+
+```{code-cell} julia
+function gauss_laguerre_exponential(N, theta)
+    # E[f(X)] = \int_0^\infty f(x) (1/theta) e^{-x/theta} dx
+    #         = \int_0^\infty f(theta*y) e^{-y} dy
+    s, w = FastGaussQuadrature.gausslaguerre(N) # alpha = 0 (default)
+    x = theta .* s
+    return x, w
+end
+
+N = 64
+theta = 0.5
+x, w = gauss_laguerre_exponential(N, theta)
+f(x) = x^2 + 1
+@show dot(w, f.(x)), mean(f.(rand(Exponential(theta), 1_000)))
+```
+
+Similarly, the Gamma distribution with shape parameter $\alpha$ and scale $\theta$ has PDF $f(x; \alpha, \theta) = \frac{x^{\alpha-1} e^{-x/\theta}}{\Gamma(\alpha) \theta^\alpha}$ for $x > 0$, with $\Gamma(\cdot)$ the Gamma function.
+
+Using a change of variables and Gauss-Laguerre quadrature
+
+```{code-cell} julia
+function gauss_laguerre_gamma(N, alpha, theta)
+    # For Gamma(shape = alpha, scale = theta) with pdf
+    #     x^{alpha-1} e^{-x/theta} / (Gamma(alpha) theta^alpha)
+    # change variables y = x/theta -> x = theta*y, dx = theta dy, so that
+    # E[f(X)] = 1/Gamma(alpha) * ∫_0^∞ f(theta*y) y^{alpha-1} e^{-y} dy.
+    # FastGaussQuadrature.gausslaguerre(N, a) returns nodes/weights for
+    # ∫_0^∞ g(y) y^a e^{-y} dy, so pass a = alpha - 1.
+    s, w = FastGaussQuadrature.gausslaguerre(N, alpha - 1)
+    x = theta .* s
+    w = w ./ SpecialFunctions.gamma(alpha)
+    return x, w
+end
+
+N = 256
+alpha = 7.0
+theta = 1.1
+x, w = gauss_laguerre_gamma(N, alpha, theta)
+f(x) = x^2 + 1
+@show dot(w, f.(x)), mean(f.(rand(Gamma(alpha, theta), 100_000)))
+```
 
 ## Interpolation
 
diff --git a/lectures/tools_and_techniques/stationary_densities.md b/lectures/tools_and_techniques/stationary_densities.md
index 26be7a12..bf2573a7 100644
--- a/lectures/tools_and_techniques/stationary_densities.md
+++ b/lectures/tools_and_techniques/stationary_densities.md
@@ -433,16 +433,13 @@ In fact much stronger convergence results are true (see, for example, [this pape
 
 ### Implementation
 
-A function which calls an `LAE` type for estimating densities by this technique can be found in [lae.jl](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/lae.jl).
+We'll use a simple helper `lae_est(p, X, ygrid)` that evaluates the look-ahead estimator directly, where
 
-This function returns the right-hand side of {eq}`statd_lae1` using
+* `p` is the stochastic kernel
+* `X` is a vector of draws of the previous-period state
+* `ygrid` is a vector of evaluation points
 
-* an object of type `LAE` that stores the stochastic kernel and the observations
-* the value $y$ as its second argument
-
-The function is vectorized, in the sense that if `psi` is such an instance and `y` is an array, then the call `psi(y)` acts elementwise.
-
-(This is the reason that we reshaped `X` and `y` inside the type --- to make vectorization work)
+The helper reshapes its inputs so that `p.(X, ygrid)` broadcasts over every combination of draw and evaluation point, then returns the sample mean across draws.
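+
+To see the broadcasting pattern in isolation, here is a toy sketch (the kernel `p_toy` and the small vectors below are stand-ins for illustration, not the growth-model kernel defined later):
+
+```{code-cell} julia
+X_toy = [1.0, 2.0, 3.0]       # n = 3 draws of the previous-period state
+y_toy = [0.5, 1.5]            # m = 2 evaluation points
+p_toy(x, y) = exp(-(y - x)^2) # a stand-in kernel
+vals = p_toy.(reshape(X_toy, :, 1), reshape(y_toy, 1, :)) # n × m matrix
+vec(mean(vals, dims = 1))     # length-m look-ahead estimate
+```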
 
 ### Example
 
@@ -477,6 +474,13 @@ function p(x, y)
     return pdf.(phi, pdf_arg) ./ d
 end
 
+function lae_est(p, X, ygrid)
+    Xmat = reshape(X, :, 1)
+    Ymat = reshape(ygrid, 1, :)
+    psi_vals = mean(p.(Xmat, Ymat), dims = 1)
+    return dropdims(psi_vals, dims = 1)
+end
+
 n = 10000 # Number of observations at each date t
 T = 30    # Compute density of k_t at 1,...,T+1
 
@@ -490,16 +494,16 @@ for t in 1:(T - 1)
     k[:, t + 1] = s * A[:, t] .* k[:, t] .^ alpha + (1 - delta) .* k[:, t]
 end
 
-# Generate T instances of LAE using this data, one for each date t
-laes = [LAE(p, k[:, t]) for t in T:-1:1]
+# Store draws for each date t
+laes = [k[:, t] for t in T:-1:1]
 
 # Plot
 ygrid = range(0.01, 4, length = 200)
 laes_plot = []
 colors = []
 for i in 1:T
-    psi = laes[i]
-    push!(laes_plot, lae_est(psi, ygrid))
+    lae = laes[i]
+    push!(laes_plot, lae_est(p, lae, ygrid))
     push!(colors, RGBA(0, 0, 0, 1 - (i - 1) / T))
 end
 plot(ygrid, laes_plot, color = reshape(colors, 1, length(colors)), lw = 2,
@@ -513,7 +517,6 @@ plot!(title = t)
 tags: [remove-cell]
 ---
 @testset "First Figure Tests" begin
-    # @test laes[2].X[4] ≈ 2.606090690969538
     @test length(ygrid) == 200 && ygrid[1] ≈ 0.01 && ygrid[end] ≈ 4.0
     # @test k[5, 5] ≈ 0.8597155601089135
 end
@@ -943,7 +946,7 @@ for t in 1:(n - 1)
     X[t + 1] = theta * abs(X[t]) + d * Z[t]
 end
 
-psi_est(a) = lae_est(LAE(p_TAR, X), a)
+psi_est(a) = lae_est(p_TAR, X, a)
 k_est = kde(X)
 
 ys = range(-3, 3, length = 200)
@@ -1012,12 +1015,12 @@ for i in 1:4
         k[:, t + 1] = s * A[:, t] .* k[:, t] .^ alpha + (1 - delta) .* k[:, t]
     end
 
-    # Generate T instances of LAE using this data, one for each date t
-    laes = [LAE(p_growth, k[:, t]) for t in T:-1:1]
+    # Store draws for each date t
+    laes = [k[:, t] for t in T:-1:1]
 
     ind = i
     for j in 1:T
-        psi = laes[j]
-        laes_plot[:, ind] = lae_est(psi, ygrid)
+        lae = laes[j]
+        laes_plot[:, ind] = lae_est(p_growth, lae, ygrid)
         ind = ind + 4
         push!(colors, RGBA(0, 0, 0, 1 - (j - 1) / T))
     end
@@ -1034,7 +1037,6 @@ plot(ygrid, laes_plot, layout = (2, 2), color = colors,
 tags: [remove-cell]
 ---
 @testset "Solution 2 Tests" begin
-    # @test laes[3].X[4] ≈ 3.165182625666698
    @test length(ygrid) == 150 && ygrid[end] ≈ 6.5 && ygrid[1] ≈ 0.01
 end
 ```
@@ -1107,4 +1109,3 @@ By the definition of $V$, we have $F_V(v) = \mathbb P \{ a + b U \leq v \} = \ma
 In other words, $F_V(v) = F_U ( (v - a)/b )$.
 
 Differentiating with respect to $v$ yields {eq}`statd_dv`.
-
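+
+As a numerical sanity check of this derivation (illustrative only; any continuous distribution with a known CDF would do), compare the formula with a finite-difference derivative of $F_V$:
+
+```{code-cell} julia
+a, b, v = 1.0, 2.0, 2.3
+U, h = Beta(2, 2), 1e-6
+fV_formula = pdf(U, (v - a) / b) / b # d/dv of F_U((v - a)/b)
+fV_numeric = (cdf(U, (v + h - a) / b) - cdf(U, (v - h - a) / b)) / (2h)
+fV_formula ≈ fV_numeric
+```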