From d2da05b697feb64ddaafb317c63f42413e1a14c1 Mon Sep 17 00:00:00 2001 From: Jesse Perla Date: Wed, 5 Nov 2025 16:12:43 -0800 Subject: [PATCH 1/2] Julia 1.12 package bump and removal of lectures --- .github/workflows/cache.yml | 2 +- .github/workflows/ci.yml | 2 +- .github/workflows/format.yml | 2 +- .github/workflows/publish.yml | 2 +- lectures/Manifest.toml | 1131 +++++----- lectures/Project.toml | 18 +- lectures/_config.yml | 4 +- lectures/_toc.yml | 16 - lectures/about_lectures.md | 2 +- lectures/continuous_time/covid_sde.md | 2 +- lectures/continuous_time/seir_model.md | 2 +- lectures/dynamic_programming/career.md | 2 +- .../coleman_policy_iter.md | 2 +- lectures/dynamic_programming/discrete_dp.md | 2 +- .../dynamic_programming/egm_policy_iter.md | 2 +- lectures/dynamic_programming/ifp.md | 2 +- lectures/dynamic_programming/jv.md | 2 +- lectures/dynamic_programming/lqcontrol.md | 4 +- lectures/dynamic_programming/mccall_model.md | 2 +- .../mccall_model_with_separation.md | 2 +- lectures/dynamic_programming/odu.md | 2 +- lectures/dynamic_programming/optgrowth.md | 2 +- lectures/dynamic_programming/perm_income.md | 2 +- .../dynamic_programming/perm_income_cons.md | 2 +- lectures/dynamic_programming/robustness.md | 2 +- lectures/dynamic_programming/smoothing.md | 15 +- lectures/dynamic_programming/wald_friedman.md | 2 +- lectures/dynamic_programming_squared/amss.md | 1707 --------------- .../dynamic_programming_squared/dyn_stack.md | 1451 ------------- .../dynamic_programming_squared/lqramsey.md | 984 --------- .../opt_tax_recur.md | 1893 ----------------- .../fundamental_types.md | 2 +- .../getting_started_julia/getting_started.md | 6 +- .../introduction_to_types.md | 2 +- .../getting_started_julia/julia_by_example.md | 2 +- .../getting_started_julia/julia_essentials.md | 2 +- .../introduction_dynamics/ar1_processes.md | 2 +- .../introduction_dynamics/finite_markov.md | 2 +- lectures/introduction_dynamics/kalman.md | 2 +- 
.../introduction_dynamics/linear_models.md | 2 +- .../introduction_dynamics/scalar_dynam.md | 2 +- lectures/introduction_dynamics/short_path.md | 2 +- .../introduction_dynamics/wealth_dynamics.md | 2 +- .../more_julia/data_statistical_packages.md | 81 +- lectures/more_julia/general_packages.md | 2 +- lectures/more_julia/generic_programming.md | 2 +- .../optimization_solver_packages.md | 27 +- lectures/multi_agent_models/aiyagari.md | 2 +- lectures/multi_agent_models/arellano.md | 2 +- lectures/multi_agent_models/harrison_kreps.md | 2 +- lectures/multi_agent_models/lake_model.md | 2 +- lectures/multi_agent_models/lucas_model.md | 2 +- lectures/multi_agent_models/markov_asset.md | 2 +- lectures/multi_agent_models/markov_perf.md | 2 +- lectures/multi_agent_models/matsuyama.md | 2 +- .../rational_expectations.md | 2 +- lectures/multi_agent_models/schelling.md | 2 +- .../multi_agent_models/uncertainty_traps.md | 2 +- .../software_engineering/need_for_speed.md | 2 +- lectures/software_engineering/testing.md | 2 +- .../software_engineering/tools_editors.md | 6 +- .../software_engineering/version_control.md | 2 +- lectures/status.md | 2 +- .../additive_functionals.md | 947 --------- lectures/time_series_models/arma.md | 931 -------- .../time_series_models/classical_filtering.md | 1224 ----------- lectures/time_series_models/estspec.md | 624 ------ lectures/time_series_models/lu_tricks.md | 1288 ----------- .../multiplicative_functionals.md | 824 ------- lectures/tools_and_techniques/geom_series.md | 2 +- .../iterative_methods_sparsity.md | 2 +- .../tools_and_techniques/linear_algebra.md | 2 +- lectures/tools_and_techniques/lln_clt.md | 2 +- .../numerical_linear_algebra.md | 4 +- lectures/tools_and_techniques/orth_proj.md | 2 +- .../stationary_densities.md | 5 +- lectures/troubleshooting.md | 2 +- lectures/zreferences.md | 2 +- 78 files changed, 644 insertions(+), 12658 deletions(-) delete mode 100644 lectures/dynamic_programming_squared/amss.md delete mode 100644 
lectures/dynamic_programming_squared/dyn_stack.md delete mode 100644 lectures/dynamic_programming_squared/lqramsey.md delete mode 100644 lectures/dynamic_programming_squared/opt_tax_recur.md delete mode 100644 lectures/time_series_models/additive_functionals.md delete mode 100644 lectures/time_series_models/arma.md delete mode 100644 lectures/time_series_models/classical_filtering.md delete mode 100644 lectures/time_series_models/estspec.md delete mode 100644 lectures/time_series_models/lu_tricks.md delete mode 100644 lectures/time_series_models/multiplicative_functionals.md diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index 34f19f48..dc46d97e 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -37,7 +37,7 @@ jobs: - name: Set up Julia uses: julia-actions/setup-julia@v2 with: - version: 1.11.1 + version: 1.12.1 - name: Install IJulia and Setup Project shell: bash run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f8b97f29..a121478a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,7 +32,7 @@ jobs: - name: Set up Julia uses: julia-actions/setup-julia@v2 with: - version: 1.11.1 + version: 1.12.1 - name: Install IJulia and Setup Project shell: bash run: | diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index d2078e38..5b7f8bc3 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -17,7 +17,7 @@ jobs: - name: Setup Julia uses: julia-actions/setup-julia@v2 with: - version: 1.11.1 + version: 1.12.1 - name: Install JuliaFormatter.jl run: julia -e 'import Pkg; Pkg.add("JuliaFormatter")' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index fa1ace81..e227c0c7 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -35,7 +35,7 @@ jobs: - name: Set up Julia uses: julia-actions/setup-julia@v2 with: - version: 1.11.1 + version: 1.12.1 - name: Install IJulia and Setup 
Project shell: bash run: | diff --git a/lectures/Manifest.toml b/lectures/Manifest.toml index cd49b549..3cd7f356 100644 --- a/lectures/Manifest.toml +++ b/lectures/Manifest.toml @@ -1,8 +1,8 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.11.5" +julia_version = "1.12.1" manifest_format = "2.0" -project_hash = "a8a5aa0dbeab0aafd1a2a0c783b97e0650c8b206" +project_hash = "ded1023db51cdcd062d98e167751e8d33159e2b4" [[deps.ADTypes]] git-tree-sha1 = "27cecae79e5cc9935255f90c53bb831cc3c870d7" @@ -21,12 +21,6 @@ git-tree-sha1 = "45a1272e3f809d36431e57ab22703c6896b8908f" uuid = "14f7f29c-3bd6-536c-9a0b-7339e30b5a3e" version = "0.5.3" -[[deps.ASL_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "6252039f98492252f9e47c312c8ffda0e3b9e78d" -uuid = "ae81ac8f-d209-56e5-92de-9978fef736f9" -version = "0.1.3+0" - [[deps.AbstractFFTs]] deps = ["LinearAlgebra"] git-tree-sha1 = "d92ad398961a3ed262d8bf04a1a2b8340f915fef" @@ -168,9 +162,9 @@ version = "1.1.0" [[deps.BandedMatrices]] deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra", "PrecompileTools"] -git-tree-sha1 = "e35c672b239c5105f597963c33e740eeb46cf0ab" +git-tree-sha1 = "4826c9fe6023a87029e54870ad1a9800c7ea6623" uuid = "aae01518-5342-5314-be14-df237901396f" -version = "1.9.4" +version = "1.10.1" [deps.BandedMatrices.extensions] BandedMatricesSparseArraysExt = "SparseArrays" @@ -186,14 +180,19 @@ version = "1.11.0" [[deps.BenchmarkTools]] deps = ["Compat", "JSON", "Logging", "Printf", "Profile", "Statistics", "UUIDs"] -git-tree-sha1 = "e38fbc49a620f5d0b660d7f543db1009fe0f8336" +git-tree-sha1 = "7fecfb1123b8d0232218e2da0c213004ff15358d" uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -version = "1.6.0" +version = "1.6.3" + +[[deps.Bessels]] +git-tree-sha1 = "4435559dc39793d53a9e3d278e185e920b4619ef" +uuid = "0e736298-9ec6-45e8-9647-e4fc86a2fe38" +version = "0.2.8" [[deps.Bijections]] -git-tree-sha1 = "6aaafea90a56dc1fc8cbc15e3cf26d6bc81eb0a3" +git-tree-sha1 = 
"a2d308fcd4c2fb90e943cf9cd2fbfa9c32b69733" uuid = "e2ed5e7c-b2de-5872-ae92-c73ca462fb04" -version = "0.1.10" +version = "0.2.2" [[deps.BitFlags]] git-tree-sha1 = "0691e34b3bb8be9307330f88d1a3c3f25466c24d" @@ -208,9 +207,9 @@ version = "0.1.6" [[deps.BracketingNonlinearSolve]] deps = ["CommonSolve", "ConcreteStructs", "NonlinearSolveBase", "PrecompileTools", "Reexport", "SciMLBase"] -git-tree-sha1 = "a9014924595b7a2c1dd14aac516e38fa10ada656" +git-tree-sha1 = "03100f03a58e14c60ba0a465e6f1ac9450eb495c" uuid = "70df07ce-3d50-431d-a3e7-ca6ddb60ac1e" -version = "1.3.0" +version = "1.6.0" weakdeps = ["ChainRulesCore", "ForwardDiff"] [deps.BracketingNonlinearSolve.extensions] @@ -234,36 +233,12 @@ git-tree-sha1 = "f3a21d7fc84ba618a779d1ed2fcca2e682865bab" uuid = "2a0fbf3d-bb9c-48f3-b0a9-814d99fd7ab9" version = "0.2.7" -[[deps.CSV]] -deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", "PrecompileTools", "SentinelArrays", "Tables", "Unicode", "WeakRefStrings", "WorkerUtilities"] -git-tree-sha1 = "deddd8725e5e1cc49ee205a1964256043720a6c3" -uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" -version = "0.10.15" - [[deps.Cairo_jll]] deps = ["Artifacts", "Bzip2_jll", "CompilerSupportLibraries_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"] git-tree-sha1 = "fde3bf89aead2e723284a8ff9cdf5b551ed700e8" uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a" version = "1.18.5+0" -[[deps.CategoricalArrays]] -deps = ["DataAPI", "Future", "Missings", "Printf", "Requires", "Statistics", "Unicode"] -git-tree-sha1 = "1568b28f91293458345dabba6a5ea3f183250a61" -uuid = "324d7699-5711-5eae-9e2f-1d82baa6b597" -version = "0.10.8" -weakdeps = ["JSON", "RecipesBase", "SentinelArrays", "StructTypes"] - - [deps.CategoricalArrays.extensions] - CategoricalArraysJSONExt = "JSON" - CategoricalArraysRecipesBaseExt = "RecipesBase" - 
CategoricalArraysSentinelArraysExt = "SentinelArrays" - CategoricalArraysStructTypesExt = "StructTypes" - -[[deps.Chain]] -git-tree-sha1 = "9ae9be75ad8ad9d26395bf625dea9beac6d519f1" -uuid = "8be319e6-bccf-4806-a6f7-6fae938471bc" -version = "0.6.0" - [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra"] git-tree-sha1 = "e4c6a16e77171a5f5e25e9646617ab1c276c5607" @@ -286,12 +261,6 @@ git-tree-sha1 = "3e22db924e2945282e70c33b75d4dde8bfa44c94" uuid = "aaaa29a8-35af-508c-8bc3-b662a17a0fe5" version = "0.15.8" -[[deps.CodecBzip2]] -deps = ["Bzip2_jll", "TranscodingStreams"] -git-tree-sha1 = "84990fa864b7f2b4901901ca12736e45ee79068c" -uuid = "523fee87-0ab8-5b00-afb7-3ecf72e48cfd" -version = "0.8.5" - [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] git-tree-sha1 = "962834c22b66e32aa10f7611c08c8ca4e20749a9" @@ -364,7 +333,7 @@ weakdeps = ["Dates", "LinearAlgebra"] [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -version = "1.1.1+0" +version = "1.3.0+1" [[deps.CompositeTypes]] git-tree-sha1 = "bce26c3dab336582805503bed209faab1c279768" @@ -391,6 +360,12 @@ git-tree-sha1 = "d9d26935a0bcffc87d2613ce14c527c99fc543fd" uuid = "f0e56b4a-5159-44fe-b623-3e5288b988bb" version = "2.5.0" +[[deps.ConsoleProgressMonitor]] +deps = ["Logging", "ProgressMeter"] +git-tree-sha1 = "3ab7b2136722890b9af903859afcf457fa3059e8" +uuid = "88cd18e8-d9cc-4ea6-8889-5259c0d15c8b" +version = "0.1.2" + [[deps.ConstructionBase]] git-tree-sha1 = "b4b092499347b18a015186eae3042f72267106cb" uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" @@ -419,10 +394,14 @@ uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f" version = "4.1.1" [[deps.DSP]] -deps = ["Compat", "FFTW", "IterTools", "LinearAlgebra", "Polynomials", "Random", "Reexport", "SpecialFunctions", "Statistics"] -git-tree-sha1 = "0df00546373af8eee1598fb4b2ba480b1ebe895c" +deps = ["Bessels", "FFTW", "IterTools", "LinearAlgebra", "Polynomials", "Random", "Reexport", "SpecialFunctions", 
"Statistics"] +git-tree-sha1 = "5989debfc3b38f736e69724818210c67ffee4352" uuid = "717857b8-e6f2-59f4-9121-6e50c889abd2" -version = "0.7.10" +version = "0.8.4" +weakdeps = ["OffsetArrays"] + + [deps.DSP.extensions] + OffsetArraysExt = "OffsetArrays" [[deps.DataAPI]] git-tree-sha1 = "abe83f3a2f1b857aac70ef8b269080af17764bbe" @@ -431,32 +410,31 @@ version = "1.16.0" [[deps.DataFrames]] deps = ["Compat", "DataAPI", "DataStructures", "Future", "InlineStrings", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrecompileTools", "PrettyTables", "Printf", "Random", "Reexport", "SentinelArrays", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"] -git-tree-sha1 = "fb61b4812c49343d7ef0b533ba982c46021938a6" +git-tree-sha1 = "d8928e9169ff76c6281f39a659f9bca3a573f24c" uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" -version = "1.7.0" - -[[deps.DataFramesMeta]] -deps = ["Chain", "DataFrames", "MacroTools", "OrderedCollections", "Reexport", "TableMetadataTools"] -git-tree-sha1 = "21a4335f249f8b5f311d00d5e62938b50ccace4e" -uuid = "1313f7d8-7da2-5740-9ea0-a2ca25f37964" -version = "0.15.4" +version = "1.8.1" [[deps.DataInterpolations]] deps = ["EnumX", "FindFirstFunctions", "ForwardDiff", "LinearAlgebra", "PrettyTables", "RecipesBase", "Reexport"] -git-tree-sha1 = "b6fc25b5dbff016d8aae1e0ae2740347bccb6f65" +git-tree-sha1 = "58ae0a38dd3002963a3c8d4af097e660cf409c38" uuid = "82cc6244-b520-54b8-b5a6-8a565e85f1d0" -version = "8.0.1" +version = "8.6.1" [deps.DataInterpolations.extensions] DataInterpolationsChainRulesCoreExt = "ChainRulesCore" + DataInterpolationsMakieExt = "Makie" DataInterpolationsOptimExt = "Optim" DataInterpolationsRegularizationToolsExt = "RegularizationTools" + DataInterpolationsSparseConnectivityTracerExt = ["SparseConnectivityTracer", "FillArrays"] DataInterpolationsSymbolicsExt = "Symbolics" [deps.DataInterpolations.weakdeps] ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + 
FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" + Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" Optim = "429524aa-4258-5aef-a3af-852621145aeb" RegularizationTools = "29dad682-9a27-4bc3-9c72-016788665182" + SparseConnectivityTracer = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" @@ -471,12 +449,6 @@ git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" version = "1.0.0" -[[deps.DataValues]] -deps = ["DataValueInterfaces", "Dates"] -git-tree-sha1 = "d88a19299eba280a6d062e135a43f00323ae70bf" -uuid = "e7dc6d0d-1eca-5fa6-8ad6-5aecde8b7ea5" -version = "0.4.13" - [[deps.Dates]] deps = ["Printf"] uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" @@ -495,15 +467,14 @@ uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" version = "1.9.1" [[deps.DiffEqBase]] -deps = ["ArrayInterface", "ConcreteStructs", "DataStructures", "DocStringExtensions", "EnumX", "EnzymeCore", "FastBroadcast", "FastClosures", "FastPower", "FunctionWrappers", "FunctionWrappersWrappers", "LinearAlgebra", "Logging", "Markdown", "MuladdMacro", "Parameters", "PrecompileTools", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SciMLStructures", "Setfield", "Static", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "TruncatedStacktraces"] -git-tree-sha1 = "1bcd3a5c585c477e5d0595937ea7b5adcda6c621" +deps = ["ArrayInterface", "ConcreteStructs", "DocStringExtensions", "EnzymeCore", "FastBroadcast", "FastClosures", "FastPower", "FunctionWrappers", "FunctionWrappersWrappers", "LinearAlgebra", "Logging", "Markdown", "MuladdMacro", "PrecompileTools", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SciMLStructures", "Setfield", "Static", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "TruncatedStacktraces"] +git-tree-sha1 = "087632db966c90079a5534e4147afea9136ca39a" uuid = 
"2b5f629d-d688-5b77-993f-72d75c75574e" -version = "6.174.0" +version = "6.190.2" [deps.DiffEqBase.extensions] DiffEqBaseCUDAExt = "CUDA" DiffEqBaseChainRulesCoreExt = "ChainRulesCore" - DiffEqBaseDistributionsExt = "Distributions" DiffEqBaseEnzymeExt = ["ChainRulesCore", "Enzyme"] DiffEqBaseForwardDiffExt = ["ForwardDiff"] DiffEqBaseGTPSAExt = "GTPSA" @@ -511,6 +482,7 @@ version = "6.174.0" DiffEqBaseMPIExt = "MPI" DiffEqBaseMeasurementsExt = "Measurements" DiffEqBaseMonteCarloMeasurementsExt = "MonteCarloMeasurements" + DiffEqBaseMooncakeExt = "Mooncake" DiffEqBaseReverseDiffExt = "ReverseDiff" DiffEqBaseSparseArraysExt = "SparseArrays" DiffEqBaseTrackerExt = "Tracker" @@ -527,6 +499,7 @@ version = "6.174.0" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" Measurements = "eff96d63-e80a-5855-80a2-b1b0885c5ab7" MonteCarloMeasurements = "0987c9cc-fe09-11e8-30f0-b96dd679fdca" + Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" @@ -570,9 +543,9 @@ version = "1.15.1" [[deps.DifferentiationInterface]] deps = ["ADTypes", "LinearAlgebra"] -git-tree-sha1 = "529bebbc74b36a4cfea09dd2aecb1288cd713a6d" +git-tree-sha1 = "961e5d49b64d63b3f2201b0de60065876f4be551" uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" -version = "0.7.9" +version = "0.7.10" [deps.DifferentiationInterface.extensions] DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore" @@ -636,9 +609,9 @@ version = "1.11.0" [[deps.Distributions]] deps = ["AliasTables", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns"] -git-tree-sha1 = "6d8b535fd38293bc54b88455465a1386f8ac1c3c" +git-tree-sha1 = "3bc002af51045ca3b47d2e1787d6ce02e68b943a" uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" -version = "0.25.119" +version = "0.25.122" [deps.Distributions.extensions] 
DistributionsChainRulesCoreExt = "ChainRulesCore" @@ -685,6 +658,31 @@ git-tree-sha1 = "bddad79635af6aec424f53ed8aad5d7555dc6f00" uuid = "4e289a0a-7415-4d19-859d-a7e5c4648b56" version = "1.0.5" +[[deps.Enzyme]] +deps = ["CEnum", "EnzymeCore", "Enzyme_jll", "GPUCompiler", "InteractiveUtils", "LLVM", "Libdl", "LinearAlgebra", "ObjectFile", "PrecompileTools", "Preferences", "Printf", "Random", "SparseArrays"] +git-tree-sha1 = "b36b64b70d4dd2d5473ffecfd9bf298fe7dcaf5b" +uuid = "7da242da-08ed-463a-9acd-ee780be4f1d9" +version = "0.13.96" + + [deps.Enzyme.extensions] + EnzymeBFloat16sExt = "BFloat16s" + EnzymeChainRulesCoreExt = "ChainRulesCore" + EnzymeDynamicPPLExt = ["ADTypes", "DynamicPPL"] + EnzymeGPUArraysCoreExt = "GPUArraysCore" + EnzymeLogExpFunctionsExt = "LogExpFunctions" + EnzymeSpecialFunctionsExt = "SpecialFunctions" + EnzymeStaticArraysExt = "StaticArrays" + + [deps.Enzyme.weakdeps] + ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" + BFloat16s = "ab4f0b2a-ad5b-11e8-123f-65d77653426b" + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + DynamicPPL = "366bfd00-2699-11ea-058f-f148b4cae6d8" + GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" + LogExpFunctions = "2ab3a3ac-af41-5b50-aa03-7779005ae688" + SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" + StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" + [[deps.EnzymeCore]] git-tree-sha1 = "f91e7cb4c17dae77c490b75328f22a226708557c" uuid = "f151be2c-9106-41f4-ab19-57ee4f262869" @@ -694,6 +692,12 @@ weakdeps = ["Adapt"] [deps.EnzymeCore.extensions] AdaptExt = "Adapt" +[[deps.Enzyme_jll]] +deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] +git-tree-sha1 = "a24799d1ca416f80b2c589b66d82867db3f70624" +uuid = "7cc45869-7501-5eee-bdea-0790c847d4ef" +version = "0.0.207+0" + [[deps.EpollShim_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] git-tree-sha1 = "8a4be429317c42cfae6a7fc03c31bad1970c310d" @@ -712,12 +716,6 @@ git-tree-sha1 = "27af30de8b5445644e8ffe3bcb0d72049c089cf1" 
uuid = "2e619515-83b5-522b-bb60-26c02a35a201" version = "2.7.3+0" -[[deps.Expectations]] -deps = ["Distributions", "FastGaussQuadrature", "LinearAlgebra", "SpecialFunctions"] -git-tree-sha1 = "5b46996cf6fc4ad19d1122884c36faa62ed0a7d2" -uuid = "2fe49d83-0758-5602-8f54-1f90ad0d522b" -version = "1.9.2" - [[deps.ExponentialUtilities]] deps = ["Adapt", "ArrayInterface", "GPUArraysCore", "GenericSchur", "LinearAlgebra", "PrecompileTools", "Printf", "SparseArrays", "libblastrampoline_jll"] git-tree-sha1 = "cae251c76f353e32d32d76fae2fea655eab652af" @@ -775,9 +773,9 @@ version = "0.3.2" [[deps.FastGaussQuadrature]] deps = ["LinearAlgebra", "SpecialFunctions", "StaticArrays"] -git-tree-sha1 = "fd923962364b645f3719855c88f7074413a6ad92" +git-tree-sha1 = "0044e9f5e49a57e88205e8f30ab73928b05fe5b6" uuid = "442a2c76-b920-505d-bb47-c5924d526838" -version = "1.0.2" +version = "1.1.0" [[deps.FastPower]] git-tree-sha1 = "e47c70bf430175e077d1955d7f04923504acc74c" @@ -802,27 +800,6 @@ version = "1.2.0" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" -[[deps.FileIO]] -deps = ["Pkg", "Requires", "UUIDs"] -git-tree-sha1 = "d60eb76f37d7e5a40cc2e7c36974d864b82dc802" -uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" -version = "1.17.1" -weakdeps = ["HTTP"] - - [deps.FileIO.extensions] - HTTPExt = "HTTP" - -[[deps.FilePathsBase]] -deps = ["Compat", "Dates"] -git-tree-sha1 = "3bab2c5aa25e7840a4b065805c0cdfc01f3068d2" -uuid = "48062228-2e41-5def-b9a4-89aafe57970f" -version = "0.9.24" -weakdeps = ["Mmap", "Test"] - - [deps.FilePathsBase.extensions] - FilePathsBaseMmapExt = "Mmap" - FilePathsBaseTestExt = "Test" - [[deps.FileWatching]] uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" version = "1.11.0" @@ -862,26 +839,6 @@ version = "2.29.0" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" -[[deps.FixedEffectModels]] -deps = ["DataFrames", "FixedEffects", "LinearAlgebra", "PrecompileTools", 
"Printf", "Reexport", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "StatsModels", "Tables", "Vcov"] -git-tree-sha1 = "6491a2511e4475ffa0b425e86b2bdebaef1e2009" -uuid = "9d5cd8c9-2029-5cab-9928-427838db53e3" -version = "1.12.0" - -[[deps.FixedEffects]] -deps = ["GroupedArrays", "LinearAlgebra", "Printf", "StatsBase"] -git-tree-sha1 = "0a7c5815f44becfeba2165fb31251286dd5c5a27" -uuid = "c8885935-8500-56a7-9867-7708b20db0eb" -version = "2.4.1" - - [deps.FixedEffects.extensions] - CUDAExt = "CUDA" - MetalExt = "Metal" - - [deps.FixedEffects.weakdeps] - CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" - Metal = "dde4c033-4e86-420c-a63e-0dd931031962" - [[deps.FixedPointNumbers]] deps = ["Statistics"] git-tree-sha1 = "05882d6995ae5c12bb5f36dd2ed3f61c98cbb172" @@ -901,9 +858,9 @@ version = "1.3.7" [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"] -git-tree-sha1 = "a2df1b776752e3f344e5116c06d75a10436ab853" +git-tree-sha1 = "ba6ce081425d0afb2bedd00d9884464f764a9225" uuid = "f6369f11-7733-5829-9624-2563aa707210" -version = "0.10.38" +version = "1.2.2" weakdeps = ["StaticArrays"] [deps.ForwardDiff.extensions] @@ -943,29 +900,29 @@ git-tree-sha1 = "fcb0584ff34e25155876418979d4c8971243bb89" uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89" version = "3.4.0+2" -[[deps.GLM]] -deps = ["Distributions", "LinearAlgebra", "Printf", "Reexport", "SparseArrays", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns", "StatsModels"] -git-tree-sha1 = "273bd1cd30768a2fddfa3fd63bbc746ed7249e5f" -uuid = "38e38edf-8417-5370-95a0-9cbb8c7f171a" -version = "1.9.0" - [[deps.GPUArraysCore]] deps = ["Adapt"] git-tree-sha1 = "83cf05ab16a73219e5f6bd1bdfa9848fa24ac627" uuid = "46192b85-c4d5-4398-a991-12ede77f4527" version = "0.2.0" +[[deps.GPUCompiler]] +deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "PrecompileTools", "Preferences", 
"Scratch", "Serialization", "TOML", "Tracy", "UUIDs"] +git-tree-sha1 = "9a8b92a457f55165923fcfe48997b7b93b712fca" +uuid = "61eb1bfa-7361-4325-ad38-22787b887f55" +version = "1.7.2" + [[deps.GR]] deps = ["Artifacts", "Base64", "DelimitedFiles", "Downloads", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Preferences", "Printf", "Qt6Wayland_jll", "Random", "Serialization", "Sockets", "TOML", "Tar", "Test", "p7zip_jll"] -git-tree-sha1 = "1828eb7275491981fa5f1752a5e126e8f26f8741" +git-tree-sha1 = "f52c27dd921390146624f3aab95f4e8614ad6531" uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" -version = "0.73.17" +version = "0.73.18" [[deps.GR_jll]] deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "FreeType2_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Qt6Base_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "27299071cc29e409488ada41ec7643e0ab19091f" +git-tree-sha1 = "4b0406b866ea9fdbaf1148bc9c0b887e59f9af68" uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" -version = "0.73.17+0" +version = "0.73.18+0" [[deps.GenericSchur]] deps = ["LinearAlgebra", "Printf"] @@ -979,6 +936,12 @@ git-tree-sha1 = "45288942190db7c5f760f59c04495064eedf9340" uuid = "b0724c58-0f36-5564-988d-3bb0596ebc4a" version = "0.22.4+0" +[[deps.Ghostscript_jll]] +deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Zlib_jll"] +git-tree-sha1 = "38044a04637976140074d0b0621c1edf0eb531fd" +uuid = "61579ee1-b43e-5ca0-a5da-69d92c66a64b" +version = "9.55.1+0" + [[deps.Glib_jll]] deps = ["Artifacts", "GettextRuntime_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE2_jll", "Zlib_jll"] git-tree-sha1 = "50c11ffab2a3d50192a228c313f05b5b5dc5acb2" @@ -992,21 +955,21 @@ uuid = "3b182d85-2403-5c21-9c21-1e1f0cc25472" version = "1.3.15+0" [[deps.Graphs]] -deps = ["ArnoldiMethod", "Compat", "DataStructures", "Distributed", "Inflate", "LinearAlgebra", "Random", "SharedArrays", "SimpleTraits", "SparseArrays", 
"Statistics"] -git-tree-sha1 = "3169fd3440a02f35e549728b0890904cfd4ae58a" +deps = ["ArnoldiMethod", "DataStructures", "Distributed", "Inflate", "LinearAlgebra", "Random", "SharedArrays", "SimpleTraits", "SparseArrays", "Statistics"] +git-tree-sha1 = "7a98c6502f4632dbe9fb1973a4244eaa3324e84d" uuid = "86223c79-3864-5bf0-83f7-82e725a168b6" -version = "1.12.1" +version = "1.13.1" [[deps.Grisu]] git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2" uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe" version = "1.0.2" -[[deps.GroupedArrays]] -deps = ["DataAPI", "Missings"] -git-tree-sha1 = "3777562b981fdc1556b4efbb0bf7526b525fe089" -uuid = "6407cd72-fade-4a84-8a1e-56e431fc1533" -version = "0.3.4" +[[deps.HCubature]] +deps = ["Combinatorics", "DataStructures", "LinearAlgebra", "QuadGK", "StaticArrays"] +git-tree-sha1 = "19ef9f0cb324eed957b7fe7257ac84e8ed8a48ec" +uuid = "19dc6840-f33b-545b-b366-655c7e3ffd49" +version = "1.7.0" [[deps.HTTP]] deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "PrecompileTools", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] @@ -1026,12 +989,6 @@ git-tree-sha1 = "8e070b599339d622e9a081d17230d74a5c473293" uuid = "3e5b6fbb-0976-4d2c-9146-d79de83f2fb0" version = "0.1.17" -[[deps.Hwloc_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "XML2_jll", "Xorg_libpciaccess_jll"] -git-tree-sha1 = "3d468106a05408f9f7b6f161d9e7715159af247b" -uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8" -version = "2.12.2+0" - [[deps.HypergeometricFunctions]] deps = ["LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"] git-tree-sha1 = "68c173f4f449de5b438ee67ed0c9c748dc31a2ec" @@ -1072,6 +1029,31 @@ git-tree-sha1 = "4c1acff2dc6b6967e7e750633c50bc3b8d83e617" uuid = "18e54dd8-cb9d-406c-a71d-865a43cbb235" version = "0.1.3" +[[deps.Integrals]] +deps = ["CommonSolve", "HCubature", "LinearAlgebra", "MonteCarloIntegration", "QuadGK", "Random", "Reexport", 
"SciMLBase"] +git-tree-sha1 = "940c16a019f2d2d51a9f6febbb7ec449010e87bb" +uuid = "de52edbc-65ea-441a-8357-d3a637375a31" +version = "4.7.1" + + [deps.Integrals.extensions] + IntegralsArblibExt = "Arblib" + IntegralsCubaExt = "Cuba" + IntegralsCubatureExt = "Cubature" + IntegralsFastGaussQuadratureExt = "FastGaussQuadrature" + IntegralsForwardDiffExt = "ForwardDiff" + IntegralsMCIntegrationExt = "MCIntegration" + IntegralsZygoteExt = ["Zygote", "ChainRulesCore"] + + [deps.Integrals.weakdeps] + Arblib = "fb37089c-8514-4489-9461-98f9c8763369" + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + Cuba = "8a292aeb-7a57-582c-b821-06e4c11590b1" + Cubature = "667455a9-e2ce-5579-9412-b964f529a492" + FastGaussQuadrature = "442a2c76-b920-505d-bb47-c5924d526838" + ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" + MCIntegration = "ea1e2de9-7db7-4b42-91ee-0cd1bf6df167" + Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" + [[deps.IntelOpenMP_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl"] git-tree-sha1 = "ec1debd61c300961f98064cfb21287613ad7f303" @@ -1084,15 +1066,19 @@ uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" version = "1.11.0" [[deps.Interpolations]] -deps = ["Adapt", "AxisAlgorithms", "ChainRulesCore", "LinearAlgebra", "OffsetArrays", "Random", "Ratios", "Requires", "SharedArrays", "SparseArrays", "StaticArrays", "WoodburyMatrices"] -git-tree-sha1 = "88a101217d7cb38a7b481ccd50d21876e1d1b0e0" +deps = ["Adapt", "AxisAlgorithms", "ChainRulesCore", "LinearAlgebra", "OffsetArrays", "Random", "Ratios", "SharedArrays", "SparseArrays", "StaticArrays", "WoodburyMatrices"] +git-tree-sha1 = "65d505fa4c0d7072990d659ef3fc086eb6da8208" uuid = "a98d9a8b-a2ab-59e6-89dd-64a1c18fca59" -version = "0.15.1" -weakdeps = ["Unitful"] +version = "0.16.2" [deps.Interpolations.extensions] + InterpolationsForwardDiffExt = "ForwardDiff" InterpolationsUnitfulExt = "Unitful" + [deps.Interpolations.weakdeps] + ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" + Unitful = 
"1986cc42-f94f-5a68-af5c-568840ba703d" + [[deps.IntervalSets]] git-tree-sha1 = "5fbb102dcb8b1a858111ae81d56682376130517d" uuid = "8197267c-284f-5f27-9208-e0e47529a953" @@ -1119,22 +1105,6 @@ git-tree-sha1 = "6da3c4316095de0f5ee2ebd875df8721e7e0bdbe" uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f" version = "1.3.1" -[[deps.Ipopt]] -deps = ["Ipopt_jll", "LinearAlgebra", "OpenBLAS32_jll", "PrecompileTools"] -git-tree-sha1 = "1e385287972ab12b8fd46bf108c05f34cbf0be82" -uuid = "b6b21f68-93f8-5de0-b562-5493be1d77c9" -version = "1.10.2" -weakdeps = ["MathOptInterface"] - - [deps.Ipopt.extensions] - IpoptMathOptInterfaceExt = "MathOptInterface" - -[[deps.Ipopt_jll]] -deps = ["ASL_jll", "Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "MUMPS_seq_jll", "SPRAL_jll", "libblastrampoline_jll"] -git-tree-sha1 = "4f55ad688c698a4f77d892a1cb673f7e8a30f178" -uuid = "9cc047cb-c261-5740-88fc-0cf96f7bdcc7" -version = "300.1400.1700+0" - [[deps.IrrationalConstants]] git-tree-sha1 = "b2d91fe939cae05960e760110b328288867b5758" uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" @@ -1145,12 +1115,6 @@ git-tree-sha1 = "42d5f897009e7ff2cf88db414a389e5ed1bdd023" uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e" version = "1.10.0" -[[deps.IterableTables]] -deps = ["DataValues", "IteratorInterfaceExtensions", "Requires", "TableTraits", "TableTraitsUtils"] -git-tree-sha1 = "70300b876b2cebde43ebc0df42bc8c94a144e1b4" -uuid = "1c8ee90f-4401-5389-894e-7a04a3dc0f4d" -version = "1.0.0" - [[deps.IterativeSolvers]] deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"] git-tree-sha1 = "59545b0a2b27208b0650df0a46b8e3019f85055b" @@ -1180,18 +1144,6 @@ git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" version = "0.21.4" -[[deps.JSON3]] -deps = ["Dates", "Mmap", "Parsers", "PrecompileTools", "StructTypes", "UUIDs"] -git-tree-sha1 = "411eccfe8aba0814ffa0fdf4860913ed09c34975" -uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" 
-version = "1.14.3" - - [deps.JSON3.extensions] - JSON3ArrowExt = ["ArrowTypes"] - - [deps.JSON3.weakdeps] - ArrowTypes = "31f734f8-188a-4ce0-8406-c8a06bd891cd" - [[deps.Jieko]] deps = ["ExproniconLite"] git-tree-sha1 = "2f05ed29618da60c06a87e9c033982d4f71d0b6c" @@ -1204,30 +1156,30 @@ git-tree-sha1 = "4255f0032eafd6451d707a51d5f0248b8a165e4d" uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8" version = "3.1.3+0" -[[deps.JuMP]] -deps = ["LinearAlgebra", "MacroTools", "MathOptInterface", "MutableArithmetics", "OrderedCollections", "PrecompileTools", "Printf", "SparseArrays"] -git-tree-sha1 = "c9ace86360c1dc0635de5f9e2ce5143b86c53311" -uuid = "4076af6c-e467-56ae-b986-b466b2749572" -version = "1.25.0" - - [deps.JuMP.extensions] - JuMPDimensionalDataExt = "DimensionalData" - - [deps.JuMP.weakdeps] - DimensionalData = "0703355e-b756-11e9-17c0-8b28908087d0" +[[deps.JuliaSyntaxHighlighting]] +deps = ["StyledStrings"] +uuid = "ac6e5ff7-fb65-4e79-a425-ec3bc9c03011" +version = "1.12.0" [[deps.JumpProcesses]] -deps = ["ArrayInterface", "DataStructures", "DiffEqBase", "DiffEqCallbacks", "DocStringExtensions", "FunctionWrappers", "Graphs", "LinearAlgebra", "Markdown", "PoissonRandom", "Random", "RecursiveArrayTools", "Reexport", "SciMLBase", "Setfield", "StaticArrays", "SymbolicIndexingInterface", "UnPack"] -git-tree-sha1 = "f8da88993c914357031daf0023f18748ff473924" +deps = ["ArrayInterface", "DataStructures", "DiffEqBase", "DiffEqCallbacks", "DocStringExtensions", "FunctionWrappers", "Graphs", "LinearAlgebra", "PoissonRandom", "Random", "RecursiveArrayTools", "Reexport", "SciMLBase", "Setfield", "StaticArrays", "SymbolicIndexingInterface", "UnPack"] +git-tree-sha1 = "905a2a28770e23f3ed750306ef48eb8c46c3a002" uuid = "ccbc3e58-028d-4f4c-8cd5-9ae44345cda5" -version = "9.16.1" -weakdeps = ["FastBroadcast"] +version = "9.19.1" + + [deps.JumpProcesses.extensions] + JumpProcessesKernelAbstractionsExt = ["Adapt", "KernelAbstractions"] + + [deps.JumpProcesses.weakdeps] + Adapt = 
"79e6a3ab-5dfb-504d-930d-738a2a938a0e" + FastBroadcast = "7034ab61-46d4-4ed7-9d0f-46aef9175898" + KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c" [[deps.KernelDensity]] deps = ["Distributions", "DocStringExtensions", "FFTW", "Interpolations", "StatsBase"] -git-tree-sha1 = "7d703202e65efa1369de1279c162b915e245eed1" +git-tree-sha1 = "ba51324b894edaf1df3ab16e2cc6bc3280a2f1a7" uuid = "5ab0869b-81aa-558d-bb23-cbf5423bbe9b" -version = "0.6.9" +version = "0.6.10" [[deps.Krylov]] deps = ["LinearAlgebra", "Printf", "SparseArrays"] @@ -1247,6 +1199,24 @@ git-tree-sha1 = "aaafe88dccbd957a8d82f7d05be9b69172e0cee3" uuid = "88015f11-f218-50d7-93a8-a6af411a945d" version = "4.0.1+0" +[[deps.LLVM]] +deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Preferences", "Printf", "Unicode"] +git-tree-sha1 = "ce8614210409eaa54ed5968f4b50aa96da7ae543" +uuid = "929cbde3-209d-540e-8aea-75f648917ca0" +version = "9.4.4" + + [deps.LLVM.extensions] + BFloat16sExt = "BFloat16s" + + [deps.LLVM.weakdeps] + BFloat16s = "ab4f0b2a-ad5b-11e8-123f-65d77653426b" + +[[deps.LLVMExtra_jll]] +deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] +git-tree-sha1 = "8e76807afb59ebb833e9b131ebf1a8c006510f33" +uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab" +version = "0.0.38+0" + [[deps.LLVMOpenMP_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] git-tree-sha1 = "eb62a3deb62fc6d8822c0c4bef73e4412419c5d8" @@ -1265,20 +1235,28 @@ uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" version = "1.4.0" [[deps.Latexify]] -deps = ["Format", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "OrderedCollections", "Requires"] -git-tree-sha1 = "cd10d2cc78d34c0e2a3a36420ab607b611debfbb" +deps = ["Format", "Ghostscript_jll", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "OrderedCollections", "Requires"] +git-tree-sha1 = "44f93c47f9cd6c7e431f2f2091fcba8f01cd7e8f" uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" -version = "0.16.7" +version = "0.16.10" [deps.Latexify.extensions] 
DataFramesExt = "DataFrames" SparseArraysExt = "SparseArrays" SymEngineExt = "SymEngine" + TectonicExt = "tectonic_jll" [deps.Latexify.weakdeps] DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SymEngine = "123dc426-2d89-5057-bbad-38513e3affd8" + tectonic_jll = "d7dd28d6-a5e6-559c-9131-7eb760cdacc5" + +[[deps.LatticeRules]] +deps = ["Random"] +git-tree-sha1 = "7f5b02258a3ca0221a6a9710b0a0a2e8fb4957fe" +uuid = "73f95e8e-ec14-4e6a-8b18-0d2e271c4e55" +version = "0.0.1" [[deps.LayoutPointers]] deps = ["ArrayInterface", "LinearAlgebra", "ManualMemory", "SIMDTypes", "Static", "StaticArrayInterface"] @@ -1309,6 +1287,12 @@ deps = ["Artifacts", "Pkg"] uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" version = "1.11.0" +[[deps.LeftChildRightSiblingTrees]] +deps = ["AbstractTrees"] +git-tree-sha1 = "95ba48564903b43b2462318aa243ee79d81135ff" +uuid = "1d6d02ad-be62-4b6b-8a6d-2f90e265016e" +version = "0.2.1" + [[deps.LevyArea]] deps = ["LinearAlgebra", "Random", "SpecialFunctions"] git-tree-sha1 = "56513a09b8e0ae6485f34401ea9e2f31357958ec" @@ -1321,24 +1305,30 @@ uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" version = "0.6.4" [[deps.LibCURL_jll]] -deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "OpenSSL_jll", "Zlib_jll", "nghttp2_jll"] uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" -version = "8.6.0+0" +version = "8.11.1+1" [[deps.LibGit2]] -deps = ["Base64", "LibGit2_jll", "NetworkOptions", "Printf", "SHA"] +deps = ["LibGit2_jll", "NetworkOptions", "Printf", "SHA"] uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" version = "1.11.0" [[deps.LibGit2_jll]] -deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "OpenSSL_jll"] uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5" -version = "1.7.2+0" +version = "1.9.0+0" [[deps.LibSSH2_jll]] -deps = ["Artifacts", "Libdl", "MbedTLS_jll"] +deps = 
["Artifacts", "Libdl", "OpenSSL_jll"] uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" -version = "1.11.0+1" +version = "1.11.3+1" + +[[deps.LibTracyClient_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "d2bc4e1034b2d43076b50f0e34ea094c2cb0a717" +uuid = "ad6e5548-8b26-5c9f-8ef3-ef0ad883f3a5" +version = "0.9.1+6" [[deps.Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" @@ -1405,7 +1395,7 @@ version = "7.4.0" [[deps.LinearAlgebra]] deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"] uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -version = "1.11.0" +version = "1.12.0" [[deps.LinearMaps]] deps = ["LinearAlgebra"] @@ -1420,18 +1410,20 @@ weakdeps = ["ChainRulesCore", "SparseArrays", "Statistics"] LinearMapsStatisticsExt = "Statistics" [[deps.LinearSolve]] -deps = ["ArrayInterface", "ChainRulesCore", "ConcreteStructs", "DocStringExtensions", "EnumX", "GPUArraysCore", "InteractiveUtils", "Krylov", "LazyArrays", "Libdl", "LinearAlgebra", "MKL_jll", "Markdown", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "Setfield", "StaticArraysCore", "UnPack"] -git-tree-sha1 = "38e21d192e3549a402191ad7e91d07afdf801674" +deps = ["ArrayInterface", "ChainRulesCore", "ConcreteStructs", "DocStringExtensions", "EnumX", "GPUArraysCore", "InteractiveUtils", "Krylov", "LazyArrays", "Libdl", "LinearAlgebra", "MKL_jll", "Markdown", "OpenBLAS_jll", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLLogging", "SciMLOperators", "Setfield", "StaticArraysCore", "UnPack"] +git-tree-sha1 = "b5def83652705bdc00035dff671039e707588a00" uuid = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" -version = "3.26.0" +version = "3.46.1" [deps.LinearSolve.extensions] + LinearSolveAMDGPUExt = "AMDGPU" LinearSolveBLISExt = ["blis_jll", "LAPACK_jll"] LinearSolveBandedMatricesExt = "BandedMatrices" LinearSolveBlockDiagonalsExt = "BlockDiagonals" LinearSolveCUDAExt = "CUDA" LinearSolveCUDSSExt = "CUDSS" 
LinearSolveCUSOLVERRFExt = ["CUSOLVERRF", "SparseArrays"] + LinearSolveCliqueTreesExt = ["CliqueTrees", "SparseArrays"] LinearSolveEnzymeExt = "EnzymeCore" LinearSolveFastAlmostBandedMatricesExt = "FastAlmostBandedMatrices" LinearSolveFastLapackInterfaceExt = "FastLapackInterface" @@ -1441,17 +1433,20 @@ version = "3.26.0" LinearSolveKernelAbstractionsExt = "KernelAbstractions" LinearSolveKrylovKitExt = "KrylovKit" LinearSolveMetalExt = "Metal" + LinearSolveMooncakeExt = "Mooncake" LinearSolvePardisoExt = ["Pardiso", "SparseArrays"] LinearSolveRecursiveFactorizationExt = "RecursiveFactorization" LinearSolveSparseArraysExt = "SparseArrays" LinearSolveSparspakExt = ["SparseArrays", "Sparspak"] [deps.LinearSolve.weakdeps] + AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" BandedMatrices = "aae01518-5342-5314-be14-df237901396f" BlockDiagonals = "0a1fb500-61f7-11e9-3c65-f5ef3456f9f0" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" CUDSS = "45b445bb-4962-46a0-9369-b4df9d0f772e" CUSOLVERRF = "a8cc9031-bad2-4722-94f5-40deabb4245c" + CliqueTrees = "60701a23-6482-424a-84db-faee86b9b1f8" EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869" FastAlmostBandedMatrices = "9d29842c-ecb8-4973-b1e9-a27b1157504e" FastLapackInterface = "29a986be-02c6-4525-aec4-84b980013641" @@ -1462,6 +1457,7 @@ version = "3.26.0" KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" LAPACK_jll = "51474c39-65e3-53ba-86ba-03b1b862ec14" Metal = "dde4c033-4e86-420c-a63e-0dd931031962" + Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6" Pardiso = "46dd5b70-b6fb-5a00-ae2d-e8fea33afaf2" RecursiveFactorization = "f2c3362d-daeb-58d1-803e-2bc74f2840b4" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" @@ -1496,20 +1492,20 @@ version = "1.2.0" [[deps.LoopVectorization]] deps = ["ArrayInterface", "CPUSummary", "CloseOpenIntervals", "DocStringExtensions", "HostCPUFeatures", "IfElse", "LayoutPointers", "LinearAlgebra", "OffsetArrays", "PolyesterWeave", "PrecompileTools", "SIMDTypes", "SLEEFPirates", "Static", 
"StaticArrayInterface", "ThreadingUtilities", "UnPack", "VectorizationBase"] -git-tree-sha1 = "e5afce7eaf5b5ca0d444bcb4dc4fd78c54cbbac0" +git-tree-sha1 = "a9fc7883eb9b5f04f46efb9a540833d1fad974b3" uuid = "bdcacae8-1622-11e9-2a5c-532679323890" -version = "0.12.172" -weakdeps = ["ChainRulesCore", "ForwardDiff", "SpecialFunctions"] +version = "0.12.173" [deps.LoopVectorization.extensions] ForwardDiffExt = ["ChainRulesCore", "ForwardDiff"] + ForwardDiffNNlibExt = ["ForwardDiff", "NNlib"] SpecialFunctionsExt = "SpecialFunctions" -[[deps.METIS_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "2eefa8baa858871ae7770c98c3c2a7e46daba5b4" -uuid = "d00139f3-1899-568f-a2f0-47f597d42d70" -version = "5.1.3+0" + [deps.LoopVectorization.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" + NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" + SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" [[deps.MKL_jll]] deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "oneTBB_jll"] @@ -1517,12 +1513,6 @@ git-tree-sha1 = "282cadc186e7b2ae0eeadbd7a4dffed4196ae2aa" uuid = "856f044c-d86e-5d09-b602-aeab76dc8ba7" version = "2025.2.0+0" -[[deps.MUMPS_seq_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "METIS_jll", "libblastrampoline_jll"] -git-tree-sha1 = "0eab12f94948ca67908aec14b9f2ebefd17463fe" -uuid = "d7ed1dd3-d0ae-5e8e-bfb4-87a502085b8d" -version = "500.700.301+0" - [[deps.MacroTools]] git-tree-sha1 = "1e0228a030642014fe5cfe68c2c0a818f9e3f522" uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" @@ -1534,16 +1524,10 @@ uuid = "d125e4d3-2237-4719-b19c-fa641b8a4667" version = "0.1.8" [[deps.Markdown]] -deps = ["Base64"] +deps = ["Base64", "JuliaSyntaxHighlighting", "StyledStrings"] uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" version = "1.11.0" -[[deps.MathOptInterface]] -deps = ["BenchmarkTools", "CodecBzip2", "CodecZlib", "DataStructures", 
"ForwardDiff", "JSON3", "LinearAlgebra", "MutableArithmetics", "NaNMath", "OrderedCollections", "PrecompileTools", "Printf", "SparseArrays", "SpecialFunctions", "Test"] -git-tree-sha1 = "a2cbab4256690aee457d136752c404e001f27768" -uuid = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" -version = "1.46.0" - [[deps.MaybeInplace]] deps = ["ArrayInterface", "LinearAlgebra", "MacroTools"] git-tree-sha1 = "54e2fdc38130c05b42be423e90da3bade29b74bd" @@ -1561,14 +1545,15 @@ uuid = "739be429-bea8-5141-9913-cc70e7f3736d" version = "1.1.9" [[deps.MbedTLS_jll]] -deps = ["Artifacts", "Libdl"] +deps = ["Artifacts", "JLLWrappers", "Libdl"] +git-tree-sha1 = "3cce3511ca2c6f87b19c34ffc623417ed2798cbd" uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.6+0" +version = "2.28.10+0" [[deps.Measures]] -git-tree-sha1 = "c13304c81eec1ed3af7fc20e75fb6b26092a1102" +git-tree-sha1 = "b513cedd20d9c914783d8ad83d08120702bf2c77" uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" -version = "0.3.2" +version = "0.3.3" [[deps.Missings]] deps = ["DataAPI"] @@ -1580,11 +1565,11 @@ version = "1.2.0" uuid = "a63ad114-7e13-5084-954f-fe012c677804" version = "1.11.0" -[[deps.Mocking]] -deps = ["Compat", "ExprTools"] -git-tree-sha1 = "2c140d60d7cb82badf06d8783800d0bcd1a7daa2" -uuid = "78c3b35d-d492-501b-9361-3d52fe80e533" -version = "0.8.1" +[[deps.MonteCarloIntegration]] +deps = ["Distributions", "QuasiMonteCarlo", "Random"] +git-tree-sha1 = "722ad522068d31954b4a976b66a26aeccbf509ed" +uuid = "4886b29c-78c9-11e9-0a6e-41e1f4161f7b" +version = "0.2.0" [[deps.Moshi]] deps = ["ExproniconLite", "Jieko"] @@ -1594,7 +1579,7 @@ version = "0.3.7" [[deps.MozillaCACerts_jll]] uuid = "14a3606d-f60d-562e-9121-12d972cd8159" -version = "2023.12.12" +version = "2025.5.20" [[deps.MuladdMacro]] git-tree-sha1 = "cac9cc5499c25554cba55cd3c30543cff5ca4fab" @@ -1627,14 +1612,16 @@ version = "7.10.0" [[deps.NLopt]] deps = ["CEnum", "NLopt_jll"] -git-tree-sha1 = "ddb22a00a2dd27c98e0a94879544eb92d192184a" +git-tree-sha1 = 
"624785b15005a0e0f4e462b27ee745dbe5941863" uuid = "76087f3c-5699-56af-9a33-bf431cd00edd" -version = "1.1.3" -weakdeps = ["MathOptInterface"] +version = "1.2.1" [deps.NLopt.extensions] NLoptMathOptInterfaceExt = ["MathOptInterface"] + [deps.NLopt.weakdeps] + MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" + [[deps.NLopt_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] git-tree-sha1 = "b0154a615d5b2b6cf7a2501123b793577d0b9950" @@ -1661,13 +1648,13 @@ version = "0.4.22" [[deps.NetworkOptions]] uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" -version = "1.2.0" +version = "1.3.0" [[deps.NonlinearSolve]] -deps = ["ADTypes", "ArrayInterface", "BracketingNonlinearSolve", "CommonSolve", "ConcreteStructs", "DiffEqBase", "DifferentiationInterface", "FastClosures", "FiniteDiff", "ForwardDiff", "LineSearch", "LinearAlgebra", "LinearSolve", "NonlinearSolveBase", "NonlinearSolveFirstOrder", "NonlinearSolveQuasiNewton", "NonlinearSolveSpectralMethods", "PrecompileTools", "Preferences", "Reexport", "SciMLBase", "SimpleNonlinearSolve", "SparseArrays", "SparseMatrixColorings", "StaticArraysCore", "SymbolicIndexingInterface"] -git-tree-sha1 = "d2ec18c1e4eccbb70b64be2435fc3b06fbcdc0a1" +deps = ["ADTypes", "ArrayInterface", "BracketingNonlinearSolve", "CommonSolve", "ConcreteStructs", "DifferentiationInterface", "FastClosures", "FiniteDiff", "ForwardDiff", "LineSearch", "LinearAlgebra", "LinearSolve", "NonlinearSolveBase", "NonlinearSolveFirstOrder", "NonlinearSolveQuasiNewton", "NonlinearSolveSpectralMethods", "PrecompileTools", "Preferences", "Reexport", "SciMLBase", "SimpleNonlinearSolve", "StaticArraysCore", "SymbolicIndexingInterface"] +git-tree-sha1 = "1d091cfece012662b06d25c792b3a43a0804c47b" uuid = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" -version = "4.10.0" +version = "4.12.0" [deps.NonlinearSolve.extensions] NonlinearSolveFastLevenbergMarquardtExt = "FastLevenbergMarquardt" @@ -1676,7 +1663,7 @@ version = "4.10.0" NonlinearSolveMINPACKExt = "MINPACK" 
NonlinearSolveNLSolversExt = "NLSolvers" NonlinearSolveNLsolveExt = ["NLsolve", "LineSearches"] - NonlinearSolvePETScExt = ["PETSc", "MPI"] + NonlinearSolvePETScExt = ["PETSc", "MPI", "SparseArrays"] NonlinearSolveSIAMFANLEquationsExt = "SIAMFANLEquations" NonlinearSolveSpeedMappingExt = "SpeedMapping" NonlinearSolveSundialsExt = "Sundials" @@ -1692,51 +1679,74 @@ version = "4.10.0" NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" PETSc = "ace2c81b-2b5f-4b1e-a30d-d662738edfe0" SIAMFANLEquations = "084e46ad-d928-497d-ad5e-07fa361a48c4" + SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SpeedMapping = "f1835b91-879b-4a3f-a438-e4baacf14412" Sundials = "c3572dad-4567-51f8-b174-8c6c989267f4" [[deps.NonlinearSolveBase]] -deps = ["ADTypes", "Adapt", "ArrayInterface", "CommonSolve", "Compat", "ConcreteStructs", "DifferentiationInterface", "EnzymeCore", "FastClosures", "LinearAlgebra", "Markdown", "MaybeInplace", "Preferences", "Printf", "RecursiveArrayTools", "SciMLBase", "SciMLJacobianOperators", "SciMLOperators", "StaticArraysCore", "SymbolicIndexingInterface", "TimerOutputs"] -git-tree-sha1 = "1a6f6b161a644beac3c46a46f9bbb830c24abffb" +deps = ["ADTypes", "Adapt", "ArrayInterface", "CommonSolve", "Compat", "ConcreteStructs", "DifferentiationInterface", "EnzymeCore", "FastClosures", "LinearAlgebra", "Markdown", "MaybeInplace", "Preferences", "Printf", "RecursiveArrayTools", "SciMLBase", "SciMLJacobianOperators", "SciMLLogging", "SciMLOperators", "SciMLStructures", "Setfield", "StaticArraysCore", "SymbolicIndexingInterface", "TimerOutputs"] +git-tree-sha1 = "9f1e723df4aafef077ac8bb6771602138b4b211f" uuid = "be0214bd-f91f-a760-ac4e-3421ce2b2da0" -version = "1.8.0" -weakdeps = ["BandedMatrices", "DiffEqBase", "ForwardDiff", "LineSearch", "LinearSolve", "SparseArrays", "SparseMatrixColorings"] +version = "2.1.0" [deps.NonlinearSolveBase.extensions] NonlinearSolveBaseBandedMatricesExt = "BandedMatrices" - NonlinearSolveBaseDiffEqBaseExt = "DiffEqBase" + 
NonlinearSolveBaseChainRulesCoreExt = "ChainRulesCore" + NonlinearSolveBaseEnzymeExt = ["ChainRulesCore", "Enzyme"] NonlinearSolveBaseForwardDiffExt = "ForwardDiff" NonlinearSolveBaseLineSearchExt = "LineSearch" NonlinearSolveBaseLinearSolveExt = "LinearSolve" + NonlinearSolveBaseMooncakeExt = "Mooncake" + NonlinearSolveBaseReverseDiffExt = "ReverseDiff" NonlinearSolveBaseSparseArraysExt = "SparseArrays" NonlinearSolveBaseSparseMatrixColoringsExt = "SparseMatrixColorings" + NonlinearSolveBaseTrackerExt = "Tracker" + + [deps.NonlinearSolveBase.weakdeps] + BandedMatrices = "aae01518-5342-5314-be14-df237901396f" + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" + ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" + LineSearch = "87fe0de2-c867-4266-b59a-2f0a94fc965b" + LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" + Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6" + ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" + SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + SparseMatrixColorings = "0a514795-09f3-496d-8182-132a7b665d35" + Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" [[deps.NonlinearSolveFirstOrder]] -deps = ["ADTypes", "ArrayInterface", "CommonSolve", "ConcreteStructs", "DiffEqBase", "FiniteDiff", "ForwardDiff", "LineSearch", "LinearAlgebra", "LinearSolve", "MaybeInplace", "NonlinearSolveBase", "PrecompileTools", "Reexport", "SciMLBase", "SciMLJacobianOperators", "Setfield", "StaticArraysCore"] -git-tree-sha1 = "3f1198ae5cbf21e84b8251a9e62fa1f888f3e4cb" +deps = ["ADTypes", "ArrayInterface", "CommonSolve", "ConcreteStructs", "FiniteDiff", "ForwardDiff", "LineSearch", "LinearAlgebra", "LinearSolve", "MaybeInplace", "NonlinearSolveBase", "PrecompileTools", "Reexport", "SciMLBase", "SciMLJacobianOperators", "Setfield", "StaticArraysCore"] +git-tree-sha1 = "872c32bc8a524e1a51bfc0a0cf72ff2a2f886226" uuid = "5959db7a-ea39-4486-b5fe-2dd0bf03d60d" -version = "1.7.0" +version = "1.10.0" 
[[deps.NonlinearSolveQuasiNewton]] -deps = ["ArrayInterface", "CommonSolve", "ConcreteStructs", "DiffEqBase", "LinearAlgebra", "LinearSolve", "MaybeInplace", "NonlinearSolveBase", "PrecompileTools", "Reexport", "SciMLBase", "SciMLOperators", "StaticArraysCore"] -git-tree-sha1 = "b69a68ef3a7bba7ab1d5ef6321ed6d9a613142b0" +deps = ["ArrayInterface", "CommonSolve", "ConcreteStructs", "LinearAlgebra", "LinearSolve", "MaybeInplace", "NonlinearSolveBase", "PrecompileTools", "Reexport", "SciMLBase", "SciMLOperators", "StaticArraysCore"] +git-tree-sha1 = "21596ddee2e18c95bfe92803988611ab6daa9cfe" uuid = "9a2c21bd-3a47-402d-9113-8faf9a0ee114" -version = "1.5.0" +version = "1.11.0" weakdeps = ["ForwardDiff"] [deps.NonlinearSolveQuasiNewton.extensions] NonlinearSolveQuasiNewtonForwardDiffExt = "ForwardDiff" [[deps.NonlinearSolveSpectralMethods]] -deps = ["CommonSolve", "ConcreteStructs", "DiffEqBase", "LineSearch", "MaybeInplace", "NonlinearSolveBase", "PrecompileTools", "Reexport", "SciMLBase"] -git-tree-sha1 = "84de5a469e119eb2c22ae07c543dc4e7f7001ee7" +deps = ["CommonSolve", "ConcreteStructs", "LineSearch", "MaybeInplace", "NonlinearSolveBase", "PrecompileTools", "Reexport", "SciMLBase"] +git-tree-sha1 = "eafd027b5cd768f19bb5de76c0e908a9065ddd36" uuid = "26075421-4e9a-44e1-8bd1-420ed7ad02b2" -version = "1.3.0" +version = "1.6.0" weakdeps = ["ForwardDiff"] [deps.NonlinearSolveSpectralMethods.extensions] NonlinearSolveSpectralMethodsForwardDiffExt = "ForwardDiff" +[[deps.ObjectFile]] +deps = ["Reexport", "StructIO"] +git-tree-sha1 = "22faba70c22d2f03e60fbc61da99c4ebfc3eb9ba" +uuid = "d8793406-e978-5875-9003-1fc021f44a92" +version = "0.5.0" + [[deps.Observables]] git-tree-sha1 = "7438a59546cf62428fc9d1bc94729146d37a7225" uuid = "510215fc-4207-5dde-b226-833fc4488ee2" @@ -1757,33 +1767,26 @@ git-tree-sha1 = "b6aa4566bb7ae78498a5e68943863fa8b5231b59" uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051" version = "1.3.6+0" -[[deps.OpenBLAS32_jll]] -deps = ["Artifacts", 
"CompilerSupportLibraries_jll", "JLLWrappers", "Libdl"] -git-tree-sha1 = "ece4587683695fe4c5f20e990da0ed7e83c351e7" -uuid = "656ef2d0-ae68-5445-9ca0-591084a874a2" -version = "0.3.29+0" - [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" -version = "0.3.27+1" +version = "0.3.29+0" [[deps.OpenLibm_jll]] deps = ["Artifacts", "Libdl"] uuid = "05823500-19ac-5b8b-9628-191a04bc5112" -version = "0.8.5+0" +version = "0.8.7+0" [[deps.OpenSSL]] -deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"] -git-tree-sha1 = "f1a7e086c677df53e064e0fdd2c9d0b0833e3f6e" +deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "NetworkOptions", "OpenSSL_jll", "Sockets"] +git-tree-sha1 = "386b47442468acfb1add94bf2d85365dea10cbab" uuid = "4d8831e6-92b7-49fb-bdf8-b643e874388c" -version = "1.5.0" +version = "1.6.0" [[deps.OpenSSL_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "f19301ae653233bc88b1810ae908194f07f8db9d" +deps = ["Artifacts", "Libdl"] uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" -version = "3.5.4+0" +version = "3.5.1+0" [[deps.OpenSpecFun_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl"] @@ -1793,14 +1796,50 @@ version = "0.5.6+0" [[deps.Optim]] deps = ["Compat", "EnumX", "FillArrays", "ForwardDiff", "LineSearches", "LinearAlgebra", "NLSolversBase", "NaNMath", "PositiveFactorizations", "Printf", "SparseArrays", "StatsBase"] -git-tree-sha1 = "31b3b1b8e83ef9f1d50d74f1dd5f19a37a304a1f" +git-tree-sha1 = "61942645c38dd2b5b78e2082c9b51ab315315d10" uuid = "429524aa-4258-5aef-a3af-852621145aeb" -version = "1.12.0" -weakdeps = ["MathOptInterface"] +version = "1.13.2" [deps.Optim.extensions] OptimMOIExt = "MathOptInterface" + [deps.Optim.weakdeps] + MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" + +[[deps.Optimization]] +deps = ["ADTypes", "ArrayInterface", "ConsoleProgressMonitor", "DocStringExtensions", "LinearAlgebra", 
"Logging", "LoggingExtras", "OptimizationBase", "Printf", "Reexport", "SciMLBase", "SparseArrays", "TerminalLoggers"] +git-tree-sha1 = "b0afd00640ed7a122dfdd6f7c3e676079ce75dc0" +uuid = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +version = "5.1.0" + +[[deps.OptimizationBase]] +deps = ["ADTypes", "ArrayInterface", "DifferentiationInterface", "DocStringExtensions", "FastClosures", "LinearAlgebra", "PDMats", "Reexport", "SciMLBase", "SparseArrays", "SparseConnectivityTracer", "SparseMatrixColorings"] +git-tree-sha1 = "96e15dffd0499eba632c10a4d864b98071042809" +uuid = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" +version = "4.0.2" + + [deps.OptimizationBase.extensions] + OptimizationEnzymeExt = "Enzyme" + OptimizationFiniteDiffExt = "FiniteDiff" + OptimizationForwardDiffExt = "ForwardDiff" + OptimizationMLDataDevicesExt = "MLDataDevices" + OptimizationMLUtilsExt = "MLUtils" + OptimizationMTKExt = "ModelingToolkit" + OptimizationReverseDiffExt = "ReverseDiff" + OptimizationSymbolicAnalysisExt = "SymbolicAnalysis" + OptimizationZygoteExt = "Zygote" + + [deps.OptimizationBase.weakdeps] + Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" + FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" + ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" + MLDataDevices = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40" + MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54" + ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" + ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" + SymbolicAnalysis = "4297ee4d-0239-47d8-ba5d-195ecdf594fe" + Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" + [[deps.Opus_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] git-tree-sha1 = "c392fc5dd032381919e3b22dd32d6443760ce7ea" @@ -1813,28 +1852,28 @@ uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" version = "1.8.1" [[deps.OrdinaryDiffEq]] -deps = ["ADTypes", "Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DocStringExtensions", "EnumX", "ExponentialUtilities", "FastBroadcast", "FastClosures", "FillArrays", 
"FiniteDiff", "ForwardDiff", "FunctionWrappersWrappers", "InteractiveUtils", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "MacroTools", "MuladdMacro", "NonlinearSolve", "OrdinaryDiffEqAdamsBashforthMoulton", "OrdinaryDiffEqBDF", "OrdinaryDiffEqCore", "OrdinaryDiffEqDefault", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqExplicitRK", "OrdinaryDiffEqExponentialRK", "OrdinaryDiffEqExtrapolation", "OrdinaryDiffEqFIRK", "OrdinaryDiffEqFeagin", "OrdinaryDiffEqFunctionMap", "OrdinaryDiffEqHighOrderRK", "OrdinaryDiffEqIMEXMultistep", "OrdinaryDiffEqLinear", "OrdinaryDiffEqLowOrderRK", "OrdinaryDiffEqLowStorageRK", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqNordsieck", "OrdinaryDiffEqPDIRK", "OrdinaryDiffEqPRK", "OrdinaryDiffEqQPRK", "OrdinaryDiffEqRKN", "OrdinaryDiffEqRosenbrock", "OrdinaryDiffEqSDIRK", "OrdinaryDiffEqSSPRK", "OrdinaryDiffEqStabilizedIRK", "OrdinaryDiffEqStabilizedRK", "OrdinaryDiffEqSymplecticRK", "OrdinaryDiffEqTsit5", "OrdinaryDiffEqVerner", "Polyester", "PreallocationTools", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SciMLStructures", "SimpleNonlinearSolve", "SimpleUnPack", "SparseArrays", "Static", "StaticArrayInterface", "StaticArrays", "TruncatedStacktraces"] -git-tree-sha1 = "2d7026dd8e4c7b3e7f47eef9c13c60ae55fe4912" +deps = ["ADTypes", "Adapt", "ArrayInterface", "CommonSolve", "DataStructures", "DiffEqBase", "DocStringExtensions", "EnumX", "ExponentialUtilities", "FastBroadcast", "FastClosures", "FillArrays", "FiniteDiff", "ForwardDiff", "FunctionWrappersWrappers", "InteractiveUtils", "LineSearches", "LinearAlgebra", "LinearSolve", "Logging", "MacroTools", "MuladdMacro", "NonlinearSolve", "OrdinaryDiffEqAdamsBashforthMoulton", "OrdinaryDiffEqBDF", "OrdinaryDiffEqCore", "OrdinaryDiffEqDefault", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqExplicitRK", "OrdinaryDiffEqExponentialRK", "OrdinaryDiffEqExtrapolation", "OrdinaryDiffEqFIRK", "OrdinaryDiffEqFeagin", 
"OrdinaryDiffEqFunctionMap", "OrdinaryDiffEqHighOrderRK", "OrdinaryDiffEqIMEXMultistep", "OrdinaryDiffEqLinear", "OrdinaryDiffEqLowOrderRK", "OrdinaryDiffEqLowStorageRK", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqNordsieck", "OrdinaryDiffEqPDIRK", "OrdinaryDiffEqPRK", "OrdinaryDiffEqQPRK", "OrdinaryDiffEqRKN", "OrdinaryDiffEqRosenbrock", "OrdinaryDiffEqSDIRK", "OrdinaryDiffEqSSPRK", "OrdinaryDiffEqStabilizedIRK", "OrdinaryDiffEqStabilizedRK", "OrdinaryDiffEqSymplecticRK", "OrdinaryDiffEqTsit5", "OrdinaryDiffEqVerner", "Polyester", "PreallocationTools", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SciMLStructures", "SimpleNonlinearSolve", "SimpleUnPack", "SparseArrays", "Static", "StaticArrayInterface", "StaticArrays", "TruncatedStacktraces"] +git-tree-sha1 = "89172157d16139165d470602f1e552484b357771" uuid = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" -version = "6.95.1" +version = "6.103.0" [[deps.OrdinaryDiffEqAdamsBashforthMoulton]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqLowOrderRK", "Polyester", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static"] -git-tree-sha1 = "41fd0cd4f98c390dcf4f3ca66bf0503cd22f1396" +git-tree-sha1 = "09aae1486c767caa6bce9de892455cbdf5a6fbc8" uuid = "89bda076-bce5-4f1c-845f-551c83cdda9a" -version = "1.3.0" +version = "1.5.0" [[deps.OrdinaryDiffEqBDF]] deps = ["ADTypes", "ArrayInterface", "DiffEqBase", "FastBroadcast", "LinearAlgebra", "MacroTools", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqSDIRK", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "StaticArrays", "TruncatedStacktraces"] -git-tree-sha1 = "10647c3caef2b85a341a3271376fc5ad34c13985" +git-tree-sha1 = "ce8db53fd1e4e41c020fd53961e7314f75e4c21c" uuid = "6ad6398a-0878-4a85-9266-38940aa047c8" -version = "1.8.0" +version = "1.10.1" [[deps.OrdinaryDiffEqCore]] deps = 
["ADTypes", "Accessors", "Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DocStringExtensions", "EnumX", "FastBroadcast", "FastClosures", "FastPower", "FillArrays", "FunctionWrappersWrappers", "InteractiveUtils", "LinearAlgebra", "Logging", "MacroTools", "MuladdMacro", "Polyester", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SciMLStructures", "SimpleUnPack", "Static", "StaticArrayInterface", "StaticArraysCore", "SymbolicIndexingInterface", "TruncatedStacktraces"] -git-tree-sha1 = "1bd20b621e8dee5f2d170ae31631bf573ab77eec" +git-tree-sha1 = "4b68f9ca0cfa68cb9ee544df96391d47ca0e62a9" uuid = "bbf590c4-e513-4bbe-9b18-05decba2e5d8" -version = "1.26.2" +version = "1.36.0" [deps.OrdinaryDiffEqCore.extensions] OrdinaryDiffEqCoreEnzymeCoreExt = "EnzymeCore" @@ -1846,170 +1885,174 @@ version = "1.26.2" [[deps.OrdinaryDiffEqDefault]] deps = ["ADTypes", "DiffEqBase", "EnumX", "LinearAlgebra", "LinearSolve", "OrdinaryDiffEqBDF", "OrdinaryDiffEqCore", "OrdinaryDiffEqRosenbrock", "OrdinaryDiffEqTsit5", "OrdinaryDiffEqVerner", "PrecompileTools", "Preferences", "Reexport", "SciMLBase"] -git-tree-sha1 = "3ca10642e562dda57bf2accbcc33ee7f3cdfa593" +git-tree-sha1 = "7d5ddeee97e1bdcc848f1397cbc3d03bd57f33e7" uuid = "50262376-6c5a-4cf5-baba-aaf4f84d72d7" -version = "1.6.0" +version = "1.8.0" [[deps.OrdinaryDiffEqDifferentiation]] -deps = ["ADTypes", "ArrayInterface", "ConcreteStructs", "ConstructionBase", "DiffEqBase", "DifferentiationInterface", "FastBroadcast", "FiniteDiff", "ForwardDiff", "FunctionWrappersWrappers", "LinearAlgebra", "LinearSolve", "OrdinaryDiffEqCore", "SciMLBase", "SciMLOperators", "SparseArrays", "SparseMatrixColorings", "StaticArrayInterface", "StaticArrays"] -git-tree-sha1 = "906da94c9b81054b974e97590d24b092369026b8" +deps = ["ADTypes", "ArrayInterface", "ConcreteStructs", "ConstructionBase", "DiffEqBase", "DifferentiationInterface", "FastBroadcast", "FiniteDiff", "ForwardDiff", 
"FunctionWrappersWrappers", "LinearAlgebra", "LinearSolve", "OrdinaryDiffEqCore", "SciMLBase", "SciMLOperators", "SparseMatrixColorings", "StaticArrayInterface", "StaticArrays"] +git-tree-sha1 = "320b5f3e4e61ca0ad863c63c803f69973ba6efce" uuid = "4302a76b-040a-498a-8c04-15b101fed76b" -version = "1.11.0" +version = "1.16.1" +weakdeps = ["SparseArrays"] + + [deps.OrdinaryDiffEqDifferentiation.extensions] + OrdinaryDiffEqDifferentiationSparseArraysExt = "SparseArrays" [[deps.OrdinaryDiffEqExplicitRK]] deps = ["DiffEqBase", "FastBroadcast", "LinearAlgebra", "MuladdMacro", "OrdinaryDiffEqCore", "RecursiveArrayTools", "Reexport", "SciMLBase", "TruncatedStacktraces"] -git-tree-sha1 = "b1396040d1300353423fd2b582161226a285a409" +git-tree-sha1 = "4c0633f587395d7aaec0679dc649eb03fcc74e73" uuid = "9286f039-9fbf-40e8-bf65-aa933bdc4db0" -version = "1.2.0" +version = "1.4.0" [[deps.OrdinaryDiffEqExponentialRK]] deps = ["ADTypes", "DiffEqBase", "ExponentialUtilities", "FastBroadcast", "LinearAlgebra", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "RecursiveArrayTools", "Reexport", "SciMLBase"] -git-tree-sha1 = "7c98f51f8f155ae49b2088ed48b678d82e11c771" +git-tree-sha1 = "3b81416ff11e55ea0ae7b449efc818256d9d450b" uuid = "e0540318-69ee-4070-8777-9e2de6de23de" -version = "1.6.0" +version = "1.8.0" [[deps.OrdinaryDiffEqExtrapolation]] deps = ["ADTypes", "DiffEqBase", "FastBroadcast", "FastPower", "LinearSolve", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "Polyester", "RecursiveArrayTools", "Reexport", "SciMLBase"] -git-tree-sha1 = "e1bdc7f7a1990e9b2c6180897b1179e1ed13123b" +git-tree-sha1 = "9e1b11cf448a2c1bca640103c1c848a20aa2f967" uuid = "becaefa8-8ca2-5cf9-886d-c06f3d2bd2c4" -version = "1.6.0" +version = "1.9.0" [[deps.OrdinaryDiffEqFIRK]] deps = ["ADTypes", "DiffEqBase", "FastBroadcast", "FastGaussQuadrature", "FastPower", "LinearAlgebra", "LinearSolve", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", 
"OrdinaryDiffEqNonlinearSolve", "Polyester", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators"] -git-tree-sha1 = "d3ec1c085cc2f7a990301caca182cbc5011f6ef2" +git-tree-sha1 = "b968d66de3de5ffcf18544bc202ca792bad20710" uuid = "5960d6e9-dd7a-4743-88e7-cf307b64f125" -version = "1.14.0" +version = "1.16.0" [[deps.OrdinaryDiffEqFeagin]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "Polyester", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static"] -git-tree-sha1 = "06751a3fe2adea9bf23082decaae35656e6f864c" +git-tree-sha1 = "815b54211201ec42b8829e0275ab3c9632d16cbe" uuid = "101fe9f7-ebb6-4678-b671-3a81e7194747" -version = "1.2.0" +version = "1.4.0" [[deps.OrdinaryDiffEqFunctionMap]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static"] -git-tree-sha1 = "816a531b6e980ce81d517a919ff68ee33431aa38" +git-tree-sha1 = "fe750e4b8c1b1b9e1c1319ff2e052e83ad57b3ac" uuid = "d3585ca7-f5d3-4ba6-8057-292ed1abd90f" -version = "1.3.0" +version = "1.5.0" [[deps.OrdinaryDiffEqHighOrderRK]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static"] -git-tree-sha1 = "15decb1668d79933ae96d54224542f3c6eefbe8c" +git-tree-sha1 = "42096f72136078fa02804515f1748ddeb1f0d47d" uuid = "d28bc4f8-55e1-4f49-af69-84c1a99f0f58" -version = "1.3.0" +version = "1.5.0" [[deps.OrdinaryDiffEqIMEXMultistep]] deps = ["ADTypes", "DiffEqBase", "FastBroadcast", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqNonlinearSolve", "Reexport", "SciMLBase"] -git-tree-sha1 = "aeedd794e7a0bf062e96a29c5555c6ab17de2409" +git-tree-sha1 = "a5dcd75959dada0005b1707a5ca9359faa1734ba" uuid = "9f002381-b378-40b7-97a6-27a27c83f129" -version = "1.5.0" +version = "1.7.0" [[deps.OrdinaryDiffEqLinear]] deps = ["DiffEqBase", "ExponentialUtilities", "LinearAlgebra", "OrdinaryDiffEqCore", "RecursiveArrayTools", "Reexport", 
"SciMLBase", "SciMLOperators"] -git-tree-sha1 = "27c1c09db6e37349d2cc57b7b8c519c757e34c60" +git-tree-sha1 = "925fc0136e8128fd19abf126e9358ec1f997390f" uuid = "521117fe-8c41-49f8-b3b6-30780b3f0fb5" -version = "1.4.0" +version = "1.6.0" [[deps.OrdinaryDiffEqLowOrderRK]] deps = ["DiffEqBase", "FastBroadcast", "LinearAlgebra", "MuladdMacro", "OrdinaryDiffEqCore", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static"] -git-tree-sha1 = "b59435d4130dc477617f1d39f5ed1c134c51c35d" +git-tree-sha1 = "3cc4987c8e4725276b55a52e08b56ded4862917e" uuid = "1344f307-1e59-4825-a18e-ace9aa3fa4c6" -version = "1.4.0" +version = "1.6.0" [[deps.OrdinaryDiffEqLowStorageRK]] deps = ["Adapt", "DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "Polyester", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static", "StaticArrays"] -git-tree-sha1 = "b9d2df59ffa8efe75068775200d034805695ca6f" +git-tree-sha1 = "e6bd0a7fb6643a57b06a90415608a81aaf7bd772" uuid = "b0944070-b475-4768-8dec-fb6eb410534d" -version = "1.4.0" +version = "1.7.0" [[deps.OrdinaryDiffEqNonlinearSolve]] deps = ["ADTypes", "ArrayInterface", "DiffEqBase", "FastBroadcast", "FastClosures", "ForwardDiff", "LinearAlgebra", "LinearSolve", "MuladdMacro", "NonlinearSolve", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "PreallocationTools", "RecursiveArrayTools", "SciMLBase", "SciMLOperators", "SciMLStructures", "SimpleNonlinearSolve", "StaticArrays"] -git-tree-sha1 = "88f9a5a1f53f6aaa8392e300038c931fcc63c381" +git-tree-sha1 = "f59c1c07cfa674c1d3f5dd386c4274d9bc2be221" uuid = "127b3ac7-2247-4354-8eb6-78cf4e7c58e8" -version = "1.12.0" +version = "1.15.0" [[deps.OrdinaryDiffEqNordsieck]] deps = ["DiffEqBase", "FastBroadcast", "LinearAlgebra", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqTsit5", "Polyester", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static"] -git-tree-sha1 = "509abeb08ff505a26e0f8fbcda80b557710219ab" +git-tree-sha1 = 
"c90aa7fa0d725472c4098096adf6a08266c2f682" uuid = "c9986a66-5c92-4813-8696-a7ec84c806c8" -version = "1.2.0" +version = "1.4.0" [[deps.OrdinaryDiffEqPDIRK]] deps = ["ADTypes", "DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqNonlinearSolve", "Polyester", "Reexport", "SciMLBase", "StaticArrays"] -git-tree-sha1 = "5e01ca09e23b2f6025489f23555d454d982012d4" +git-tree-sha1 = "9d599d2eafdf74ab26ea6bf3feb28183a2ade143" uuid = "5dd0a6cf-3d4b-4314-aa06-06d4e299bc89" -version = "1.4.0" +version = "1.6.0" [[deps.OrdinaryDiffEqPRK]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "Polyester", "Reexport", "SciMLBase"] -git-tree-sha1 = "dad312b54bed710764658df53905bc6bbb5ebb0c" +git-tree-sha1 = "8e35132689133255be6d63df4190b5fc97b6cf2b" uuid = "5b33eab2-c0f1-4480-b2c3-94bc1e80bda1" -version = "1.2.0" +version = "1.4.0" [[deps.OrdinaryDiffEqQPRK]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static"] -git-tree-sha1 = "80d0482d9f39be2331a9ad0d9dd5b18d378fa617" +git-tree-sha1 = "63fb643a956b27cd0e33a3c6d910c3c118082e0f" uuid = "04162be5-8125-4266-98ed-640baecc6514" -version = "1.2.0" +version = "1.4.0" [[deps.OrdinaryDiffEqRKN]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "Polyester", "RecursiveArrayTools", "Reexport", "SciMLBase"] -git-tree-sha1 = "aec9f718113d57756bdc02355701e14739d12d51" +git-tree-sha1 = "a31c41f9dbea7c7179c6e544c25c7e144d63868c" uuid = "af6ede74-add8-4cfd-b1df-9a4dbb109d7a" -version = "1.3.0" +version = "1.5.0" [[deps.OrdinaryDiffEqRosenbrock]] deps = ["ADTypes", "DiffEqBase", "DifferentiationInterface", "FastBroadcast", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "LinearSolve", "MacroTools", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "Polyester", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", 
"Static"] -git-tree-sha1 = "c31944e4a1f6795d6914bf563fb591c3d1c5d62c" +git-tree-sha1 = "f34bc2f58656843596d09a4c4de8c20724ebc2f1" uuid = "43230ef6-c299-4910-a778-202eb28ce4ce" -version = "1.14.0" +version = "1.18.1" [[deps.OrdinaryDiffEqSDIRK]] deps = ["ADTypes", "DiffEqBase", "FastBroadcast", "LinearAlgebra", "MacroTools", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqNonlinearSolve", "RecursiveArrayTools", "Reexport", "SciMLBase", "TruncatedStacktraces"] -git-tree-sha1 = "0e9d0b209c50ac6008640cd46aa5a469a12dcb8a" +git-tree-sha1 = "20caa72c004414435fb5769fadb711e96ed5bcd4" uuid = "2d112036-d095-4a1e-ab9a-08536f3ecdbf" -version = "1.5.0" +version = "1.7.0" [[deps.OrdinaryDiffEqSSPRK]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "Polyester", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static", "StaticArrays"] -git-tree-sha1 = "c161e26500923470320233f7ff0444c3dfdad47a" +git-tree-sha1 = "3bce87977264916bd92455754ab336faec68bf8a" uuid = "669c94d9-1f4b-4b64-b377-1aa079aa2388" -version = "1.4.0" +version = "1.7.0" [[deps.OrdinaryDiffEqStabilizedIRK]] deps = ["ADTypes", "DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqNonlinearSolve", "OrdinaryDiffEqStabilizedRK", "RecursiveArrayTools", "Reexport", "SciMLBase", "StaticArrays"] -git-tree-sha1 = "8c3da13beebec54bf0c9660e4c33327026f069ac" +git-tree-sha1 = "75abe7462f4b0b2a2463bb512c8a5458bbd39185" uuid = "e3e12d00-db14-5390-b879-ac3dd2ef6296" -version = "1.4.0" +version = "1.6.0" [[deps.OrdinaryDiffEqStabilizedRK]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "RecursiveArrayTools", "Reexport", "SciMLBase", "StaticArrays"] -git-tree-sha1 = "01b83b89106c338cfe9b7446916122e5f3687979" +git-tree-sha1 = "7e94d3d1b3528b4bcf9e0248198ee0a2fd65a697" uuid = "358294b1-0aab-51c3-aafe-ad5ab194a2ad" -version = "1.3.0" +version = 
"1.4.0" [[deps.OrdinaryDiffEqSymplecticRK]] deps = ["DiffEqBase", "FastBroadcast", "MuladdMacro", "OrdinaryDiffEqCore", "Polyester", "RecursiveArrayTools", "Reexport", "SciMLBase"] -git-tree-sha1 = "a738d482a65f25ce0944d99f1385d062e3a02ef4" +git-tree-sha1 = "e8dd5ab225287947016dc144a5ded1fb83885638" uuid = "fa646aed-7ef9-47eb-84c4-9443fc8cbfa8" -version = "1.5.0" +version = "1.7.0" [[deps.OrdinaryDiffEqTsit5]] deps = ["DiffEqBase", "FastBroadcast", "LinearAlgebra", "MuladdMacro", "OrdinaryDiffEqCore", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static", "TruncatedStacktraces"] -git-tree-sha1 = "90300c3151763fc90cb530e1ef78ffccdd43dea4" +git-tree-sha1 = "778c7d379265f17f40dbe9aaa6f6a2a08bc7fa3e" uuid = "b1df2697-797e-41e3-8120-5422d3b24e4a" -version = "1.3.0" +version = "1.5.0" [[deps.OrdinaryDiffEqVerner]] deps = ["DiffEqBase", "FastBroadcast", "LinearAlgebra", "MuladdMacro", "OrdinaryDiffEqCore", "Polyester", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "Static", "TruncatedStacktraces"] -git-tree-sha1 = "e6e7be78dd7084c89a909f3b08e8a01be5698235" +git-tree-sha1 = "185578fa7c38119d4318326f9375f1cba0f0ce53" uuid = "79d7bb75-1356-48c1-b8c0-6832512096c2" -version = "1.4.0" +version = "1.6.0" [[deps.PCRE2_jll]] deps = ["Artifacts", "Libdl"] uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15" -version = "10.42.0+1" +version = "10.44.0+1" [[deps.PDMats]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] @@ -2021,12 +2064,6 @@ weakdeps = ["StatsBase"] [deps.PDMats.extensions] StatsBaseExt = "StatsBase" -[[deps.PackageExtensionCompat]] -git-tree-sha1 = "fb28e33b8a95c4cee25ce296c817d89cc2e53518" -uuid = "65ce6f38-6b18-4e1d-a461-8949797d7930" -version = "1.0.2" -weakdeps = ["Requires", "TOML"] - [[deps.Pango_jll]] deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "FriBidi_jll", "Glib_jll", "HarfBuzz_jll", "JLLWrappers", "Libdl"] git-tree-sha1 = 
"1f7f9bbd5f7a2e5a9f7d96e51c9754454ea7f60b" @@ -2054,7 +2091,7 @@ version = "0.44.2+0" [[deps.Pkg]] deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "Random", "SHA", "TOML", "Tar", "UUIDs", "p7zip_jll"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -version = "1.11.0" +version = "1.12.0" weakdeps = ["REPL"] [deps.Pkg.extensions] @@ -2068,15 +2105,15 @@ version = "3.3.0" [[deps.PlotUtils]] deps = ["ColorSchemes", "Colors", "Dates", "PrecompileTools", "Printf", "Random", "Reexport", "StableRNGs", "Statistics"] -git-tree-sha1 = "3ca9a356cd2e113c420f2c13bea19f8d3fb1cb18" +git-tree-sha1 = "26ca162858917496748aad52bb5d3be4d26a228a" uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" -version = "1.4.3" +version = "1.4.4" [[deps.Plots]] -deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "TOML", "UUIDs", "UnicodeFun", "UnitfulLatexify", "Unzip"] -git-tree-sha1 = "809ba625a00c605f8d00cd2a9ae19ce34fc24d68" +deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "TOML", "UUIDs", "UnicodeFun", "Unzip"] +git-tree-sha1 = "12ce661880f8e309569074a61d3767e5756a199f" uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -version = "1.40.13" +version = "1.41.1" [deps.Plots.extensions] FileIOExt = "FileIO" @@ -2112,20 +2149,20 @@ version = "0.2.2" 
[[deps.Polynomials]] deps = ["LinearAlgebra", "OrderedCollections", "RecipesBase", "Requires", "Setfield", "SparseArrays"] -git-tree-sha1 = "555c272d20fc80a2658587fb9bbda60067b93b7c" +git-tree-sha1 = "972089912ba299fba87671b025cd0da74f5f54f7" uuid = "f27b6e38-b328-58d1-80ce-0feddd5e7a45" -version = "4.0.19" +version = "4.1.0" [deps.Polynomials.extensions] PolynomialsChainRulesCoreExt = "ChainRulesCore" PolynomialsFFTWExt = "FFTW" - PolynomialsMakieCoreExt = "MakieCore" + PolynomialsMakieExt = "Makie" PolynomialsMutableArithmeticsExt = "MutableArithmetics" [deps.Polynomials.weakdeps] ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" FFTW = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" - MakieCore = "20f20a25-4f0e-4fdf-b5d1-57303727442b" + Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" MutableArithmetics = "d8a4904e-b15c-11e9-3269-09a3773c0cb0" [[deps.PooledArrays]] @@ -2158,9 +2195,9 @@ version = "0.4.34" [[deps.PrecompileTools]] deps = ["Preferences"] -git-tree-sha1 = "5aa36f7049a63a1528fe8f7c3f2113413ffd4e1f" +git-tree-sha1 = "07a921781cab75691315adc645096ed5e370cb77" uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a" -version = "1.2.1" +version = "1.3.3" [[deps.Preconditioners]] deps = ["AlgebraicMultigrid", "LimitedLDLFactorizations", "LinearAlgebra", "SparseArrays"] @@ -2175,10 +2212,10 @@ uuid = "21216c6a-2e73-6563-6e65-726566657250" version = "1.5.0" [[deps.PrettyTables]] -deps = ["Crayons", "LaTeXStrings", "Markdown", "PrecompileTools", "Printf", "Reexport", "StringManipulation", "Tables"] -git-tree-sha1 = "1101cd475833706e4d0e7b122218257178f48f34" +deps = ["Crayons", "LaTeXStrings", "Markdown", "PrecompileTools", "Printf", "REPL", "Reexport", "StringManipulation", "Tables"] +git-tree-sha1 = "6b8e2f0bae3f678811678065c09571c1619da219" uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" -version = "2.4.0" +version = "3.1.0" [[deps.Primes]] deps = ["IntegerMathUtils"] @@ -2192,9 +2229,22 @@ uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" version = "1.11.0" [[deps.Profile]] 
+deps = ["StyledStrings"] uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79" version = "1.11.0" +[[deps.ProgressLogging]] +deps = ["Logging", "SHA", "UUIDs"] +git-tree-sha1 = "d95ed0324b0799843ac6f7a6a85e65fe4e5173f0" +uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c" +version = "0.1.5" + +[[deps.ProgressMeter]] +deps = ["Distributed", "Printf"] +git-tree-sha1 = "fbb92c6c56b34e1a2c4c36058f68f332bec840e7" +uuid = "92933f4c-e287-5a05-a399-4b506db050ca" +version = "1.11.0" + [[deps.PtrArrays]] git-tree-sha1 = "1d36ef11a9aaf1e8b74dacc6a731dd1de8fd493d" uuid = "43287f4e-b6f4-7ad1-bb20-aadabca52c3d" @@ -2229,45 +2279,29 @@ deps = ["DataStructures", "LinearAlgebra"] git-tree-sha1 = "9da16da70037ba9d701192e27befedefb91ec284" uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" version = "2.11.2" +weakdeps = ["Enzyme"] [deps.QuadGK.extensions] QuadGKEnzymeExt = "Enzyme" - [deps.QuadGK.weakdeps] - Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" - [[deps.QuantEcon]] -deps = ["DSP", "DataStructures", "Distributions", "FFTW", "Graphs", "LinearAlgebra", "Markdown", "NLopt", "Optim", "Pkg", "Primes", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase", "Test"] -git-tree-sha1 = "034293b29fdbcae73aeb7ca0b2755e693f04701b" +deps = ["DSP", "Distributions", "FFTW", "Graphs", "LinearAlgebra", "Markdown", "NLopt", "Optim", "Primes", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase"] +git-tree-sha1 = "441453af42d42c42beeadf6cab81e313c38c493f" uuid = "fcd29c91-0bd7-5a09-975d-7ac3f643a60c" -version = "0.16.6" - -[[deps.Query]] -deps = ["DataValues", "IterableTables", "MacroTools", "QueryOperators", "Statistics"] -git-tree-sha1 = "a66aa7ca6f5c29f0e303ccef5c8bd55067df9bbe" -uuid = "1a8c2f83-1ff3-5112-b086-8aa67b057ba1" -version = "1.0.0" - -[[deps.QueryOperators]] -deps = ["DataStructures", "DataValues", "IteratorInterfaceExtensions", "TableShowUtils"] -git-tree-sha1 = "911c64c204e7ecabfd1872eb93c49b4e7c701f02" -uuid = "2aef5ad7-51ca-5a8f-8e88-e75cf067b44b" -version 
= "0.9.3" +version = "0.16.8" -[[deps.RData]] -deps = ["CategoricalArrays", "CodecZlib", "DataFrames", "Dates", "FileIO", "Requires", "TimeZones", "Unicode"] -git-tree-sha1 = "19e47a495dfb7240eb44dc6971d660f7e4244a72" -uuid = "df47a6cb-8c03-5eed-afd8-b6050d6c41da" -version = "0.8.3" +[[deps.QuasiMonteCarlo]] +deps = ["Accessors", "ConcreteStructs", "LatticeRules", "LinearAlgebra", "Primes", "Random", "Requires", "Sobol", "StatsBase"] +git-tree-sha1 = "cc086f8485bce77b6187141e1413c3b55f9a4341" +uuid = "8a4e6c94-4038-4cdc-81c3-7e6ffdb2a71b" +version = "0.3.3" +weakdeps = ["Distributions"] -[[deps.RDatasets]] -deps = ["CSV", "CodecZlib", "DataFrames", "FileIO", "Printf", "RData", "Reexport"] -git-tree-sha1 = "2720e6f6afb3e562ccb70a6b62f8f308ff810333" -uuid = "ce6b1742-4840-55fa-b093-852dadbb1d8b" -version = "0.7.7" + [deps.QuasiMonteCarlo.extensions] + QuasiMonteCarloDistributionsExt = "Distributions" [[deps.REPL]] -deps = ["InteractiveUtils", "Markdown", "Sockets", "StyledStrings", "Unicode"] +deps = ["InteractiveUtils", "JuliaSyntaxHighlighting", "Markdown", "Sockets", "StyledStrings", "Unicode"] uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" version = "1.11.0" @@ -2347,24 +2381,6 @@ git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" uuid = "189a3867-3050-52da-a836-e630ba90ab69" version = "1.2.2" -[[deps.RegressionTables]] -deps = ["Distributions", "Format", "Statistics", "StatsAPI", "StatsBase", "StatsModels"] -git-tree-sha1 = "ff6d3659bff2b6309e0628e4bdf1783aaf891c09" -uuid = "d519eb52-b820-54da-95a6-98e1306fdade" -version = "0.7.8" - - [deps.RegressionTables.extensions] - RegressionTablesFixedEffectModelsExt = "FixedEffectModels" - RegressionTablesGLFixedEffectModelsExt = "GLFixedEffectModels" - RegressionTablesGLMExt = "GLM" - RegressionTablesMixedModelsExt = "MixedModels" - - [deps.RegressionTables.weakdeps] - FixedEffectModels = "9d5cd8c9-2029-5cab-9928-427838db53e3" - GLFixedEffectModels = "bafb0ae5-e5f5-5100-81b6-6a55d777c812" - GLM = 
"38e38edf-8417-5370-95a0-9cbb8c7f171a" - MixedModels = "ff71e718-51f3-5ec2-a782-8ffcbfa3c316" - [[deps.RelocatableFolders]] deps = ["SHA", "Scratch"] git-tree-sha1 = "ffdaf70d81cf6ff22c2b6e733c900c3321cab864" @@ -2397,9 +2413,9 @@ version = "0.5.1+0" [[deps.Roots]] deps = ["Accessors", "CommonSolve", "Printf"] -git-tree-sha1 = "3ac13765751ffc81e3531223782d9512f6023f71" +git-tree-sha1 = "8a433b1ede5e9be9a7ba5b1cc6698daa8d718f1d" uuid = "f2b01f46-fcfa-551c-844a-d8ac1e96c665" -version = "2.2.7" +version = "2.2.10" [deps.Roots.extensions] RootsChainRulesCoreExt = "ChainRulesCore" @@ -2407,6 +2423,7 @@ version = "2.2.7" RootsIntervalRootFindingExt = "IntervalRootFinding" RootsSymPyExt = "SymPy" RootsSymPyPythonCallExt = "SymPyPythonCall" + RootsUnitfulExt = "Unitful" [deps.Roots.weakdeps] ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" @@ -2414,12 +2431,13 @@ version = "2.2.7" IntervalRootFinding = "d2bf35a9-74e0-55ec-b149-d360ff49b807" SymPy = "24249f21-da20-56a4-8eb1-6a02cf4ae2e6" SymPyPythonCall = "bc8888f7-b21e-4b7c-a06a-5d9c9496438c" + Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" [[deps.RuntimeGeneratedFunctions]] deps = ["ExprTools", "SHA", "Serialization"] -git-tree-sha1 = "86a8a8b783481e1ea6b9c91dd949cb32191f8ab4" +git-tree-sha1 = "2f609ec2295c452685d3142bc4df202686e555d2" uuid = "7e49a35a-f44a-4d26-94aa-eba1b4ca6b47" -version = "0.5.15" +version = "0.5.16" [[deps.SHA]] uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" @@ -2436,50 +2454,68 @@ git-tree-sha1 = "456f610ca2fbd1c14f5fcf31c6bfadc55e7d66e0" uuid = "476501e8-09a2-5ece-8869-fb82de89a1fa" version = "0.6.43" -[[deps.SPRAL_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "Libdl", "METIS_jll", "libblastrampoline_jll"] -git-tree-sha1 = "11f3da4b25efacd1cec8e263421f2a9003a5e8e0" -uuid = "319450e9-13b8-58e8-aa9f-8fd1420848ab" -version = "2024.5.8+0" - [[deps.SciMLBase]] -deps = ["ADTypes", "Accessors", "ArrayInterface", "CommonSolve", "ConstructionBase", 
"Distributed", "DocStringExtensions", "EnumX", "FunctionWrappersWrappers", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "Markdown", "Moshi", "PrecompileTools", "Preferences", "Printf", "RecipesBase", "RecursiveArrayTools", "Reexport", "RuntimeGeneratedFunctions", "SciMLOperators", "SciMLStructures", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface"] -git-tree-sha1 = "2fd047893cb0089b180fcbb7e8434ba15dcc2841" +deps = ["ADTypes", "Accessors", "Adapt", "ArrayInterface", "CommonSolve", "ConstructionBase", "Distributed", "DocStringExtensions", "EnumX", "FunctionWrappersWrappers", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "Markdown", "Moshi", "PreallocationTools", "PrecompileTools", "Preferences", "Printf", "RecipesBase", "RecursiveArrayTools", "Reexport", "RuntimeGeneratedFunctions", "SciMLLogging", "SciMLOperators", "SciMLPublic", "SciMLStructures", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface"] +git-tree-sha1 = "7614a1b881317b6800a8c66eb1180c6ea5b986f3" uuid = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -version = "2.87.0" +version = "2.124.0" [deps.SciMLBase.extensions] SciMLBaseChainRulesCoreExt = "ChainRulesCore" + SciMLBaseDifferentiationInterfaceExt = "DifferentiationInterface" + SciMLBaseDistributionsExt = "Distributions" + SciMLBaseEnzymeExt = "Enzyme" + SciMLBaseForwardDiffExt = "ForwardDiff" SciMLBaseMLStyleExt = "MLStyle" SciMLBaseMakieExt = "Makie" + SciMLBaseMeasurementsExt = "Measurements" + SciMLBaseMonteCarloMeasurementsExt = "MonteCarloMeasurements" + SciMLBaseMooncakeExt = "Mooncake" SciMLBasePartialFunctionsExt = "PartialFunctions" SciMLBasePyCallExt = "PyCall" SciMLBasePythonCallExt = "PythonCall" SciMLBaseRCallExt = "RCall" + SciMLBaseReverseDiffExt = "ReverseDiff" + SciMLBaseTrackerExt = "Tracker" SciMLBaseZygoteExt = ["Zygote", "ChainRulesCore"] [deps.SciMLBase.weakdeps] ChainRules = "082447d4-558c-5d27-93f4-14fc19e9eca2" ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + 
DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" + Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" + Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" + ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" MLStyle = "d8e11817-5142-5d16-987a-aa16d5891078" Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" + Measurements = "eff96d63-e80a-5855-80a2-b1b0885c5ab7" + MonteCarloMeasurements = "0987c9cc-fe09-11e8-30f0-b96dd679fdca" + Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6" PartialFunctions = "570af359-4316-4cb7-8c74-252c00c2016b" PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0" PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" RCall = "6f49c342-dc21-5d91-9882-a32aef131414" + ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" + Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [[deps.SciMLJacobianOperators]] deps = ["ADTypes", "ArrayInterface", "ConcreteStructs", "ConstructionBase", "DifferentiationInterface", "FastClosures", "LinearAlgebra", "SciMLBase", "SciMLOperators"] -git-tree-sha1 = "d563758f3ce5153810adebc534d88e24d34eeb95" +git-tree-sha1 = "a273b291c90909ba6fe08402dd68e09aae423008" uuid = "19f34311-ddf3-4b8b-af20-060888a46c0e" -version = "0.1.5" +version = "0.1.11" + +[[deps.SciMLLogging]] +deps = ["Logging", "LoggingExtras", "Preferences"] +git-tree-sha1 = "5a026f5549ad167cda34c67b62f8d3dc55754da3" +uuid = "a6db7da4-7206-11f0-1eab-35f2a5dbe1d1" +version = "1.3.1" [[deps.SciMLOperators]] deps = ["Accessors", "ArrayInterface", "DocStringExtensions", "LinearAlgebra", "MacroTools"] -git-tree-sha1 = "1c4b7f6c3e14e6de0af66e66b86d525cae10ecb4" +git-tree-sha1 = "18e8ea3fdfca9c3408f1df8fc1d7690b12784338" uuid = "c0aeaf25-5076-4817-a8d5-81caf7dfa961" -version = "0.3.13" +version = "1.10.0" weakdeps = ["SparseArrays", "StaticArraysCore"] [deps.SciMLOperators.extensions] @@ -2524,11 +2560,6 @@ deps = ["Distributed", "Mmap", "Random", "Serialization"] uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" 
version = "1.11.0" -[[deps.ShiftedArrays]] -git-tree-sha1 = "503688b59397b3307443af35cd953a13e8005c16" -uuid = "1277b4bf-5013-50f5-be3d-901d8477a67a" -version = "2.0.0" - [[deps.Showoff]] deps = ["Dates", "Grisu"] git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de" @@ -2542,19 +2573,17 @@ version = "1.2.0" [[deps.SimpleNonlinearSolve]] deps = ["ADTypes", "ArrayInterface", "BracketingNonlinearSolve", "CommonSolve", "ConcreteStructs", "DifferentiationInterface", "FastClosures", "FiniteDiff", "ForwardDiff", "LineSearch", "LinearAlgebra", "MaybeInplace", "NonlinearSolveBase", "PrecompileTools", "Reexport", "SciMLBase", "Setfield", "StaticArraysCore"] -git-tree-sha1 = "09d986e27a606f172c5b6cffbd8b8b2f10bf1c75" +git-tree-sha1 = "8825064775bf4ae0f22d04ea63979d8c868fd510" uuid = "727e6d20-b764-4bd8-a329-72de5adea6c7" -version = "2.7.0" +version = "2.9.0" [deps.SimpleNonlinearSolve.extensions] SimpleNonlinearSolveChainRulesCoreExt = "ChainRulesCore" - SimpleNonlinearSolveDiffEqBaseExt = "DiffEqBase" SimpleNonlinearSolveReverseDiffExt = "ReverseDiff" SimpleNonlinearSolveTrackerExt = "Tracker" [deps.SimpleNonlinearSolve.weakdeps] ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" - DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" Tracker = "9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c" @@ -2569,6 +2598,12 @@ git-tree-sha1 = "58e6353e72cde29b90a69527e56df1b5c3d8c437" uuid = "ce78b400-467f-4804-87d8-8f486da07d0a" version = "1.1.0" +[[deps.Sobol]] +deps = ["DelimitedFiles", "Random"] +git-tree-sha1 = "5a74ac22a9daef23705f010f72c81d6925b19df8" +uuid = "ed01d8cd-4d21-5b2a-85b4-cc3bdc58bad4" +version = "1.5.0" + [[deps.Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" version = "1.11.0" @@ -2582,43 +2617,46 @@ version = "1.2.2" [[deps.SparseArrays]] deps = ["Libdl", "LinearAlgebra", "Random", "Serialization", "SuiteSparse_jll"] uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" -version = "1.11.0" +version = "1.12.0" 
-[[deps.SparseDiffTools]] -deps = ["ADTypes", "Adapt", "ArrayInterface", "Compat", "DataStructures", "FiniteDiff", "ForwardDiff", "Graphs", "LinearAlgebra", "PackageExtensionCompat", "Random", "Reexport", "SciMLOperators", "Setfield", "SparseArrays", "StaticArrayInterface", "StaticArrays", "UnPack", "VertexSafeGraphs"] -git-tree-sha1 = "ccbf06a08573200853b1bd06203d8ccce8449578" -uuid = "47a9eef4-7e08-11e9-0b38-333d64bd3804" -version = "2.26.0" +[[deps.SparseConnectivityTracer]] +deps = ["ADTypes", "DocStringExtensions", "FillArrays", "LinearAlgebra", "Random", "SparseArrays"] +git-tree-sha1 = "ba6dc9b87304964647bd1c750b903cb360003a36" +uuid = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" +version = "1.1.2" - [deps.SparseDiffTools.extensions] - SparseDiffToolsEnzymeExt = "Enzyme" - SparseDiffToolsPolyesterExt = "Polyester" - SparseDiffToolsPolyesterForwardDiffExt = "PolyesterForwardDiff" - SparseDiffToolsSymbolicsExt = "Symbolics" - SparseDiffToolsZygoteExt = "Zygote" + [deps.SparseConnectivityTracer.extensions] + SparseConnectivityTracerChainRulesCoreExt = "ChainRulesCore" + SparseConnectivityTracerLogExpFunctionsExt = "LogExpFunctions" + SparseConnectivityTracerNNlibExt = "NNlib" + SparseConnectivityTracerNaNMathExt = "NaNMath" + SparseConnectivityTracerSpecialFunctionsExt = "SpecialFunctions" - [deps.SparseDiffTools.weakdeps] - Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" - Polyester = "f517fe37-dbe3-4b94-8317-1923a5111588" - PolyesterForwardDiff = "98d1487c-24ca-40b6-b7ab-df2af84e126b" - Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" - Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" + [deps.SparseConnectivityTracer.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + LogExpFunctions = "2ab3a3ac-af41-5b50-aa03-7779005ae688" + NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" + NaNMath = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" + SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" [[deps.SparseMatrixColorings]] deps = ["ADTypes", 
"DocStringExtensions", "LinearAlgebra", "PrecompileTools", "Random", "SparseArrays"] -git-tree-sha1 = "d3f3b7bb8a561b5ff20ee7cf9574841ee4e4e137" +git-tree-sha1 = "6ed48d9a3b22417c765dc273ae3e1e4de035e7c8" uuid = "0a514795-09f3-496d-8182-132a7b665d35" -version = "0.4.22" +version = "0.4.23" [deps.SparseMatrixColorings.extensions] SparseMatrixColoringsCUDAExt = "CUDA" SparseMatrixColoringsCliqueTreesExt = "CliqueTrees" SparseMatrixColoringsColorsExt = "Colors" + SparseMatrixColoringsJuMPExt = ["JuMP", "MathOptInterface"] [deps.SparseMatrixColorings.weakdeps] CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" CliqueTrees = "60701a23-6482-424a-84db-faee86b9b1f8" Colors = "5ae59095-9a9b-59fe-a467-6f913c188581" + JuMP = "4076af6c-e467-56ae-b986-b466b2749572" + MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" [[deps.SpecialFunctions]] deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] @@ -2655,9 +2693,9 @@ weakdeps = ["OffsetArrays", "StaticArrays"] [[deps.StaticArrays]] deps = ["LinearAlgebra", "PrecompileTools", "Random", "StaticArraysCore"] -git-tree-sha1 = "0feb6b9031bd5c51f9072393eb5ab3efd31bf9e4" +git-tree-sha1 = "b8693004b385c842357406e3af647701fe783f98" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.9.13" +version = "1.9.15" weakdeps = ["ChainRulesCore", "Statistics"] [deps.StaticArrays.extensions] @@ -2702,23 +2740,17 @@ weakdeps = ["ChainRulesCore", "InverseFunctions"] StatsFunsChainRulesCoreExt = "ChainRulesCore" StatsFunsInverseFunctionsExt = "InverseFunctions" -[[deps.StatsModels]] -deps = ["DataAPI", "DataStructures", "LinearAlgebra", "Printf", "REPL", "ShiftedArrays", "SparseArrays", "StatsAPI", "StatsBase", "StatsFuns", "Tables"] -git-tree-sha1 = "b117c1fe033a04126780c898e75c7980bf676df3" -uuid = "3eaba693-59b7-5ba5-a881-562e759f1c8d" -version = "0.7.7" - [[deps.StatsPlots]] deps = ["AbstractFFTs", "Clustering", "DataStructures", "Distributions", "Interpolations", "KernelDensity", "LinearAlgebra", 
"MultivariateStats", "NaNMath", "Observables", "Plots", "RecipesBase", "RecipesPipeline", "Reexport", "StatsBase", "TableOperations", "Tables", "Widgets"] -git-tree-sha1 = "3b1dcbf62e469a67f6733ae493401e53d92ff543" +git-tree-sha1 = "88cf3587711d9ad0a55722d339a013c4c56c5bbc" uuid = "f3b207a7-027a-5e70-b257-86293d7955fd" -version = "0.15.7" +version = "0.15.8" [[deps.StochasticDiffEq]] -deps = ["ADTypes", "Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DiffEqNoiseProcess", "DocStringExtensions", "FastPower", "FiniteDiff", "ForwardDiff", "JumpProcesses", "LevyArea", "LinearAlgebra", "Logging", "MuladdMacro", "NLsolve", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqNonlinearSolve", "Random", "RandomNumbers", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SparseArrays", "SparseDiffTools", "StaticArrays", "UnPack"] -git-tree-sha1 = "fa374aac59f48d11274ce15862aecb8a144350a9" +deps = ["ADTypes", "Adapt", "ArrayInterface", "DataStructures", "DiffEqBase", "DiffEqNoiseProcess", "DocStringExtensions", "FastPower", "FiniteDiff", "ForwardDiff", "JumpProcesses", "LevyArea", "LinearAlgebra", "Logging", "MuladdMacro", "NLsolve", "OrdinaryDiffEqCore", "OrdinaryDiffEqDifferentiation", "OrdinaryDiffEqNonlinearSolve", "Random", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SparseArrays", "StaticArrays", "UnPack"] +git-tree-sha1 = "a7d5d87185450b61a95000547c85401ffd8e6e42" uuid = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0" -version = "6.76.0" +version = "6.84.0" [[deps.StrideArraysCore]] deps = ["ArrayInterface", "CloseOpenIntervals", "IfElse", "LayoutPointers", "LinearAlgebra", "ManualMemory", "SIMDTypes", "Static", "StaticArrayInterface", "ThreadingUtilities"] @@ -2732,11 +2764,10 @@ git-tree-sha1 = "725421ae8e530ec29bcbdddbe91ff8053421d023" uuid = "892a3eda-7b42-436c-8928-eab12a02cf0e" version = "0.4.1" -[[deps.StructTypes]] -deps = ["Dates", "UUIDs"] -git-tree-sha1 = "159331b30e94d7b11379037feeb9b690950cace8" 
-uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4" -version = "1.11.0" +[[deps.StructIO]] +git-tree-sha1 = "c581be48ae1cbf83e899b14c07a807e1787512cc" +uuid = "53d494c1-5632-5724-8f4c-31dff12d585f" +version = "0.3.1" [[deps.StyledStrings]] uuid = "f489334b-da3d-4c2e-b8f0-e476e12c162b" @@ -2749,13 +2780,13 @@ uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" [[deps.SuiteSparse_jll]] deps = ["Artifacts", "Libdl", "libblastrampoline_jll"] uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" -version = "7.7.0+0" +version = "7.8.3+2" [[deps.SymbolicIndexingInterface]] deps = ["Accessors", "ArrayInterface", "RuntimeGeneratedFunctions", "StaticArraysCore"] -git-tree-sha1 = "b19cf024a2b11d72bef7c74ac3d1cbe86ec9e4ed" +git-tree-sha1 = "94c58884e013efff548002e8dc2fdd1cb74dfce5" uuid = "2efcf032-c050-4f8e-a9bb-153293bab1f5" -version = "0.3.44" +version = "0.3.46" weakdeps = ["PrettyTables"] [deps.SymbolicIndexingInterface.extensions] @@ -2782,68 +2813,48 @@ version = "3.32.0" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" [[deps.Symbolics]] -deps = ["ADTypes", "ArrayInterface", "Bijections", "CommonWorldInvalidations", "ConstructionBase", "DataStructures", "DiffRules", "Distributions", "DocStringExtensions", "DomainSets", "DynamicPolynomials", "LaTeXStrings", "Latexify", "Libdl", "LinearAlgebra", "LogExpFunctions", "MacroTools", "Markdown", "NaNMath", "OffsetArrays", "PrecompileTools", "Primes", "RecipesBase", "Reexport", "RuntimeGeneratedFunctions", "SciMLBase", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArraysCore", "SymbolicIndexingInterface", "SymbolicLimits", "SymbolicUtils", "TermInterface"] -git-tree-sha1 = "e46dbf646bc3944c22a37745361c2e0a94f81d66" +deps = ["ADTypes", "ArrayInterface", "Bijections", "CommonWorldInvalidations", "ConstructionBase", "DataStructures", "DiffRules", "Distributions", "DocStringExtensions", "DomainSets", "DynamicPolynomials", "LaTeXStrings", "Latexify", "Libdl", "LinearAlgebra", "LogExpFunctions", "MacroTools", "Markdown", "NaNMath", 
"OffsetArrays", "PrecompileTools", "Primes", "RecipesBase", "Reexport", "RuntimeGeneratedFunctions", "SciMLBase", "SciMLPublic", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArraysCore", "SymbolicIndexingInterface", "SymbolicLimits", "SymbolicUtils", "TermInterface"] +git-tree-sha1 = "8206e177903a41519145f577cb7f3793f3b7c960" uuid = "0c5d862f-8b57-4792-8d23-62f2024744c7" -version = "6.38.0" +version = "6.57.0" [deps.Symbolics.extensions] + SymbolicsD3TreesExt = "D3Trees" SymbolicsForwardDiffExt = "ForwardDiff" SymbolicsGroebnerExt = "Groebner" SymbolicsLuxExt = "Lux" SymbolicsNemoExt = "Nemo" SymbolicsPreallocationToolsExt = ["PreallocationTools", "ForwardDiff"] SymbolicsSymPyExt = "SymPy" + SymbolicsSymPyPythonCallExt = "SymPyPythonCall" [deps.Symbolics.weakdeps] + D3Trees = "e3df1716-f71e-5df9-9e2d-98e193103c45" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" Groebner = "0b43b601-686d-58a3-8a1c-6623616c7cd4" Lux = "b2108857-7c20-44ae-9111-449ecde12c47" Nemo = "2edaba10-b0f1-5616-af89-8c11ac63239a" PreallocationTools = "d236fae5-4411-538c-8e31-a6e3d9e00b46" SymPy = "24249f21-da20-56a4-8eb1-6a02cf4ae2e6" + SymPyPythonCall = "bc8888f7-b21e-4b7c-a06a-5d9c9496438c" [[deps.TOML]] deps = ["Dates"] uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" version = "1.0.3" -[[deps.TZJData]] -deps = ["Artifacts"] -git-tree-sha1 = "72df96b3a595b7aab1e101eb07d2a435963a97e2" -uuid = "dc5dba14-91b3-4cab-a142-028a31da12f7" -version = "1.5.0+2025b" - -[[deps.TableMetadataTools]] -deps = ["DataAPI", "Dates", "TOML", "Tables", "Unitful"] -git-tree-sha1 = "c0405d3f8189bb9a9755e429c6ea2138fca7e31f" -uuid = "9ce81f87-eacc-4366-bf80-b621a3098ee2" -version = "0.1.0" - [[deps.TableOperations]] deps = ["SentinelArrays", "Tables", "Test"] git-tree-sha1 = "e383c87cf2a1dc41fa30c093b2a19877c83e1bc1" uuid = "ab02a1b2-a7df-11e8-156e-fb1833f50b87" version = "1.2.0" -[[deps.TableShowUtils]] -deps = ["DataValues", "Dates", "JSON", "Markdown", "Unicode"] -git-tree-sha1 = 
"2a41a3dedda21ed1184a47caab56ed9304e9a038" -uuid = "5e66a065-1f0a-5976-b372-e0b8c017ca10" -version = "0.2.6" - [[deps.TableTraits]] deps = ["IteratorInterfaceExtensions"] git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39" uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c" version = "1.0.1" -[[deps.TableTraitsUtils]] -deps = ["DataValues", "IteratorInterfaceExtensions", "Missings", "TableTraits"] -git-tree-sha1 = "78fecfe140d7abb480b53a44f3f85b6aa373c293" -uuid = "382cd787-c1b6-5bf2-a167-d5b971a19bda" -version = "1.0.2" - [[deps.Tables]] deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "OrderedCollections", "TableTraits"] git-tree-sha1 = "f2c1efbc8f3a609aadf318094f8fc5204bdaf344" @@ -2871,6 +2882,12 @@ git-tree-sha1 = "d673e0aca9e46a2f63720201f55cc7b3e7169b16" uuid = "8ea1fca8-c5ef-4a55-8b96-4e9afe9c9a3c" version = "2.0.0" +[[deps.TerminalLoggers]] +deps = ["LeftChildRightSiblingTrees", "Logging", "Markdown", "Printf", "ProgressLogging", "UUIDs"] +git-tree-sha1 = "f133fab380933d042f6796eda4e130272ba520ca" +uuid = "5d786b92-1e48-4d6f-9151-6b4477ca9bed" +version = "0.1.7" + [[deps.Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" @@ -2882,16 +2899,6 @@ git-tree-sha1 = "d969183d3d244b6c33796b5ed01ab97328f2db85" uuid = "8290d209-cae3-49c0-8002-c8c24d57dab5" version = "0.5.5" -[[deps.TimeZones]] -deps = ["Artifacts", "Dates", "Downloads", "InlineStrings", "Mocking", "Printf", "Scratch", "TZJData", "Unicode", "p7zip_jll"] -git-tree-sha1 = "06f4f1f3e8ff09e42e59b043a747332e88e01aba" -uuid = "f269a46b-ccf7-5d73-abea-4c690281aa53" -version = "1.22.1" -weakdeps = ["RecipesBase"] - - [deps.TimeZones.extensions] - TimeZonesRecipesBaseExt = "RecipesBase" - [[deps.TimerOutputs]] deps = ["ExprTools", "Printf"] git-tree-sha1 = "3748bd928e68c7c346b52125cf41fff0de6937d0" @@ -2904,6 +2911,18 @@ version = "0.5.29" [deps.TimerOutputs.weakdeps] FlameGraphs = 
"08572546-2f56-4bcf-ba4e-bab62c3a3f89" +[[deps.Tracy]] +deps = ["ExprTools", "LibTracyClient_jll", "Libdl"] +git-tree-sha1 = "73e3ff50fd3990874c59fef0f35d10644a1487bc" +uuid = "e689c965-62c8-4b79-b2c5-8359227902fd" +version = "0.1.6" + + [deps.Tracy.extensions] + TracyProfilerExt = "TracyProfiler_jll" + + [deps.Tracy.weakdeps] + TracyProfiler_jll = "0c351ed6-8a68-550e-8b79-de6f926da83c" + [[deps.TranscodingStreams]] git-tree-sha1 = "0c45878dcfdcfa8480052b6ab162cdd138781742" uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" @@ -2940,25 +2959,6 @@ git-tree-sha1 = "53915e50200959667e78a92a418594b428dffddf" uuid = "1cfade01-22cf-5700-b092-accc4b62d6e1" version = "0.4.1" -[[deps.Unitful]] -deps = ["Dates", "LinearAlgebra", "Random"] -git-tree-sha1 = "6258d453843c466d84c17a58732dda5deeb8d3af" -uuid = "1986cc42-f94f-5a68-af5c-568840ba703d" -version = "1.24.0" -weakdeps = ["ConstructionBase", "ForwardDiff", "InverseFunctions", "Printf"] - - [deps.Unitful.extensions] - ConstructionBaseUnitfulExt = "ConstructionBase" - ForwardDiffExt = "ForwardDiff" - InverseFunctionsUnitfulExt = "InverseFunctions" - PrintfExt = "Printf" - -[[deps.UnitfulLatexify]] -deps = ["LaTeXStrings", "Latexify", "Unitful"] -git-tree-sha1 = "975c354fcd5f7e1ddcc1f1a23e6e091d99e99bc8" -uuid = "45397f5d-5981-4c77-b2b3-fc36d6e9b728" -version = "1.6.4" - [[deps.Unityper]] deps = ["ConstructionBase"] git-tree-sha1 = "25008b734a03736c41e2a7dc314ecb95bd6bbdb0" @@ -2970,24 +2970,12 @@ git-tree-sha1 = "ca0969166a028236229f63514992fc073799bb78" uuid = "41fe7b60-77ed-43a1-b4f0-825fd5a5650d" version = "0.2.0" -[[deps.Vcov]] -deps = ["Combinatorics", "GroupedArrays", "LinearAlgebra", "StatsAPI", "StatsBase", "Tables"] -git-tree-sha1 = "22491492d601448b0fef54afe8a5bdfd67282965" -uuid = "ec2bfdc2-55df-4fc9-b9ae-4958c2cf2486" -version = "0.8.1" - [[deps.VectorizationBase]] deps = ["ArrayInterface", "CPUSummary", "HostCPUFeatures", "IfElse", "LayoutPointers", "Libdl", "LinearAlgebra", "SIMDTypes", "Static", 
"StaticArrayInterface"] git-tree-sha1 = "d1d9a935a26c475ebffd54e9c7ad11627c43ea85" uuid = "3d5dd08c-fd9d-11e8-17fa-ed2836048c2f" version = "0.21.72" -[[deps.VertexSafeGraphs]] -deps = ["Graphs"] -git-tree-sha1 = "8351f8d73d7e880bfc042a8b6922684ebeafb35c" -uuid = "19fa3120-7c27-5ec5-8db8-b0b0aa330d6f" -version = "0.2.0" - [[deps.Vulkan_Loader_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Wayland_jll", "Xorg_libX11_jll", "Xorg_libXrandr_jll", "xkbcommon_jll"] git-tree-sha1 = "2f0486047a07670caad3a81a075d2e518acc5c59" @@ -3000,12 +2988,6 @@ git-tree-sha1 = "96478df35bbc2f3e1e791bc7a3d0eeee559e60e9" uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89" version = "1.24.0+0" -[[deps.WeakRefStrings]] -deps = ["DataAPI", "InlineStrings", "Parsers"] -git-tree-sha1 = "b1be2855ed9ed8eac54e5caff2afcdb442d52c23" -uuid = "ea10d353-3f73-51f8-a26c-33c1cb351aa5" -version = "1.4.2" - [[deps.Widgets]] deps = ["Colors", "Dates", "Observables", "OrderedCollections"] git-tree-sha1 = "e9aeb174f95385de31e70bd15fa066a505ea82b9" @@ -3018,17 +3000,6 @@ git-tree-sha1 = "c1a7aa6219628fcd757dede0ca95e245c5cd9511" uuid = "efce3f68-66dc-5838-9240-27a6d6f5f9b6" version = "1.0.0" -[[deps.WorkerUtilities]] -git-tree-sha1 = "cd1659ba0d57b71a464a29e64dbc67cfe83d54e7" -uuid = "76eceee3-57b5-4d4a-8e66-0e911cebbf60" -version = "1.6.1" - -[[deps.XML2_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Zlib_jll"] -git-tree-sha1 = "80d3930c6347cfce7ccf96bd3bafdf079d9c0390" -uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" -version = "2.13.9+0" - [[deps.XZ_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] git-tree-sha1 = "fee71455b0aaa3440dfdd54a9a36ccef829be7d4" @@ -3107,12 +3078,6 @@ git-tree-sha1 = "7ed9347888fac59a618302ee38216dd0379c480d" uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa" version = "0.9.12+0" -[[deps.Xorg_libpciaccess_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Zlib_jll"] -git-tree-sha1 = "4909eb8f1cbf6bd4b1c30dd18b2ead9019ef2fad" -uuid = 
"a65dc6b1-eb27-53a1-bb3e-dea574b5389e" -version = "0.18.1+0" - [[deps.Xorg_libxcb_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Xorg_libXau_jll", "Xorg_libXdmcp_jll"] git-tree-sha1 = "bfcaf7ec088eaba362093393fe11aa141fa15422" @@ -3182,7 +3147,7 @@ version = "1.6.0+0" [[deps.Zlib_jll]] deps = ["Libdl"] uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.2.13+1" +version = "1.3.1+2" [[deps.Zstd_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -3217,7 +3182,7 @@ version = "0.17.4+0" [[deps.libblastrampoline_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -version = "5.11.0+0" +version = "5.15.0+0" [[deps.libdecor_jll]] deps = ["Artifacts", "Dbus_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pango_jll", "Wayland_jll", "xkbcommon_jll"] @@ -3264,7 +3229,7 @@ version = "1.1.7+0" [[deps.nghttp2_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -version = "1.59.0+0" +version = "1.64.0+1" [[deps.oneTBB_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl"] @@ -3275,7 +3240,7 @@ version = "2022.0.0+1" [[deps.p7zip_jll]] deps = ["Artifacts", "Libdl"] uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.4.0+2" +version = "17.5.0+2" [[deps.x264_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] diff --git a/lectures/Project.toml b/lectures/Project.toml index 3c48b137..3cc80997 100644 --- a/lectures/Project.toml +++ b/lectures/Project.toml @@ -1,46 +1,40 @@ name = "quantecon-notebooks-julia" -authors = ["quantecon "] version = "0.10.0" +authors = ["quantecon "] [deps] Arpack = "7d9fca2a-8960-54d3-9f78-7d1dccf2cb97" BandedMatrices = "aae01518-5342-5314-be14-df237901396f" BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" -CategoricalArrays = "324d7699-5711-5eae-9e2f-1d82baa6b597" DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" -DataFramesMeta = "1313f7d8-7da2-5740-9ea0-a2ca25f37964" DataInterpolations = "82cc6244-b520-54b8-b5a6-8a565e85f1d0" Distributions = 
"31c24e10-a181-5473-b8eb-7969acd0382f" -Expectations = "2fe49d83-0758-5602-8f54-1f90ad0d522b" +Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" FastGaussQuadrature = "442a2c76-b920-505d-bb47-c5924d526838" -FixedEffectModels = "9d5cd8c9-2029-5cab-9928-427838db53e3" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -GLM = "38e38edf-8417-5370-95a0-9cbb8c7f171a" Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" IncompleteLU = "40713840-3770-5561-ab4c-a76e7d0d7895" +Integrals = "de52edbc-65ea-441a-8357-d3a637375a31" Interpolations = "a98d9a8b-a2ab-59e6-89dd-64a1c18fca59" -Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" -JuMP = "4076af6c-e467-56ae-b986-b466b2749572" KernelDensity = "5ab0869b-81aa-558d-bb23-cbf5423bbe9b" LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" Latexify = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearMaps = "7a12625a-238d-50fd-b39a-03d52299707e" +LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" LoopVectorization = "bdcacae8-1622-11e9-2a5c-532679323890" -NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd" NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" +NonlinearSolve = "8913a72c-1f9b-4ce2-8d82-65094dcecaec" Optim = "429524aa-4258-5aef-a3af-852621145aeb" +Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Polynomials = "f27b6e38-b328-58d1-80ce-0feddd5e7a45" Preconditioners = "af69fa37-3177-5a40-98ee-561f696e4fcd" QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" QuantEcon = "fcd29c91-0bd7-5a09-975d-7ac3f643a60c" -Query = "1a8c2f83-1ff3-5112-b086-8aa67b057ba1" -RDatasets = "ce6b1742-4840-55fa-b093-852dadbb1d8b" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -RegressionTables = "d519eb52-b820-54da-95a6-98e1306fdade" Roots = "f2b01f46-fcfa-551c-844a-d8ac1e96c665" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SpecialFunctions = 
"276daf66-3868-5448-9aa4-cd146d93841b" diff --git a/lectures/_config.yml b/lectures/_config.yml index 1e0a941d..ed16452a 100644 --- a/lectures/_config.yml +++ b/lectures/_config.yml @@ -88,7 +88,7 @@ sphinx: rediraffe_redirects: index_toc.md: intro.md tojupyter_default_lang: julia - tojupyter_lang_synonyms: ['julia-1.11'] + tojupyter_lang_synonyms: ['julia-1.12'] tojupyter_static_file_path: ["source/_static", "_static"] tojupyter_target_html: true tojupyter_urlpath: "https://julia.quantecon.org/" @@ -98,5 +98,5 @@ sphinx: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 tojupyter_images_markdown: true diff --git a/lectures/_toc.yml b/lectures/_toc.yml index 378c007d..85fed96c 100644 --- a/lectures/_toc.yml +++ b/lectures/_toc.yml @@ -81,22 +81,6 @@ parts: - file: multi_agent_models/aiyagari - file: multi_agent_models/arellano - file: multi_agent_models/matsuyama -- caption: Time Series Models - numbered: true - chapters: - - file: time_series_models/arma - - file: time_series_models/estspec - - file: time_series_models/additive_functionals - - file: time_series_models/multiplicative_functionals - - file: time_series_models/lu_tricks - - file: time_series_models/classical_filtering -- caption: Dynamic Programming Squared - numbered: true - chapters: - - file: dynamic_programming_squared/dyn_stack - - file: dynamic_programming_squared/lqramsey - - file: dynamic_programming_squared/opt_tax_recur - - file: dynamic_programming_squared/amss - caption: Other numbered: true chapters: diff --git a/lectures/about_lectures.md b/lectures/about_lectures.md index b4a5f34b..e69093ff 100644 --- a/lectures/about_lectures.md +++ b/lectures/about_lectures.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (about)= diff --git a/lectures/continuous_time/covid_sde.md b/lectures/continuous_time/covid_sde.md index 1bab8553..12c49f31 100644 --- a/lectures/continuous_time/covid_sde.md 
+++ b/lectures/continuous_time/covid_sde.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (covid_sde)= diff --git a/lectures/continuous_time/seir_model.md b/lectures/continuous_time/seir_model.md index 7eaa1dc9..f22e3083 100644 --- a/lectures/continuous_time/seir_model.md +++ b/lectures/continuous_time/seir_model.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (seir_model)= diff --git a/lectures/dynamic_programming/career.md b/lectures/dynamic_programming/career.md index 1ca57060..f2ad1dfb 100644 --- a/lectures/dynamic_programming/career.md +++ b/lectures/dynamic_programming/career.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (career)= diff --git a/lectures/dynamic_programming/coleman_policy_iter.md b/lectures/dynamic_programming/coleman_policy_iter.md index 86122cea..fa24a728 100644 --- a/lectures/dynamic_programming/coleman_policy_iter.md +++ b/lectures/dynamic_programming/coleman_policy_iter.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (coleman_policy_iter)= diff --git a/lectures/dynamic_programming/discrete_dp.md b/lectures/dynamic_programming/discrete_dp.md index 3c4ba9ab..4c3106f5 100644 --- a/lectures/dynamic_programming/discrete_dp.md +++ b/lectures/dynamic_programming/discrete_dp.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (discrete_dp)= diff --git a/lectures/dynamic_programming/egm_policy_iter.md b/lectures/dynamic_programming/egm_policy_iter.md index 5bb9388e..ed189a89 100644 --- a/lectures/dynamic_programming/egm_policy_iter.md +++ b/lectures/dynamic_programming/egm_policy_iter.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- 
(egm_policy_iter)= diff --git a/lectures/dynamic_programming/ifp.md b/lectures/dynamic_programming/ifp.md index 11a5cf3c..31a5493b 100644 --- a/lectures/dynamic_programming/ifp.md +++ b/lectures/dynamic_programming/ifp.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (ifp)= diff --git a/lectures/dynamic_programming/jv.md b/lectures/dynamic_programming/jv.md index 573519da..ab2ea346 100644 --- a/lectures/dynamic_programming/jv.md +++ b/lectures/dynamic_programming/jv.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (jv)= diff --git a/lectures/dynamic_programming/lqcontrol.md b/lectures/dynamic_programming/lqcontrol.md index 66319300..5b7f48dc 100644 --- a/lectures/dynamic_programming/lqcontrol.md +++ b/lectures/dynamic_programming/lqcontrol.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (lqc)= @@ -42,7 +42,7 @@ These themes appear repeatedly below. Mathematically, LQ control problems are closely related to {doc}`the Kalman filter <../introduction_dynamics/kalman>`. * Recursive formulations of linear-quadratic control problems and Kalman filtering problems both involve matrix **Riccati equations**. -* Classical formulations of linear control and linear filtering problems make use of similar matrix decompositions (see for example {doc}`this lecture <../time_series_models/lu_tricks>` and {doc}`this lecture <../time_series_models/classical_filtering>`). +* Classical formulations of linear control and linear filtering problems make use of similar matrix decompositions. 
In reading what follows, it will be useful to have some familiarity with diff --git a/lectures/dynamic_programming/mccall_model.md b/lectures/dynamic_programming/mccall_model.md index 51cdbdbd..a60602dc 100644 --- a/lectures/dynamic_programming/mccall_model.md +++ b/lectures/dynamic_programming/mccall_model.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (mccall)= diff --git a/lectures/dynamic_programming/mccall_model_with_separation.md b/lectures/dynamic_programming/mccall_model_with_separation.md index 0f179458..81bce894 100644 --- a/lectures/dynamic_programming/mccall_model_with_separation.md +++ b/lectures/dynamic_programming/mccall_model_with_separation.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (mccall_with_sep)= diff --git a/lectures/dynamic_programming/odu.md b/lectures/dynamic_programming/odu.md index debd9cb9..bb587954 100644 --- a/lectures/dynamic_programming/odu.md +++ b/lectures/dynamic_programming/odu.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (odu)= diff --git a/lectures/dynamic_programming/optgrowth.md b/lectures/dynamic_programming/optgrowth.md index d7500e58..24466d72 100644 --- a/lectures/dynamic_programming/optgrowth.md +++ b/lectures/dynamic_programming/optgrowth.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (optgrowth)= diff --git a/lectures/dynamic_programming/perm_income.md b/lectures/dynamic_programming/perm_income.md index 9f193f82..3e3aafc7 100644 --- a/lectures/dynamic_programming/perm_income.md +++ b/lectures/dynamic_programming/perm_income.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (perm_income)= diff --git a/lectures/dynamic_programming/perm_income_cons.md 
b/lectures/dynamic_programming/perm_income_cons.md index 620d0d01..992a5f81 100644 --- a/lectures/dynamic_programming/perm_income_cons.md +++ b/lectures/dynamic_programming/perm_income_cons.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (perm_income_cons)= diff --git a/lectures/dynamic_programming/robustness.md b/lectures/dynamic_programming/robustness.md index 1f9c3896..c79968b6 100644 --- a/lectures/dynamic_programming/robustness.md +++ b/lectures/dynamic_programming/robustness.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (rob)= diff --git a/lectures/dynamic_programming/smoothing.md b/lectures/dynamic_programming/smoothing.md index 82a1d977..595b8898 100644 --- a/lectures/dynamic_programming/smoothing.md +++ b/lectures/dynamic_programming/smoothing.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (smoothing)= @@ -77,14 +77,11 @@ We'll spend most of this lecture studying the finite-state Markov specification, ### Relationship to Other Lectures -This lecture can be viewed as a followup to {doc}`Optimal Savings II: LQ Techniques ` and a warm up for a model of tax smoothing described in {doc}`opt_tax_recur <../dynamic_programming_squared/opt_tax_recur>`. - -Linear-quadratic versions of the Lucas-Stokey tax-smoothing model are described in {doc}`lqramsey <../dynamic_programming_squared/lqramsey>`. +This lecture can be viewed as a followup to {doc}`Optimal Savings II: LQ Techniques `. The key differences between those lectures and this one are * Here the decision maker takes all prices as exogenous, meaning that his decisions do not affect them. 
-* In {doc}`lqramsey <../dynamic_programming_squared/lqramsey>` and {doc}`opt_tax_recur <../dynamic_programming_squared/opt_tax_recur>`, the decision maker -- the government in the case of these lectures -- recognizes that his decisions affect prices. So these later lectures are partly about how the government should manipulate prices of government debt. @@ -1029,11 +1026,3 @@ lecture on the {doc}`permanent income model <../dynamic_programming/perm_income In that version, consumption follows a random walk and the consumer's debt follows a process with a unit root. We leave it to the reader to apply the usual isomorphism to deduce the corresponding implications for a tax-smoothing model like Barro's {cite}`Barro1979`. - -### Government Manipulation of Arrow Securities Prices - -In {doc}`optimal taxation in an LQ economy <../dynamic_programming_squared/lqramsey>` and {doc}`recursive optimal taxation <../dynamic_programming_squared/opt_tax_recur>`, we study **complete-markets** -models in which the government recognizes that it can manipulate Arrow securities prices. - -In {doc}`optimal taxation with incomplete markets <../dynamic_programming_squared/amss>`, we study an **incomplete-markets** model in which the government manipulates asset prices. 
- diff --git a/lectures/dynamic_programming/wald_friedman.md b/lectures/dynamic_programming/wald_friedman.md index ef5b4fff..7d34f441 100644 --- a/lectures/dynamic_programming/wald_friedman.md +++ b/lectures/dynamic_programming/wald_friedman.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (wald_friedman)= diff --git a/lectures/dynamic_programming_squared/amss.md b/lectures/dynamic_programming_squared/amss.md deleted file mode 100644 index 51f94f2d..00000000 --- a/lectures/dynamic_programming_squared/amss.md +++ /dev/null @@ -1,1707 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(opt_tax_amss)= -```{raw} html - -``` - -# Optimal Taxation without State-Contingent Debt - -```{contents} Contents -:depth: 2 -``` - -## Overview - -In {doc}`an earlier lecture <../dynamic_programming_squared/opt_tax_recur>` we described a model of -optimal taxation with state-contingent debt due to -Robert E. Lucas, Jr., and Nancy Stokey {cite}`LucasStokey1983`. - -Aiyagari, Marcet, Sargent, and Seppälä {cite}`amss2002` (hereafter, AMSS) -studied optimal taxation in a model without state-contingent debt. - -In this lecture, we - -* describe assumptions and equilibrium concepts -* solve the model -* implement the model numerically -* conduct some policy experiments -* compare outcomes with those in a corresponding complete-markets model - -We begin with an introduction to the model. - - -## Competitive Equilibrium with Distorting Taxes - -Many but not all features of the economy are identical to those of {doc}`the Lucas-Stokey economy <../dynamic_programming_squared/opt_tax_recur>`. - -Let's start with things that are identical. - -For $t \geq 0$, a history of the state is represented by $s^t = [s_t, s_{t-1}, \ldots, s_0]$. - -Government purchases $g(s)$ are an exact time-invariant function of $s$. 
- -Let $c_t(s^t)$, $\ell_t(s^t)$, and $n_t(s^t)$ denote consumption, -leisure, and labor supply, respectively, at history $s^t$ at time $t$. - -Each period a representative household is endowed with one unit of time that can be divided between leisure -$\ell_t$ and labor $n_t$: - -```{math} -:label: feas1_amss - -n_t(s^t) + \ell_t(s^t) = 1 -``` - -Output equals $n_t(s^t)$ and can be divided between consumption $c_t(s^t)$ and $g(s_t)$ - -```{math} -:label: TSs_techr_amss - -c_t(s^t) + g(s_t) = n_t(s^t) -``` - -Output is not storable. - -The technology pins down a pre-tax wage rate to unity for all $t, s^t$. - -A representative household’s preferences over $\{c_t(s^t), \ell_t(s^t)\}_{t=0}^\infty$ are ordered by - -```{math} -:label: TS_prefr_amss - -\sum_{t=0}^\infty \sum_{s^t} \beta^t \pi_t(s^t) u[c_t(s^t), \ell_t(s^t)] -``` - -where - -* $\pi_t(s^t)$ is a joint probability distribution over the sequence $s^t$, and -* the utility function $u$ is increasing, strictly concave, and three times continuously differentiable in both arguments - -The government imposes a flat rate tax $\tau_t(s^t)$ on labor income at time $t$, history $s^t$. - -Lucas and Stokey assumed that there are complete markets in one-period Arrow securities; also see {doc}`smoothing models <../dynamic_programming/smoothing>`. - -It is at this point that AMSS {cite}`amss2002` modify the Lucas and Stokey economy. - -AMSS allow the government to issue only one-period risk-free debt each period. - -Ruling out complete markets in this way is a step in the direction of making total tax collections behave more like that prescribed in {cite}`Barro1979` than they do in {cite}`LucasStokey1983`. - -### Risk-free One-Period Debt Only - -In period $t$ and history $s^t$, let - -* $b_{t+1}(s^t)$ be the amount of the time $t+1$ consumption good that at time $t$ the government promised to pay. -* $R_t(s^t)$ be the gross interest rate on risk-free one-period debt between periods $t$ and $t+1$. 
-* $T_t(s^t)$ be a nonnegative lump-sum transfer to the representative household [^fn_a]. - -That $b_{t+1}(s^t)$ is the same for all realizations of $s_{t+1}$ captures its *risk-free* character. - -The market value at time $t$ of government debt maturing at time $t+1$ equals $b_{t+1}(s^t)$ divided by $R_t(s^t)$. - -The government’s budget constraint in period $t$ at history $s^t$ is - -```{math} -:label: TS_gov_wo - -\begin{aligned} -b_t(s^{t-1}) - & = \tau^n_t(s^t) n_t(s^t) - g_t(s_t) - T_t(s^t) + - {b_{t+1}(s^t) \over R_t(s^t )} - \\ - & \equiv z(s^t) + {b_{t+1}(s^t) \over R_t(s^t )}, -\end{aligned} -``` - -where $z(s^t)$ is the net-of-interest government surplus. - -To rule out Ponzi schemes, we assume that the government is subject to a **natural debt limit** (to be discussed in a forthcoming lecture). - -The consumption Euler equation for a representative household able to trade only one-period risk-free debt -with one-period gross interest rate $R_t(s^t)$ is - -$$ -{1 \over R_t(s^t)} -= \sum_{s^{t+1}\vert s^t} \beta \pi_{t+1}(s^{t+1} | s^t) - { u_c(s^{t+1}) \over u_c(s^{t}) } -$$ - -Substituting this expression into the government’s budget constraint {eq}`TS_gov_wo` -yields: - -```{math} -:label: TS_gov_wo2 - -b_t(s^{t-1}) = z(s^t) + \beta \sum_{s^{t+1}\vert s^t} \pi_{t+1}(s^{t+1} | s^t) - { u_c(s^{t+1}) \over u_c(s^{t}) } \; b_{t+1}(s^t) -``` - -Components of $z(s^t)$ on the right side depend on $s^t$, but the left side is required to depend on $s^{t-1}$ only. - -**This is what it means for one-period government debt to be risk-free**. - -Therefore, the sum on the right side of equation {eq}`TS_gov_wo2` also has to depend only on $s^{t-1}$. - -This requirement will give rise to **measurability constraints** on the Ramsey allocation to be discussed soon. 
- -If we replace $b_{t+1}(s^t)$ on the right side of equation {eq}`TS_gov_wo2` by the right -side of next period’s budget constraint (associated with a -particular realization $s_{t}$) we get - -$$ -b_t(s^{t-1}) = z(s^t) + \sum_{s^{t+1}\vert s^t} \beta \pi_{t+1}(s^{t+1} | s^t) - { u_c(s^{t+1}) \over u_c(s^{t}) } -\, \left[z(s^{t+1}) + {b_{t+2}(s^{t+1}) \over R_{t+1}(s^{t+1})}\right] -$$ - -After making similar repeated substitutions for all future occurrences of -government indebtedness, and by invoking the natural debt limit, we -arrive at: - -```{math} -:label: TS_gov_wo3 - -\begin{aligned} -b_t(s^{t-1}) - &= \sum_{j=0}^\infty \sum_{s^{t+j} | s^t} \beta^j \pi_{t+j}(s^{t+j} | s^t) - { u_c(s^{t+j}) \over u_c(s^{t}) } \;z(s^{t+j}) - \end{aligned} -``` - -Now let's - -* substitute the resource constraint into the net-of-interest government surplus, and -* use the household’s first-order condition $1-\tau^n_t(s^t)= u_{\ell}(s^t) /u_c(s^t)$ to eliminate the labor tax rate - -so that we can express the net-of-interest government surplus $z(s^t)$ as - -```{math} -:label: AMSS_44_2 - -z(s^t) - = \left[1 - {u_{\ell}(s^t) \over u_c(s^t)}\right] \left[c_t(s^t)+g_t(s_t)\right] - -g_t(s_t) - T_t(s^t)\,. -``` - -If we substitute the appropriate versions of right side of {eq}`AMSS_44_2` for $z(s^{t+j})$ into equation {eq}`TS_gov_wo3`, -we obtain a sequence of *implementability constraints* on a Ramsey allocation in an AMSS economy. - -Expression {eq}`TS_gov_wo3` at time $t=0$ and initial state $s^0$ -was also an *implementability constraint* on a Ramsey allocation in a Lucas-Stokey economy: - -```{math} -:label: TS_gov_wo4 - -b_0(s^{-1}) = \mathbb{E}\,_0 \sum_{j=0}^\infty \beta^j - { u_c(s^{j}) \over u_c(s^{0}) } \;z(s^{j}) -``` - -Indeed, it was the *only* implementability constraint there. 
- -But now we also have a large number of additional implementability constraints - -```{math} -:label: TS_gov_wo4a - -b_t(s^{t-1}) = \mathbb{E}\,_t \sum_{j=0}^\infty \beta^j - { u_c(s^{t+j}) \over u_c(s^{t}) } \;z(s^{t+j}) -``` - -Equation {eq}`TS_gov_wo4a` must hold for each $s^t$ for each $t \geq 1$. - -### Comparison with Lucas-Stokey Economy - -The expression on the right side of {eq}`TS_gov_wo4a` in the Lucas-Stokey (1983) economy would equal the present value of a continuation stream of government surpluses evaluated at what would be competitive equilibrium Arrow-Debreu prices at date $t$. - -In the Lucas-Stokey economy, that present value is measurable with respect to $s^t$. - -In the AMSS economy, the restriction that government debt be risk-free imposes that that same present value must be measurable with respect to $s^{t-1}$. - -In a language used in the literature on incomplete markets models, it can be said that the AMSS model requires that at each $(t, s^t)$ what would be the present value of continuation government surpluses in the Lucas-Stokey model must belong to the **marketable subspace** of the AMSS model. - -### Ramsey Problem Without State-contingent Debt - -After we have substituted the resource constraint into the utility function, we can express the Ramsey problem as being to choose an allocation that solves - -$$ -\max_{\{c_t(s^t),b_{t+1}(s^t)\}} -\mathbb{E}\,_0 \sum_{t=0}^\infty \beta^t - u\left(c_t(s^t),1-c_t(s^t)-g_t(s_t)\right) -$$ - -where the maximization is subject to - -```{math} -:label: AMSS_44 - -\mathbb{E}\,_{0} \sum_{j=0}^\infty \beta^j - { u_c(s^{j}) \over u_c(s^{0}) } \;z(s^{j}) \geq b_0(s^{-1}) -``` - -and - -```{math} -:label: AMSS_46 - -\mathbb{E}\,_{t} \sum_{j=0}^\infty \beta^j - { u_c(s^{t+j}) \over u_c(s^{t}) } \; - z(s^{t+j}) = b_t(s^{t-1}) - \quad \forall \, s^t -``` - -given $b_0(s^{-1})$. - -#### Lagrangian Formulation - -Let $\gamma_0(s^0)$ be a nonnegative Lagrange multiplier on constraint {eq}`AMSS_44`. 
- -As in the Lucas-Stokey economy, this multiplier is strictly positive when the government must resort to -distortionary taxation; otherwise it equals zero. - -A consequence of the assumption that there are no markets in state-contingent securities and that a market exists only in a risk-free security is that we have to attach stochastic processes $\{\gamma_t(s^t)\}_{t=1}^\infty$ of -Lagrange multipliers to the implementability constraints {eq}`AMSS_46`. - -Depending on how the constraints bind, these multipliers can be positive or negative: - -$$ -\begin{aligned} - \gamma_t(s^t) - &\;\geq\; (\leq)\;\, 0 \quad \text{if the constraint binds in this direction } - \\ - & \mathbb{E}\,_{t} \sum_{j=0}^\infty \beta^j - { u_c(s^{t+j}) \over u_c(s^{t}) } \;z(s^{t+j}) \;\geq \;(\leq)\;\, b_t(s^{t-1}). -\end{aligned} -$$ - -A negative multiplier $\gamma_t(s^t)<0$ means that if we could -relax constraint {eq}`AMSS_46`, we would like to *increase* the beginning-of-period -indebtedness for that particular realization of history $s^t$. - -That would let us reduce the beginning-of-period indebtedness for some other history [^fn_b]. - -These features flow from the fact that the government cannot use state-contingent debt and therefore cannot allocate its indebtedness efficiently across future states. - -### Some Calculations - -It is helpful to apply two transformations to the Lagrangian. - -Multiply constraint {eq}`AMSS_44` by $u_c(s^0)$ and the constraints {eq}`AMSS_46` by $\beta^t u_c(s^{t})$. 
- -Then a Lagrangian for the Ramsey problem can be represented as - -```{math} -:label: AMSS_lagr;a - -\begin{aligned} - J &= \mathbb{E}\,_{0} \sum_{t=0}^\infty \beta^t - \biggl\{ u\left(c_t(s^t), 1-c_t(s^t)-g_t(s_t)\right)\\ - & \qquad + \gamma_t(s^t) \Bigl[ \mathbb{E}\,_{t} \sum_{j=0}^\infty \beta^j - u_c(s^{t+j}) \,z(s^{t+j}) - u_c(s^{t}) \,b_t(s^{t-1}) \biggr\} - \\ - &= \mathbb{E}\,_{0} \sum_{t=0}^\infty \beta^t - \biggl\{ u\left(c_t(s^t), 1-c_t(s^t)-g_t(s_t)\right) - \\ - & \qquad + \Psi_t(s^t)\, u_c(s^{t}) \,z(s^{t}) - - \gamma_t(s^t)\, u_c(s^{t}) \, b_t(s^{t-1}) \biggr\} -\end{aligned} -``` - -where - -```{math} -:label: AMSS_lagr; - -\Psi_t(s^t)=\Psi_{t-1}(s^{t-1})+\gamma_t(s^t) - \quad \text{and} \quad -\Psi_{-1}(s^{-1})=0 -``` - -In {eq}`AMSS_lagr;a`, the second equality uses the law of iterated expectations -and Abel’s summation formula (also called *summation by parts*, see -[this page](https://en.wikipedia.org/wiki/Abel%27s_summation_formula)). - -First-order conditions with respect -to $c_t(s^t)$ can be expressed as - -```{math} -:label: AMSS_foc;a - -\begin{aligned} - u_c(s^t)-u_{\ell}(s^t) &+ \Psi_t(s^t)\left\{ \left[ - u_{cc}(s^t) - u_{c\ell}(s^{t})\right]z(s^{t}) + - u_{c}(s^{t})\,z_c(s^{t}) \right\} - \\ - & \hspace{35mm} - \gamma_t(s^t)\left[ - u_{cc}(s^{t}) - u_{c\ell}(s^{t})\right]b_t(s^{t-1}) =0 -\end{aligned} -``` - -and with respect to $b_t(s^t)$ as - -```{math} -:label: AMSS_foc;b - -\mathbb{E}\,_{t} \left[\gamma_{t+1}(s^{t+1})\,u_c(s^{t+1})\right] = 0 -``` - -If we substitute $z(s^t)$ from {eq}`AMSS_44_2` and its derivative -$z_c(s^t)$ into first-order condition {eq}`AMSS_foc;a`, we find two -differences from the corresponding condition for the optimal allocation -in a Lucas-Stokey economy with state-contingent government debt. - -1. The term involving $b_t(s^{t-1})$ in first-order condition -{eq}`AMSS_foc;a` does not appear in the corresponding expression -for the Lucas-Stokey economy. 
- -* This term reflects the constraint that - beginning-of-period government indebtedness must be the same across all - realizations of next period’s state, a constraint that would not be present if - government debt could be state contingent. - -> - -2. The Lagrange multiplier $\Psi_t(s^t)$ in first-order condition -{eq}`AMSS_foc;a` may change over time in response to realizations of the state, -while the multiplier $\Phi$ in the Lucas-Stokey economy is time invariant. - -We need some code from our {doc}`an earlier lecture <../dynamic_programming_squared/opt_tax_recur>` -on optimal taxation with state-contingent debt sequential allocation implementation: - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test -``` - -```{code-cell} julia ---- -tags: [output_scroll] ---- - -using LinearAlgebra, Statistics, Random -using QuantEcon, NLsolve, NLopt - -import QuantEcon.simulate - -mutable struct Model{TF <: AbstractFloat, - TM <: AbstractMatrix{TF}, - TV <: AbstractVector{TF}} - beta::TF - Pi::TM - G::TV - Theta::TV - transfers::Bool - U::Function - Uc::Function - Ucc::Function - Un::Function - Unn::Function - n_less_than_one::Bool -end - -struct SequentialAllocation{TP <: Model, - TI <: Integer, - TV <: AbstractVector} - model::TP - mc::MarkovChain - S::TI - cFB::TV - nFB::TV - XiFB::TV - zFB::TV -end - -function SequentialAllocation(model::Model) - beta, Pi, G, Theta = model.beta, model.Pi, model.G, model.Theta - mc = MarkovChain(Pi) - S = size(Pi, 1) # Number of states - # Now find the first best allocation - cFB, nFB, XiFB, zFB = find_first_best(model, S, 1) - - return SequentialAllocation(model, mc, S, cFB, nFB, XiFB, zFB) -end - - -function find_first_best(model::Model, S::Integer, version::Integer) - if version != 1 && version != 2 - throw(ArgumentError("version must be 1 or 2")) - end - beta, Theta, Uc, Un, G, Pi = - model.beta, model.Theta, model.Uc, model.Un, model.G, model.Pi - function res!(out, z) - c = z[1:S] - n = z[S+1:end] - out[1:S] = Theta 
.* Uc.(c, n) + Un.(c, n) - out[S+1:end] = Theta .* n .- c .- G - end - res = nlsolve(res!, 0.5 * ones(2 * S)) - - if converged(res) == false - error("Could not find first best") - end - - if version == 1 - cFB = res.zero[1:S] - nFB = res.zero[S+1:end] - XiFB = Uc(cFB, nFB) # Multiplier on the resource constraint - zFB = vcat(cFB, nFB, XiFB) - return cFB, nFB, XiFB, zFB - elseif version == 2 - cFB = res.zero[1:S] - nFB = res.zero[S+1:end] - IFB = Uc(cFB, nFB) .* cFB + Un(cFB, nFB) .* nFB - xFB = \(LinearAlgebra.I - beta * Pi, IFB) - zFB = [vcat(cFB[s], xFB[s], xFB) for s in 1:S] - return cFB, nFB, IFB, xFB, zFB - end -end - - -function time1_allocation(pas::SequentialAllocation, mu::Real) - model, S = pas.model, pas.S - Theta, beta, Pi, G, Uc, Ucc, Un, Unn = - model.Theta, model.beta, model.Pi, model.G, - model.Uc, model.Ucc, model.Un, model.Unn - function FOC!(out, z::Vector) - c = z[1:S] - n = z[S+1:2S] - Xi = z[2S+1:end] - out[1:S] = Uc.(c, n) - mu * (Ucc.(c, n) .* c + Uc.(c, n)) - Xi # FOC c - out[S+1:2S] = Un.(c, n) - mu * (Unn(c, n) .* n .+ Un.(c, n)) + Theta .* Xi # FOC n - out[2S+1:end] = Theta .* n - c .- G # resource constraint - return out - end - # Find the root of the FOC - res = nlsolve(FOC!, pas.zFB) - if res.f_converged == false - error("Could not find LS allocation.") - end - z = res.zero - c, n, Xi = z[1:S], z[S+1:2S], z[2S+1:end] - # Now compute x - I = Uc(c, n) .* c + Un(c, n) .* n - x = \(LinearAlgebra.I - beta * model.Pi, I) - return c, n, x, Xi -end - - -function time0_allocation(pas::SequentialAllocation, - B_::AbstractFloat, s_0::Integer) - model = pas.model - Pi, Theta, G, beta = model.Pi, model.Theta, model.G, model.beta - Uc, Ucc, Un, Unn = - model.Uc, model.Ucc, model.Un, model.Unn - - # First order conditions of planner's problem - function FOC!(out, z) - mu, c, n, Xi = z[1], z[2], z[3], z[4] - xprime = time1_allocation(pas, mu)[3] - out .= vcat( - Uc(c, n) .* (c - B_) + Un(c, n) .* n + beta * dot(Pi[s_0, :], xprime), - Uc(c, n) .- mu * 
(Ucc(c, n) .* (c - B_) + Uc(c, n)) .- Xi, - Un(c, n) .- mu * (Unn(c, n) .* n + Un(c, n)) + Theta[s_0] .* Xi, - (Theta .* n .- c .- G)[s_0] - ) - end - - # Find root - res = nlsolve(FOC!, [0.0, pas.cFB[s_0], pas.nFB[s_0], pas.XiFB[s_0]]) - if res.f_converged == false - error("Could not find time 0 LS allocation.") - end - return (res.zero...,) -end - - -function time1_value(pas::SequentialAllocation, mu::Real) - model = pas.model - c, n, x, Xi = time1_allocation(pas, mu) - U_val = model.U.(c, n) - V = \(LinearAlgebra.I - model.beta*model.Pi, U_val) - return c, n, x, V -end - - -function Omega(model::Model, c::Union{Real,Vector}, n::Union{Real,Vector}) - Uc, Un = model.Uc.(c, n), model.Un.(c, n) - return 1. .+ Un./(model.Theta .* Uc) -end - - -function simulate(pas::SequentialAllocation, - B_::AbstractFloat, s_0::Integer, - T::Integer, - sHist::Union{Vector, Nothing}=nothing) - - model = pas.model - Pi, beta, Uc = model.Pi, model.beta, model.Uc - - if isnothing(sHist) - sHist = QuantEcon.simulate(pas.mc, T, init=s_0) - end - cHist = zeros(T) - nHist = zeros(T) - Bhist = zeros(T) - OmegaHist = zeros(T) - muHist = zeros(T) - RHist = zeros(T-1) - # time 0 - mu, cHist[1], nHist[1], _ = time0_allocation(pas, B_, s_0) - OmegaHist[1] = Omega(pas.model, cHist[1], nHist[1])[s_0] - Bhist[1] = B_ - muHist[1] = mu - # time 1 onward - for t in 2:T - c, n, x, Xi = time1_allocation(pas,mu) - u_c = Uc(c,n) - s = sHist[t] - OmegaHist[t] = Omega(pas.model, c, n)[s] - Eu_c = dot(Pi[sHist[t-1],:], u_c) - cHist[t], nHist[t], Bhist[t] = c[s], n[s], x[s] / u_c[s] - RHist[t-1] = Uc(cHist[t-1], nHist[t-1]) / (beta * Eu_c) - muHist[t] = mu - end - return cHist, nHist, Bhist, OmegaHist, sHist, muHist, RHist -end - - -mutable struct BellmanEquation{TP <: Model, - TI <: Integer, - TV <: AbstractVector, - TM <: AbstractMatrix{TV}, - TVV <: AbstractVector{TV}} - model::TP - S::TI - xbar::TV - time_0::Bool - z0::TM - cFB::TV - nFB::TV - xFB::TV - zFB::TVV -end - - -function 
BellmanEquation(model::Model, xgrid::AbstractVector, policies0::Vector) - S = size(model.Pi, 1) # number of states - xbar = [minimum(xgrid), maximum(xgrid)] - time_0 = false - cf, nf, xprimef = policies0 - z0 = [vcat(cf[s](x), nf[s](x), [xprimef[s, sprime](x) for sprime in 1:S]) - for x in xgrid, s in 1:S] - cFB, nFB, IFB, xFB, zFB = find_first_best(model, S, 2) - return BellmanEquation(model, S, xbar, time_0, z0, cFB, nFB, xFB, zFB) -end - - -function get_policies_time1(T::BellmanEquation, - i_x::Integer, x::AbstractFloat, - s::Integer, Vf::AbstractArray) - model, S = T.model, T.S - beta, Theta, G, Pi = model.beta, model.Theta, model.G, model.Pi - U, Uc, Un = model.U, model.Uc, model.Un - - function objf(z::Vector, grad) - c, xprime = z[1], z[2:end] - n=c+G[s] - Vprime = [Vf[sprime](xprime[sprime]) for sprime in 1:S] - return -(U(c, n) + beta * dot(Pi[s, :], Vprime)) - end - function cons(z::Vector, grad) - c, xprime = z[1], z[2:end] - n=c+G[s] - return x - Uc(c, n) * c - Un(c, n) * n - beta * dot(Pi[s, :], xprime) - end - lb = vcat(0, T.xbar[1] * ones(S)) - ub = vcat(1 - G[s], T.xbar[2] * ones(S)) - opt = Opt(:LN_COBYLA, length(T.z0[i_x, s])-1) - min_objective!(opt, objf) - equality_constraint!(opt, cons) - lower_bounds!(opt, lb) - upper_bounds!(opt, ub) - maxeval!(opt, 300) - maxtime!(opt, 10) - init = vcat(T.z0[i_x, s][1], T.z0[i_x, s][3:end]) - for (i, val) in enumerate(init) - if val > ub[i] - init[i] = ub[i] - elseif val < lb[i] - init[i] = lb[i] - end - end - (minf, minx, ret) = NLopt.optimize(opt, init) - T.z0[i_x, s] = vcat(minx[1], minx[1] + G[s], minx[2:end]) - return vcat(-minf, T.z0[i_x, s]) -end - -function get_policies_time0(T::BellmanEquation, - B_::AbstractFloat, s0::Integer, Vf::Array) - model, S = T.model, T.S - beta, Theta, G, Pi = model.beta, model.Theta, model.G, model.Pi - U, Uc, Un = model.U, model.Uc, model.Un - function objf(z, grad) - c, xprime = z[1], z[2:end] - n = c+G[s0] - Vprime = [Vf[sprime](xprime[sprime]) for sprime in 1:S] - 
return -(U(c, n) + beta * dot(Pi[s0, :], Vprime)) - end - function cons(z::Vector, grad) - c, xprime = z[1], z[2:end] - n = c + G[s0] - return -Uc(c, n) * (c - B_) - Un(c, n) * n - beta * dot(Pi[s0, :], xprime) - end - lb = vcat(0, T.xbar[1] * ones(S)) - ub = vcat(1-G[s0], T.xbar[2] * ones(S)) - opt = Opt(:LN_COBYLA, length(T.zFB[s0])-1) - min_objective!(opt, objf) - equality_constraint!(opt, cons) - lower_bounds!(opt, lb) - upper_bounds!(opt, ub) - maxeval!(opt, 300) - maxtime!(opt, 10) - init = vcat(T.zFB[s0][1], T.zFB[s0][3:end]) - for (i, val) in enumerate(init) - if val > ub[i] - init[i] = ub[i] - elseif val < lb[i] - init[i] = lb[i] - end - end - (minf, minx, ret) = NLopt.optimize(opt, init) - return vcat(-minf, vcat(minx[1], minx[1]+G[s0], minx[2:end])) -end -``` - -To analyze the AMSS model, we find it useful to adopt a recursive formulation -using techniques like those in our lectures on {doc}`dynamic Stackelberg models <../dynamic_programming_squared/dyn_stack>` and {doc}`optimal taxation with state-contingent debt <../dynamic_programming_squared/opt_tax_recur>`. - -## Recursive Version of AMSS Model - -We now describe a recursive formulation of the AMSS economy. - -We have noted that from the point of view of the Ramsey planner, the restriction -to one-period risk-free securities - -* leaves intact the single implementability constraint on allocations - {eq}`TS_gov_wo4` from the Lucas-Stokey economy, but -* adds measurability constraints {eq}`TS_gov_wo3` on functions of tails of - allocations at each time and history - -We now explore how these constraints alter Bellman equations for a time -$0$ Ramsey planner and for time $t \geq 1$, history $s^t$ -continuation Ramsey planners. 
- -### Recasting State Variables - -In the AMSS setting, the government faces a sequence of budget constraints - -$$ -\tau_t(s^t) n_t(s^t) + T_t(s^t) + b_{t+1}(s^t)/ R_t (s^t) = g_t + b_t(s^{t-1}) -$$ - -where $R_t(s^t)$ is the gross risk-free rate of interest between $t$ -and $t+1$ at history $s^t$ and $T_t(s^t)$ are nonnegative transfers. - -Throughout this lecture, we shall set transfers to zero (for some issues about the limiting behavior of debt, this makes a possibly -important difference from AMSS {cite}`amss2002`, who restricted transfers -to be nonnegative). - -In this case, the household faces a sequence of budget constraints - -```{math} -:label: eqn:AMSSapp1 - -b_t(s^{t-1}) + (1-\tau_t(s^t)) n_t(s^t) = c_t(s^t) + b_{t+1}(s^t)/R_t(s^t) -``` - -The household’s first-order conditions are $u_{c,t} = \beta R_t \mathbb{E}\,_t u_{c,t+1}$ -and $(1-\tau_t) u_{c,t} = u_{l,t}$. - -Using these to eliminate $R_t$ and $\tau_t$ from budget constraint -{eq}`eqn:AMSSapp1` gives - -```{math} -:label: eqn:AMSSapp2a - -b_t(s^{t-1}) + \frac{u_{l,t}(s^t)}{u_{c,t}(s^t)} n_t(s^t) -= c_t(s^t) + {\frac{\beta (\mathbb{E}\,_t u_{c,t+1}) b_{t+1}(s^t)}{u_{c,t}(s^t)}} -``` - -or - -```{math} -:label: eqn:AMSSapp2 - -u_{c,t}(s^t) b_t(s^{t-1}) + u_{l,t}(s^t) n_t(s^t) -= u_{c,t}(s^t) c_t(s^t) + \beta (\mathbb{E}\,_t u_{c,t+1}) b_{t+1}(s^t) -``` - -Now define - -```{math} -:label: eqn:AMSSapp3 - -x_t \equiv \beta b_{t+1}(s^t) \mathbb{E}\,_t u_{c,t+1} = u_{c,t} (s^t) {\frac{b_{t+1}(s^t)}{R_t(s^t)}} -``` - -and represent the household’s budget constraint at time $t$, -history $s^t$ as - -```{math} -:label: eqn:AMSSapp4 - -{\frac{u_{c,t} x_{t-1}}{\beta \mathbb{E}\,_{t-1} u_{c,t}}} = u_{c,t} c_t - u_{l,t} n_t + x_t -``` - -for $t \geq 1$. 
- -### Measurability Constraints - -Write equation {eq}`eqn:AMSSapp2` as - -```{math} -:label: eqn:AMSSapp2b - -b_t(s^{t-1}) = c_t(s^t) - { \frac{u_{l,t}(s^t)}{u_{c,t}(s^t)}} n_t(s^t) + -{\frac{\beta (\mathbb{E}\,_t u_{c,t+1}) b_{t+1}(s^t)}{u_{c,t}}} -``` - -The right side of equation {eq}`eqn:AMSSapp2b` expresses the time $t$ value of government debt -in terms of a linear combination of terms whose individual components -are measurable with respect to $s^t$. - -The sum of terms on the right side of equation {eq}`eqn:AMSSapp2b` must equal -$b_t(s^{t-1})$. - -That implies that it is has to be *measurable* with respect to $s^{t-1}$. - -Equations {eq}`eqn:AMSSapp2b` are the *measurablility constraints* that the AMSS model adds to the single time $0$ implementation -constraint imposed in the Lucas and Stokey model. - -### Two Bellman Equations - -Let $\Pi(s|s_-)$ be a Markov transition matrix whose entries tell probabilities of moving from state $s_-$ to state $s$ in one period. - -Let - -* $V(x_-, s_-)$ be the continuation value of a continuation - Ramsey plan at $x_{t-1} = x_-, s_{t-1} =s_-$ for $t \geq 1$. -* $W(b, s)$ be the value of the Ramsey plan at time $0$ at - $b_0=b$ and $s_0 = s$. - -We distinguish between two types of planners: - -For $t \geq 1$, the value function for a **continuation Ramsey planner** -satisfies the Bellman equation - -```{math} -:label: eqn:AMSSapp5 - -V(x_-,s_-) = \max_{\{n(s), x(s)\}} \sum_s \Pi(s|s_-) \left[ u(n(s) - -g(s), 1-n(s)) + \beta V(x(s),s) \right] -``` - -subject to the following collection of implementability constraints, one -for each $s \in {\cal S}$: - -```{math} -:label: eqn:AMSSapp6 - -{\frac{u_c(s) x_- }{\beta \sum_{\tilde s} \Pi(\tilde s|s_-) u_c(\tilde s) }} -= u_c(s) (n(s) - g(s)) - u_l(s) n(s) + x(s) -``` - -A continuation Ramsey planner at $t \geq 1$ takes -$(x_{t-1}, s_{t-1}) = (x_-, s_-)$ as given and before -$s$ is realized chooses -$(n_t(s_t), x_t(s_t)) = (n(s), x(s))$ for $s \in {\cal S}$. 
- -The **Ramsey planner** takes $(b_0, s_0)$ as given and chooses $(n_0, x_0)$. - -The value function $W(b_0, s_0)$ for the time $t=0$ Ramsey planner -satisfies the Bellman equation - -```{math} -:label: eqn:AMSSapp100 - -W(b_0, s_0) = \max_{n_0, x_0} u(n_0 - g_0, 1-n_0) + \beta V(x_0,s_0) -``` - -where maximization is subject to - -```{math} -:label: eqn:AMMSSapp101 - -u_{c,0} b_0 = u_{c,0} (n_0-g_0) - u_{l,0} n_0 + x_0 -``` - -### Martingale Supercedes State-Variable Degeneracy - -Let $\mu(s|s_-) \Pi(s|s_-)$ be a Lagrange multiplier on constraint {eq}`eqn:AMSSapp6` -for state $s$. - -After forming an appropriate Lagrangian, we find that the continuation Ramsey planner’s first-order -condition with respect to $x(s)$ is - -```{math} -:label: eqn:AMSSapp7 - -\beta V_x(x(s),s) = \mu(s|s_-) -``` - -Applying the envelope theorem to Bellman equation {eq}`eqn:AMSSapp5` gives - -```{math} -:label: eqn:AMSSapp8 - -V_x(x_-,s_-) = \sum_s \Pi(s|s_-) \mu(s|s_-) {\frac{u_c(s)}{\beta \sum_{\tilde s} -\Pi(\tilde s|s_-) u_c(\tilde s) }} -``` - -Equations {eq}`eqn:AMSSapp7` and {eq}`eqn:AMSSapp8` imply that - -```{math} -:label: eqn:AMSSapp9 - -V_x(x_-, s_-) = \sum_{s} \left( \Pi(s|s_-) {\frac{u_c(s)}{\sum_{\tilde s} -\Pi(\tilde s| s_-) u_c(\tilde s)}} \right) V_x(x(s), s) -``` - -Equation {eq}`eqn:AMSSapp9` states that $V_x(x, s)$ is a *risk-adjusted martingale*. - -Saying that $V_x(x, s)$ is a risk-adjusted martingale means that -$V_x(x, s)$ is a martingale with respect to the probability distribution -over $s^t$ sequences that is generated by the *twisted* transition probability matrix: - -$$ -\check \Pi(s|s_-) \equiv \Pi(s|s_-) {\frac{u_c(s)}{\sum_{\tilde s} -\Pi(\tilde s| s_-) u_c(\tilde s)}} -$$ - -**Exercise**: Please verify that $\check \Pi(s|s_-)$ is a valid Markov -transition density, i.e., that its elements are all nonnegative and -that for each $s_-$, the sum over $s$ equals unity. 
- -### Absence of State Variable Degeneracy - -Along a Ramsey plan, the state variable $x_t = x_t(s^t, b_0)$ -becomes a function of the history $s^t$ and initial -government debt $b_0$. - -In {doc}`Lucas-Stokey model <../dynamic_programming_squared/opt_tax_recur>`, we -found that - -* a counterpart to $V_x(x,s)$ is time invariant and equal to - the Lagrange multiplier on the Lucas-Stokey implementability constraint -* time invariance of $V_x(x,s)$ is the source of a key - feature of the Lucas-Stokey model, namely, state variable degeneracy - (i.e., $x_t$ is an exact function of $s_t$) - -That $V_x(x,s)$ varies over time according to a twisted martingale -means that there is no state-variable degeneracy in the AMSS model. - -In the AMSS model, both $x$ and $s$ are needed to describe the state. - -This property of the AMSS model transmits a twisted martingale -component to consumption, employment, and the tax rate. - -### Digression on Nonnegative Transfers - -Throughout this lecture we have imposed that transfers $T_t = 0$. - -AMSS {cite}`amss2002` instead imposed a nonnegativity -constraint $T_t\geq 0$ on transfers. - -They also considered a special case of quasi-linear preferences, -$u(c,l)= c + H(l)$. - -In this case, $V_x(x,s)\leq 0$ is a non-positive martingale. - -By the *martingale convergence theorem* $V_x(x,s)$ converges almost surely. - -Furthermore, when the Markov chain $\Pi(s| s_-)$ and the government -expenditure function $g(s)$ are such that $g_t$ is perpetually -random, $V_x(x, s)$ almost surely converges to zero. - -For quasi-linear preferences, the first-order condition with respect to $n(s)$ becomes - -$$ -(1-\mu(s|s_-) ) (1 - u_l(s)) + \mu(s|s_-) n(s) u_{ll}(s) =0 -$$ - -When $\mu(s|s_-) = \beta V_x(x(s),x)$ converges to zero, in the limit -$u_l(s)= 1 =u_c(s)$, so that $\tau(x(s),s) =0$. 
- -Thus, in the limit, if $g_t$ is perpetually random, the government -accumulates sufficient assets to finance all expenditures from earnings on those -assets, returning any excess revenues to the household as nonnegative lump sum transfers. - -### Code - -The recursive formulation is implemented as follows - -```{code-cell} julia - -# Interpolations.jl doesn't support irregular grids for splines -using DataInterpolations - -mutable struct BellmanEquation_Recursive{TP <: Model, TI <: Integer, TR <: Real} - model::TP - S::TI - xbar::Array{TR} - time_0::Bool - z0::Array{Array} - cFB::Vector{TR} - nFB::Vector{TR} - xFB::Vector{TR} - zFB::Vector{Vector{TR}} -end - -struct RecursiveAllocation{TP <: Model, - TI <: Integer, - TVg <: AbstractVector, - TT <: Tuple} - model::TP - mc::MarkovChain - S::TI - T::BellmanEquation_Recursive - mugrid::TVg - xgrid::TVg - Vf::Array - policies::TT -end - -function RecursiveAllocation(model::Model, mugrid::AbstractArray) - G = model.G - S = size(model.Pi, 1) # number of states - mc = MarkovChain(model.Pi) - # now find the first best allocation - Vf, policies, T, xgrid = solve_time1_bellman(model, mugrid) - T.time_0 = true # Bellman equation now solves time 0 problem - return RecursiveAllocation(model, mc, S, T, mugrid, xgrid, Vf, policies) -end - -function solve_time1_bellman(model::Model{TR}, - mugrid::AbstractArray) where {TR <: Real} - Pi = model.Pi - S = size(model.Pi, 1) - - # First get initial fit from lucas stockey solution. 
- # Need to change things to be ex_ante - PP = SequentialAllocation(model) - - function incomplete_allocation(PP::SequentialAllocation, - mu_::AbstractFloat, - s_::Integer) - c, n, x, V = time1_value(PP, mu_) - return c, n, dot(Pi[s_, :], x), dot(Pi[s_, :], V) - end - - cf = Array{Function}(undef, S, S) - nf = Array{Function}(undef, S, S) - xprimef = Array{Function}(undef, S, S) - Vf = Vector{Function}(undef, S) - xgrid = Array{TR}(undef, S, length(mugrid)) - - for s_ in 1:S - c = Array{TR}(undef, length(mugrid), S) - n = Array{TR}(undef, length(mugrid), S) - x = Array{TR}(undef, length(mugrid)) - V = Array{TR}(undef, length(mugrid)) - for (i_mu, mu) in enumerate(mugrid) - c[i_mu, :], n[i_mu, :], x[i_mu], V[i_mu] = incomplete_allocation(PP, - mu, - s_) - end - xprimes = repeat(x, 1, S) - xgrid[s_, :] = x - for sprime in 1:S - splc = CubicSpline(c[:, sprime][end:-1:1], x[end:-1:1]; - extrapolate = true) - spln = CubicSpline(n[:, sprime][end:-1:1], x[end:-1:1]; - extrapolate = true) - splx = CubicSpline(xprimes[:, sprime][end:-1:1], x[end:-1:1]; - extrapolate = true) - cf[s_, sprime] = y -> splc(y) - nf[s_, sprime] = y -> spln(y) - xprimef[s_, sprime] = y -> splx(y) - end - splV = CubicSpline(V[end:-1:1], x[end:-1:1]; extrapolate = true) - Vf[s_] = y -> splV(y) - end - - policies = [cf, nf, xprimef] - - # Create xgrid - xbar = [maximum(minimum(xgrid)), minimum(maximum(xgrid))] - xgrid = range(xbar[1], xbar[2], length = length(mugrid)) - - # Now iterate on Bellman equation - T = BellmanEquation_Recursive(model, xgrid, policies) - diff = 1.0 - while diff > 1e-4 - PF = (i_x, x, s) -> get_policies_time1(T, i_x, x, s, Vf, xbar) - Vfnew, policies = fit_policy_function(T, PF, xgrid) - - diff = 0.0 - for s in 1:S - diff = max(diff, - maximum(abs, - (Vf[s].(xgrid) - Vfnew[s].(xgrid)) ./ - Vf[s].(xgrid))) - end - - println("diff = $diff") - Vf = copy(Vfnew) - end - - return Vf, policies, T, xgrid -end - -function fit_policy_function(T::BellmanEquation_Recursive, - 
PF::Function, - xgrid::AbstractVector{TF}) where { - TF <: - AbstractFloat} - S = T.S - # preallocation - PFvec = Array{TF}(undef, 4S + 1, length(xgrid)) - cf = Array{Function}(undef, S, S) - nf = Array{Function}(undef, S, S) - xprimef = Array{Function}(undef, S, S) - TTf = Array{Function}(undef, S, S) - Vf = Vector{Function}(undef, S) - # fit policy fuctions - for s_ in 1:S - for (i_x, x) in enumerate(xgrid) - PFvec[:, i_x] = PF(i_x, x, s_) - end - splV = CubicSpline(PFvec[1, :], xgrid) - Vf[s_] = y -> splV(y) - for sprime in 1:S - splc = CubicSpline(PFvec[1 + sprime, :], xgrid) - spln = CubicSpline(PFvec[1 + S + sprime, :], xgrid) - splxprime = CubicSpline(PFvec[1 + 2S + sprime, :], xgrid) - splTT = CubicSpline(PFvec[1 + 3S + sprime, :], xgrid) - cf[s_, sprime] = y -> splc(y) - nf[s_, sprime] = y -> spln(y) - xprimef[s_, sprime] = y -> splxprime(y) - TTf[s_, sprime] = y -> splTT(y) - end - end - policies = (cf, nf, xprimef, TTf) - return Vf, policies -end - -function Tau(pab::RecursiveAllocation, - c::AbstractArray, - n::AbstractArray) - model = pab.model - Uc, Un = model.Uc(c, n), model.Un(c, n) - return 1.0 .+ Un ./ (model.Theta .* Uc) -end - -Tau(pab::RecursiveAllocation, c::Real, n::Real) = Tau(pab, [c], [n]) - -function time0_allocation(pab::RecursiveAllocation, B_::Real, s0::Integer) - T, Vf = pab.T, pab.Vf - xbar = T.xbar - z0 = get_policies_time0(T, B_, s0, Vf, xbar) - - c0, n0, xprime0, T0 = z0[2], z0[3], z0[4], z0[5] - return c0, n0, xprime0, T0 -end - -function simulate(pab::RecursiveAllocation, - B_::TF, s_0::Integer, T::Integer, - sHist::Vector = simulate(pab.mc, T, init = s_0)) where { - TF <: - AbstractFloat - } - model, mc, Vf, S = pab.model, pab.mc, pab.Vf, pab.S - Pi, Uc = model.Pi, model.Uc - cf, nf, xprimef, TTf = pab.policies - - cHist = Array{TF}(undef, T) - nHist = Array{TF}(undef, T) - Bhist = Array{TF}(undef, T) - xHist = Array{TF}(undef, T) - TauHist = Array{TF}(undef, T) - THist = Array{TF}(undef, T) - muHist = Array{TF}(undef, T) - - 
#time0 - cHist[1], nHist[1], xHist[1], THist[1] = time0_allocation(pab, B_, s_0) - TauHist[1] = Tau(pab, cHist[1], nHist[1])[s_0] - Bhist[1] = B_ - muHist[1] = Vf[s_0](xHist[1]) - - #time 1 onward - for t in 2:T - s_, x, s = sHist[t - 1], xHist[t - 1], sHist[t] - c = Array{TF}(undef, S) - n = Array{TF}(undef, S) - xprime = Array{TF}(undef, S) - TT = Array{TF}(undef, S) - for sprime in 1:S - c[sprime], n[sprime], xprime[sprime], TT[sprime] = cf[s_, sprime](x), - nf[s_, sprime](x), - xprimef[s_, - sprime](x), - TTf[s_, sprime](x) - end - - Tau_val = Tau(pab, c, n)[s] - u_c = Uc(c, n) - Eu_c = dot(Pi[s_, :], u_c) - - muHist[t] = Vf[s](xprime[s]) - - cHist[t], nHist[t], Bhist[t], TauHist[t] = c[s], n[s], x / Eu_c, Tau_val - xHist[t], THist[t] = xprime[s], TT[s] - end - return cHist, nHist, Bhist, xHist, TauHist, THist, muHist, sHist -end - -function BellmanEquation_Recursive(model::Model{TF}, - xgrid::AbstractVector{TF}, - policies0::Array) where {TF <: AbstractFloat} - S = size(model.Pi, 1) # number of states - xbar = [minimum(xgrid), maximum(xgrid)] - time_0 = false - z0 = Array{Array}(undef, length(xgrid), S) - cf, nf, xprimef = policies0[1], policies0[2], policies0[3] - for s in 1:S - for (i_x, x) in enumerate(xgrid) - cs = Array{TF}(undef, S) - ns = Array{TF}(undef, S) - xprimes = Array{TF}(undef, S) - for j in 1:S - cs[j], ns[j], xprimes[j] = cf[s, j](x), nf[s, j](x), - xprimef[s, j](x) - end - z0[i_x, s] = vcat(cs, ns, xprimes, zeros(S)) - end - end - cFB, nFB, IFB, xFB, zFB = find_first_best(model, S, 2) - return BellmanEquation_Recursive(model, S, xbar, time_0, z0, cFB, nFB, xFB, - zFB) -end - -function get_policies_time1(T::BellmanEquation_Recursive, - i_x::Integer, - x::Real, - s_::Integer, - Vf::AbstractArray{Function}, - xbar::AbstractVector) - model, S = T.model, T.S - beta, Theta, G, Pi = model.beta, model.Theta, model.G, model.Pi - U, Uc, Un = model.U, model.Uc, model.Un - - S_possible = sum(Pi[s_, :] .> 0) - sprimei_possible = findall(Pi[s_, :] .> 0) - 
- function objf(z, grad) - c, xprime = z[1:S_possible], z[(S_possible + 1):(2S_possible)] - n = (c .+ G[sprimei_possible]) ./ Theta[sprimei_possible] - Vprime = [Vf[sprimei_possible[si]](xprime[si]) for si in 1:S_possible] - return -dot(Pi[s_, sprimei_possible], U.(c, n) + beta * Vprime) - end - - function cons(out, z, grad) - c, xprime, TT = z[1:S_possible], z[(S_possible + 1):(2S_possible)], - z[(2S_possible + 1):(3S_possible)] - n = (c .+ G[sprimei_possible]) ./ Theta[sprimei_possible] - u_c = Uc.(c, n) - Eu_c = dot(Pi[s_, sprimei_possible], u_c) - out .= x * u_c / Eu_c - u_c .* (c - TT) - Un(c, n) .* n - beta * xprime - end - function cons_no_trans(out, z, grad) - c, xprime = z[1:S_possible], z[(S_possible + 1):(2S_possible)] - n = (c .+ G[sprimei_possible]) ./ Theta[sprimei_possible] - u_c = Uc.(c, n) - Eu_c = dot(Pi[s_, sprimei_possible], u_c) - out .= x * u_c / Eu_c - u_c .* c - Un(c, n) .* n - beta * xprime - end - - if model.transfers == true - lb = vcat(zeros(S_possible), ones(S_possible) * xbar[1], - zeros(S_possible)) - if model.n_less_than_one == true - ub = vcat(ones(S_possible) - G[sprimei_possible], - ones(S_possible) * xbar[2], ones(S_possible)) - else - ub = vcat(100 * ones(S_possible), - ones(S_possible) * xbar[2], - 100 * ones(S_possible)) - end - init = vcat(T.z0[i_x, s_][sprimei_possible], - T.z0[i_x, s_][2S .+ sprimei_possible], - T.z0[i_x, s_][3S .+ sprimei_possible]) - opt = Opt(:LN_COBYLA, 3S_possible) - equality_constraint!(opt, cons, zeros(S_possible)) - else - lb = vcat(zeros(S_possible), ones(S_possible) * xbar[1]) - if model.n_less_than_one == true - ub = vcat(ones(S_possible) - G[sprimei_possible], - ones(S_possible) * xbar[2]) - else - ub = vcat(ones(S_possible), ones(S_possible) * xbar[2]) - end - init = vcat(T.z0[i_x, s_][sprimei_possible], - T.z0[i_x, s_][2S .+ sprimei_possible]) - opt = Opt(:LN_COBYLA, 2S_possible) - equality_constraint!(opt, cons_no_trans, zeros(S_possible)) - end - init[init .> ub] = ub[init .> ub] - init[init 
.< lb] = lb[init .< lb] - - min_objective!(opt, objf) - lower_bounds!(opt, lb) - upper_bounds!(opt, ub) - maxeval!(opt, 10000000) - maxtime!(opt, 10) - ftol_rel!(opt, 1e-8) - ftol_abs!(opt, 1e-8) - - (minf, minx, ret) = NLopt.optimize(opt, init) - - if ret != :SUCCESS && ret != :ROUNDOFF_LIMITED && ret != :MAXEVAL_REACHED && - ret != :FTOL_REACHED && ret != :MAXTIME_REACHED - error("optimization failed: ret = $ret") - end - - T.z0[i_x, s_][sprimei_possible] = minx[1:S_possible] - T.z0[i_x, s_][S .+ sprimei_possible] = minx[1:S_possible] .+ - G[sprimei_possible] - T.z0[i_x, s_][2S .+ sprimei_possible] = minx[(S_possible .+ 1):(2S_possible)] - if model.transfers == true - T.z0[i_x, s_][3S .+ sprimei_possible] = minx[(2S_possible + 1):(3S_possible)] - else - T.z0[i_x, s_][3S .+ sprimei_possible] = zeros(S) - end - - return vcat(-minf, T.z0[i_x, s_]) -end - -function get_policies_time0(T::BellmanEquation_Recursive, - B_::Real, - s0::Integer, - Vf::AbstractArray{Function}, - xbar::AbstractVector) - model = T.model - beta, Theta, G = model.beta, model.Theta, model.G - U, Uc, Un = model.U, model.Uc, model.Un - - function objf(z, grad) - if any(isnan, z) - return -Inf - end - c, xprime = z[1], z[2] - n = (c + G[s0]) / Theta[s0] - return -(U(c, n) + beta * Vf[s0](xprime)) - end - - function cons(z, grad) - if any(isnan, z) - return -Inf - end - c, xprime, TT = z[1], z[2], z[3] - n = (c + G[s0]) / Theta[s0] - return -Uc(c, n) * (c - B_ - TT) - Un(c, n) * n - beta * xprime - end - cons_no_trans(z, grad) = cons(vcat(z, 0), grad) - - if model.transfers == true - lb = [0.0, xbar[1], 0.0] - if model.n_less_than_one == true - ub = [1 - G[s0], xbar[2], 100] - else - ub = [100.0, xbar[2], 100.0] - end - init = vcat(T.zFB[s0][1], T.zFB[s0][3], T.zFB[s0][4]) - init = [0.95124922, -1.15926816, 0.0] - opt = Opt(:LN_COBYLA, 3) - equality_constraint!(opt, cons) - else - lb = [0.0, xbar[1]] - if model.n_less_than_one == true - ub = [1 - G[s0], xbar[2]] - else - ub = [100, xbar[2]] - end - 
init = vcat(T.zFB[s0][1], T.zFB[s0][3]) - init = [0.95124922, -1.15926816] - opt = Opt(:LN_COBYLA, 2) - equality_constraint!(opt, cons_no_trans) - end - init[init .> ub] = ub[init .> ub] - init[init .< lb] = lb[init .< lb] - - min_objective!(opt, objf) - lower_bounds!(opt, lb) - upper_bounds!(opt, ub) - maxeval!(opt, 100000000) - maxtime!(opt, 30) - - (minf, minx, ret) = NLopt.optimize(opt, init) - - if ret != :SUCCESS && ret != :ROUNDOFF_LIMITED && ret != :MAXEVAL_REACHED && - ret != :FTOL_REACHED - error("optimization failed: ret = $ret") - end - - if model.transfers == true - return -minf, minx[1], minx[1] + G[s0], minx[2], minx[3] - else - return -minf, minx[1], minx[1] + G[s0], minx[2], 0 - end -end -``` - -## Examples - -We now turn to some examples. - -### Anticipated One-Period War - -In our lecture on {doc}`optimal taxation with state contingent debt <../dynamic_programming_squared/opt_tax_recur>` -we studied how the government manages uncertainty in a simple setting. - -As in that lecture, we assume the one-period utility function - -$$ -u(c,n) = {\frac{c^{1-\sigma}}{1-\sigma}} - {\frac{n^{1+\gamma}}{1+\gamma}} -$$ - -```{note} -For convenience in matching our computer code, we have expressed -utility as a function of $n$ rather than leisure $l$ -``` - -We consider the same government expenditure process studied in the lecture on -{doc}`optimal taxation with state contingent debt <../dynamic_programming_squared/opt_tax_recur>`. - -Government expenditures are known for sure in all periods except one - -* For $t<3$ or $t > 3$ we assume that $g_t = g_l = 0.1$. -* At $t = 3$ a war occurs with probability 0.5. - * If there is war, $g_3 = g_h = 0.2$. - * If there is no war $g_3 = g_l = 0.1$. - -A useful trick is to define components of the state vector as the following six -$(t,g)$ pairs: - -$$ -(0,g_l), (1,g_l), (2,g_l), (3,g_l), (3,g_h), (t\geq 4,g_l) -$$ - -We think of these 6 states as corresponding to $s=1,2,3,4,5,6$. 
- -The transition matrix is - -$$ -P = \begin{pmatrix} - 0 & 1 & 0 & 0 & 0 & 0\\ - 0 & 0 & 1 & 0 & 0 & 0\\ - 0 & 0 & 0 & 0.5 & 0.5 & 0\\ - 0 & 0 & 0 & 0 & 0 & 1\\ - 0 & 0 & 0 & 0 & 0 & 1\\ - 0 & 0 & 0 & 0 & 0 & 1 -\end{pmatrix} -$$ - -The government expenditure at each state is - -$$ -g = \left(\begin{matrix} 0.1\\0.1\\0.1\\0.1\\0.2\\0.1 \end{matrix}\right) -$$ - -We assume the same utility parameters as in the {doc}`Lucas-Stokey economy <../dynamic_programming_squared/opt_tax_recur>`. - -This utility function is implemented in the following constructor - -```{code-cell} julia -function CRRAModel(; - beta = 0.9, - sigma = 2.0, - gamma = 2.0, - Pi = 0.5 * ones(2, 2), - G = [0.1, 0.2], - Theta = ones(Float64, 2), - transfers = false) - function U(c, n) - if sigma == 1.0 - U = log(c) - else - U = (c .^ (1.0 - sigma) - 1.0) / (1.0 - sigma) - end - return U - n .^ (1 + gamma) / (1 + gamma) - end - # Derivatives of utility function - Uc(c, n) = c .^ (-sigma) - Ucc(c, n) = -sigma * c .^ (-sigma - 1.0) - Un(c, n) = -n .^ gamma - Unn(c, n) = -gamma * n .^ (gamma - 1.0) - n_less_than_one = false - return Model(beta, Pi, G, Theta, transfers, - U, Uc, Ucc, Un, Unn, n_less_than_one) -end -``` - -The following figure plots the Ramsey plan under both complete and incomplete -markets for both possible realizations of the state at time $t=3$. - -Optimal policies when the government has access to state contingent debt are -represented by black lines, while the optimal policies when there is only a risk -free bond are in red. - -Paths with circles are histories in which there is peace, while those with -triangle denote war. 
- -```{code-cell} julia -time_example = CRRAModel(;G = [0.1, 0.1, 0.1, 0.2, 0.1, 0.1], - Theta = ones(6)) # Theta can in principle be random - -time_example.Pi = [0.0 1.0 0.0 0.0 0.0 0.0; - 0.0 0.0 1.0 0.0 0.0 0.0; - 0.0 0.0 0.0 0.5 0.5 0.0; - 0.0 0.0 0.0 0.0 0.0 1.0; - 0.0 0.0 0.0 0.0 0.0 1.0; - 0.0 0.0 0.0 0.0 0.0 1.0] - -# Initialize mugrid for value function iteration -mugrid = range(-0.7, 0.01, length = 200) - -time_example.transfers = true # Government can use transfers -time_sequential = SequentialAllocation(time_example) # Solve sequential problem - -time_bellman = RecursiveAllocation(time_example, mugrid) - -sHist_h = [1, 2, 3, 4, 6, 6, 6] -sHist_l = [1, 2, 3, 5, 6, 6, 6] - -sim_seq_h = simulate(time_sequential, 1.0, 1, 7, sHist_h) -sim_bel_h = simulate(time_bellman, 1.0, 1, 7, sHist_h) -sim_seq_l = simulate(time_sequential, 1.0, 1, 7, sHist_l) -sim_bel_l = simulate(time_bellman, 1.0, 1, 7, sHist_l) - -using Plots - -titles = hcat("Consumption", "Labor Supply", "Government Debt", - "Tax Rate", "Government Spending", "Output") -sim_seq_l_plot = hcat(sim_seq_l[1:3]..., sim_seq_l[4], - time_example.G[sHist_l], - time_example.Theta[sHist_l] .* sim_seq_l[2]) -sim_bel_l_plot = hcat(sim_bel_l[1:3]..., sim_bel_l[5], - time_example.G[sHist_l], - time_example.Theta[sHist_l] .* sim_bel_l[2]) -sim_seq_h_plot = hcat(sim_seq_h[1:3]..., sim_seq_h[4], - time_example.G[sHist_h], - time_example.Theta[sHist_h] .* sim_seq_h[2]) -sim_bel_h_plot = hcat(sim_bel_h[1:3]..., sim_bel_h[5], - time_example.G[sHist_h], - time_example.Theta[sHist_h] .* sim_bel_h[2]) -p = plot(size = (700, 700), layout = (3, 2), - xaxis = (0:6), grid = false, titlefont = Plots.font("sans-serif", 10)) -plot!(p, title = titles) -for i in 1:6 - plot!(p[i], 0:6, sim_seq_l_plot[:, i], marker = :circle, color = :black, - lab = "") - plot!(p[i], 0:6, sim_bel_l_plot[:, i], marker = :circle, color = :red, - lab = "") - plot!(p[i], 0:6, sim_seq_h_plot[:, i], marker = :utriangle, color = :black, - lab = "") - 
plot!(p[i], 0:6, sim_bel_h_plot[:, i], marker = :utriangle, color = :red, - lab = "") -end -p -``` - -How a Ramsey planner responds to war depends on the structure of the asset market. - -If it is able to trade state-contingent debt, then at time $t=2$ - -* the government purchases an Arrow security that pays off when $g_3 = g_h$ -* the government sells an Arrow security that pays off when $g_3 = g_l$ -* These purchases are designed in such a way that regardless of whether or not there is a war at $t=3$, the government will begin period $t=4$ with the *same* government debt. - -This pattern facilities smoothing tax rates across states. - -The government without state contingent debt cannot do this. - -Instead, it must enter time $t=3$ with the same level of debt falling due whether there is peace or war at $t=3$. - -It responds to this constraint by smoothing tax rates across time. - -To finance a war it raises taxes and issues more debt. - -To service the additional debt burden, it raises taxes in all future periods. - -The absence of state contingent debt leads to an important difference in the -optimal tax policy. - -When the Ramsey planner has access to state contingent debt, the optimal tax -policy is history independent - -* the tax rate is a function of the current level of government spending only, - given the Lagrange multiplier on the implementability constraint. - -Without state contingent debt, the optimal tax rate is history dependent. - -* A war at time $t=3$ causes a permanent increase in the tax rate. - -#### Perpetual War Alert - -History dependence occurs more dramatically in a case in which the government -perpetually faces the prospect of war. - -This case was studied in the final example of the lecture on -{doc}`optimal taxation with state-contingent debt <../dynamic_programming_squared/opt_tax_recur>`. - -There, each period the government faces a constant probability, $0.5$, of war. 
- -In addition, this example features the following preferences - -$$ -u(c,n) = \log(c) + 0.69 \log(1-n) -$$ - -In accordance, we will re-define our utility function - -```{code-cell} julia -function log_utility(; beta = 0.9, - psi = 0.69, - Pi = 0.5 * ones(2, 2), - G = [0.1, 0.2], - Theta = ones(2), - transfers = false) - # Derivatives of utility function - U(c, n) = log(c) + psi * log(1 - n) - Uc(c, n) = 1 ./ c - Ucc(c, n) = -c .^ (-2.0) - Un(c, n) = -psi ./ (1.0 .- n) - Unn(c, n) = -psi ./ (1.0 .- n) .^ 2.0 - n_less_than_one = true - return Model(beta, Pi, G, Theta, transfers, - U, Uc, Ucc, Un, Unn, n_less_than_one) -end -``` - -With these preferences, Ramsey tax rates will vary even in the Lucas-Stokey -model with state-contingent debt. - -The figure below plots optimal tax policies for both the economy with -state contingent debt (circles) and the economy with only a risk-free bond -(triangles) - -```{code-cell} julia -log_example = log_utility() - -log_example.transfers = true # Government can use transfers -log_sequential = SequentialAllocation(log_example) # Solve sequential problem -log_bellman = RecursiveAllocation(log_example, mugrid) # Solve recursive problem - -T = 20 -sHist = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 1] - -#simulate -sim_seq = simulate(log_sequential, 0.5, 1, T, sHist) -sim_bel = simulate(log_bellman, 0.5, 1, T, sHist) - -sim_seq_plot = hcat(sim_seq[1:3]..., - sim_seq[4], log_example.G[sHist], - log_example.Theta[sHist] .* sim_seq[2]) -sim_bel_plot = hcat(sim_bel[1:3]..., - sim_bel[5], log_example.G[sHist], - log_example.Theta[sHist] .* sim_bel[2]) - -#plot policies -p = plot(size = (700, 700), layout = grid(3, 2), - xaxis = (0:T), grid = false, titlefont = Plots.font("sans-serif", 10)) -labels = fill(("", ""), 6) -labels[3] = ("Complete Market", "Incomplete Market") -plot!(p, title = titles) -for i in vcat(collect(1:4), 6) - plot!(p[i], sim_seq_plot[:, i], marker = :circle, color = :black, - lab = labels[i][1]) - 
plot!(p[i], sim_bel_plot[:, i], marker = :utriangle, color = :blue, - lab = labels[i][2], - legend = :bottomright) -end -plot!(p[5], sim_seq_plot[:, 5], marker = :circle, color = :blue, lab = "") -``` - -When the government experiences a prolonged period of peace, it is able to reduce -government debt and set permanently lower tax rates. - -However, the government finances a long war by borrowing and raising taxes. - -This results in a drift away from policies with state contingent debt that -depends on the history of shocks. - -This is even more evident in the following figure that plots the evolution of -the two policies over 200 periods - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42) -``` - -```{code-cell} julia -T_long = 200 -sim_seq_long = simulate(log_sequential, 0.5, 1, T_long) -sHist_long = sim_seq_long[end - 2] -sim_bel_long = simulate(log_bellman, 0.5, 1, T_long, sHist_long) -sim_seq_long_plot = hcat(sim_seq_long[1:4]..., - log_example.G[sHist_long], - log_example.Theta[sHist_long] .* sim_seq_long[2]) -sim_bel_long_plot = hcat(sim_bel_long[1:3]..., sim_bel_long[5], - log_example.G[sHist_long], - log_example.Theta[sHist_long] .* sim_bel_long[2]) - -p = plot(size = (700, 700), layout = (3, 2), xaxis = (0:50:T_long), - grid = false, - titlefont = Plots.font("sans-serif", 10)) -plot!(p, title = titles) -for i in 1:6 - plot!(p[i], sim_seq_long_plot[:, i], color = :black, linestyle = :solid, - lab = labels[i][1]) - plot!(p[i], sim_bel_long_plot[:, i], color = :blue, linestyle = :dot, - lab = labels[i][2], - legend = :bottomright) -end -p -``` - -[^fn_a]: In an allocation that solves the Ramsey problem and that levies distorting -taxes on labor, why would the government ever want to hand revenues back -to the private sector? It would not in an economy with state-contingent debt, since -any such allocation could be improved by lowering distortionary taxes -rather than handing out lump-sum transfers. 
But without state-contingent -debt there can be circumstances when a government would like to make -lump-sum transfers to the private sector. - -[^fn_b]: From the first-order conditions for the Ramsey -problem, there exists another realization $\tilde s^t$ with -the same history up until the previous period, i.e., $\tilde s^{t-1}= -s^{t-1}$, but where the multiplier on constraint {eq}`AMSS_46` takes a positive value, so -$\gamma_t(\tilde s^t)>0$. - diff --git a/lectures/dynamic_programming_squared/dyn_stack.md b/lectures/dynamic_programming_squared/dyn_stack.md deleted file mode 100644 index 490b4ccf..00000000 --- a/lectures/dynamic_programming_squared/dyn_stack.md +++ /dev/null @@ -1,1451 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(dyn_stack)= -```{raw} html - -``` - -# Dynamic Stackelberg Problems - -```{contents} Contents -:depth: 2 -``` - -This notebook formulates and computes a plan that a **Stackelberg -leader** uses to manipulate forward-looking decisions of a **Stackelberg -follower** that depend on continuation sequences of decisions made once -and for all by the Stackelberg leader at time $0$. - -To facilitate computation and interpretation, we formulate things in a -context that allows us to apply linear optimal dynamic programming. - -From the beginning we carry along a linear-quadratic model of duopoly in -which firms face adjustment costs that make them want to forecast -actions of other firms that influence future prices. - -## Duopoly - -Time is discrete and is indexed by $t = 0, 1, \ldots$. - -Two firms produce a single good whose demand is governed by the linear -inverse demand curve - -$$ -p_t = a_0 - a_1 (q_{1t}+ q_{2t} ) -$$ - -where $q_{it}$ is output of firm $i$ at time $t$ and -$a_0$ and $a_1$ are both positive. - -$q_{10}, q_{20}$ are given numbers that serve as initial -conditions at time $0$. 
- -By incurring a cost of change - -$$ -\gamma v_{it}^2 -$$ - -where $\gamma > 0$, firm $i$ can change its output according -to - -$$ -q_{it+1} = q_{it} + v_{it} -$$ - -Firm $i$'s profits at time $t$ equal - -$$ -\pi_{it} = p_t q_{it} - \gamma v_{it}^2 -$$ - -Firm $i$ wants to maximize the present value of its profits - -$$ -\sum_{t=0}^\infty \beta^t \pi_{it} -$$ - -where $\beta \in (0,1)$ is a time discount factor. - -### Stackelberg Leader and Follower - -Each firm $i=1,2$ chooses a sequence -$\vec q_i \equiv \{q_{it+1}\}_{t=0}^\infty$ once and for all at -time $0$. - -We let firm 2 be a **Stackelberg leader** and firm 1 be a **Stackelberg -follower**. - -The leader firm 2 goes first and chooses -$\{q_{2t+1}\}_{t=0}^\infty$ once and for all at time $0$. - -Knowing that firm 2 has chosen $\{q_{2t+1}\}_{t=0}^\infty$, the -follower firm 1 goes second and chooses -$\{q_{1t+1}\}_{t=0}^\infty$ once and for all at time $0$. - -In choosing $\vec q_2$, firm 2 takes into account that firm 1 will -base its choice of $\vec q_1$ on firm 2's choice of -$\vec q_2$. - -### Abstract Statement of the Leader's and Follower's Problems - -We can express firm 1's problem as - -$$ -\max_{\vec q_1} \Pi_1(\vec q_1; \vec q_2) -$$ - -where the appearance behind the semi-colon indicates that -$\vec q_2$ is given. - -Firm 1's problem induces a best response mapping - -$$ -\vec q_1 = B(\vec q_2) -$$ - -(Here $B$ maps a sequence into a sequence) - -The Stackelberg leader's problem is - -$$ -\max_{\vec q_2} \Pi_2 (B(\vec q_2), \vec q_2) -$$ - -whose maximizer is a sequence $\vec q_2$ that depends on the -initial conditions $q_{10}, q_{20}$ and the parameters of the -model $a_0, a_1, \gamma$. - -This formulation captures key features of the model - -- Both firms make once-and-for-all choices at time $0$. -- This is true even though both firms are choosing sequences of - quantities that are indexed by **time**. 
-- The Stackelberg leader chooses first **within time** $0$, - knowing that the Stackelberg follower will choose second **within - time** $0$. - -While our abstract formulation reveals the timing protocol and -equilibrium concept well, it obscures details that must be addressed -when we want to compute and interpret a Stackelberg plan and the -follower's best response to it. - -To gain insights about these things, we study them in more detail. - -### Firms' Problems - -Firm 1 acts as if firm 2's sequence $\{q_{2t+1}\}_{t=0}^\infty$ is -given and beyond its control. - -Firm 2 knows that firm 1 chooses second and takes this into account in -choosing $\{q_{2t+1}\}_{t=0}^\infty$. - -In the spirit of *working backwards*, we study firm 1's problem first, -taking $\{q_{2t+1}\}_{t=0}^\infty$ as given. - -We can formulate firm 1's optimum problem in terms of the Lagrangian - -$$ -L=\sum_{t=0}^{\infty}\beta^{t}\{a_{0}q_{1t}-a_{1}q_{1t}^{2}-a_{1}q_{1t}q_{2t}-\gamma v_{1t}^{2}+\lambda_{t}[q_{1t}+v_{1t}-q_{1t+1}]\} -$$ - -Firm 1 seeks a maximum with respect to -$\{q_{1t+1}, v_{1t} \}_{t=0}^\infty$ and a minimum with respect to -$\{ \lambda_t\}_{t=0}^\infty$. - -We approach this problem using methods described in Ljungqvist and -Sargent RMT5 chapter 2, appendix A and Macroeconomic Theory, 2nd -edition, chapter IX. 
- -First-order conditions for this problem are - -$$ -\begin{aligned} -\frac{\partial L}{\partial q_{1t}} & = a_0 - 2 a_1 q_{1t} - a_1 q_{2t} + \lambda_t - \beta^{-1} - \lambda_{t-1} = 0 , \quad t \geq 1 \cr - \frac{\partial L}{\partial v_{1t}} & = -2 \gamma v_{1t} + \lambda_t = 0 , \quad t \geq 0 - \end{aligned} -$$ - -These first-order conditions and the constraint $q_{1t+1} = q_{1t} + v_{1t}$ can be rearranged to take the form - -$$ -\begin{aligned} -v_{1t} & = \beta v_{1t+1} + \frac{\beta a_0}{2 \gamma} - \frac{\beta a_1}{\gamma} q_{1t+1} - - \frac{\beta a_1}{2 \gamma} q_{2t+1} \cr - q_{t+1} & = q_{1t} + v_{1t} -\end{aligned} -$$ - -We can substitute the second equation into the first equation to obtain - -$$ -(q_{1t+1} - q_{1t} ) = \beta (q_{1t+2} - q_{1t+1}) + c_0 - c_1 q_{1t+1} - c_2 q_{2t+1} -$$ - -where -$c_0 = \frac{\beta a_0}{2 \gamma}, c_1 = \frac{\beta a_1}{\gamma}, c_2 = \frac{\beta a_1}{2 \gamma}$. - -This equation can in turn be rearranged to become the second-order -difference equation - -```{math} -:label: sstack1 - -q_{1t} + (1+\beta + c_1) q_{1t+1} - \beta q_{1t+2} = c_0 - c_2 q_{2t+1} -``` - -Equation {eq}`sstack1` is a second-order difference equation in the sequence -$\vec q_1$ whose solution we want. - -It satisfies **two boundary conditions:** - -- an initial condition that $q_{1,0}$, which is given -- a terminal condition requiring that - $\lim_{T \rightarrow + \infty} \beta^T q_{1t}^2 < + \infty$ - -Using the lag operators described in chapter IX of *Macroeconomic -Theory, Second edition (1987)*, difference equation -{eq}`sstack1` can be written as - -$$ -\beta(1 - \frac{1+\beta + c_1}{\beta} L + \beta^{-1} L^2 ) q_{1t+2} = - c_0 + c_2 q_{2t+1} -$$ - -The polynomial in the lag operator on the left side can be **factored** -as - -```{math} -:label: sstack2 - -(1 - \frac{1+\beta + c_1}{\beta} L + \beta^{-1} L^2 ) = ( 1 - \delta_1 L ) (1 - \delta_2 L) -``` - -where $0 < \delta_1 < 1 < \frac{1}{\sqrt{\beta}} < \delta_2$. 
- -Because $\delta_2 > \frac{1}{\sqrt{\beta}}$ the operator -$(1 - \delta_2 L)$ contributes an **unstable** component if solved -**backwards** but a **stable** component if solved **forwards**. - -Mechanically, write - -$$ -(1- \delta_2 L) = -\delta_{2} L (1 - \delta_2^{-1} L^{-1} ) -$$ - -and compute the following inverse operator - -$$ -\left[-\delta_{2} L (1 - \delta_2^{-1} L^{-1} )\right]^{-1} = - \delta_2 (1 - {\delta_2}^{-1} )^{-1} L^{-1} -$$ - -Operating on both sides of equation {eq}`sstack2` with -$\beta^{-1}$ times this inverse operator gives the follower's -decision rule for setting $q_{1t+1}$ in the -**feedback-feedforward** form - -```{math} -:label: sstack3 - -q_{1t+1} = \delta_1 q_{1t} - c_0 \delta_2^{-1} \beta^{-1} \frac{1}{1 -\delta_2^{-1}} + c_2 \delta_2^{-1} \beta^{-1} \sum_{j=0}^\infty \delta_2^j q_{2t+j+1} , \quad t \geq 0 -``` - -The problem of the Stackelberg leader firm 2 is to choose the sequence -$\{q_{2t+1}\}_{t=0}^\infty$ to maximize its discounted profits - -$$ -\sum_{t=0}^\infty \beta^t \{ (a_0 - a_1 (q_{1t} + q_{2t}) ) q_{2t} - \gamma (q_{2t+1} - q_{2t})^2 \} -$$ - -subject to the sequence of constraints {eq}`sstack3` for $t \geq 0$. - -We can put a sequence $\{\theta_t\}_{t=0}^\infty$ of Lagrange -multipliers on the sequence of equations {eq}`sstack3` -and formulate the following Lagrangian for the Stackelberg leader firm -2's problem - -```{math} -:label: sstack4 - -\begin{aligned} -\tilde L & = \sum_{t=0}^\infty \beta^t\{ (a_0 - a_1 (q_{1t} + q_{2t}) ) q_{2t} - \gamma (q_{2t+1} - q_{2t})^2 \} \cr - & + \sum_{t=0}^\infty \beta^t \theta_t \{ \delta_1 q_{1t} - c_0 \delta_2^{-1} \beta^{-1} \frac{1}{1 -\delta_2^{-1}} + c_2 \delta_2^{-1} \beta^{-1} - \sum_{j=0}^\infty \delta_2^{-j} q_{2t+j+1} - q_{1t+1} -\end{aligned} -``` - -subject to initial conditions for $q_{1t}, q_{2t}$ at $t=0$. - -**Comments:** We have formulated the Stackelberg problem in a space of -sequences. 
- -The max-min problem associated with Lagrangian -{eq}`sstack4` is unpleasant because the time $t$ -component of firm $1$'s payoff function depends on the entire -future of its choices of $\{q_{1t+j}\}_{j=0}^\infty$. - -This renders a direct attack on the problem cumbersome. - -Therefore, below, we will formulate the Stackelberg leader's problem -recursively. - -We'll put our little duopoly model into a broader class of models with -the same conceptual structure. - -## The Stackelberg Problem - -We formulate a class of linear-quadratic Stackelberg leader-follower -problems of which our duopoly model is an instance. - -We use the optimal linear regulator (a.k.a. the linear-quadratic dynamic -programming problem described in [LQ Dynamic Programming -problems](https://python-intro.quantecon.org/lqcontrol.html)) to -represent a Stackelberg leader's problem recursively. - -Let $z_t$ be an $n_z \times 1$ vector of **natural -state variables**. - -Let $x_t$ be an $n_x \times 1$ vector of endogenous -forward-looking variables that are physically free to jump at $t$. - -In our duopoly example $x_t = v_{1t}$, the time $t$ decision -of the Stackelberg **follower**. - -Let $u_t$ be a vector of decisions chosen by the Stackelberg leader -at $t$. - -The $z_t$ vector is inherited physically from the past. - -But $x_t$ is a decision made by the Stackelberg follower at time -$t$ that is the follower's best response to the choice of an -entire sequence of decisions made by the Stackelberg leader at time -$t=0$. 
- -Let - -$$ -y_t = \begin{bmatrix} z_t \\ x_t \end{bmatrix} -$$ - -Represent the Stackelberg leader's one-period loss function as - -$$ -r(y, u) = y' R y + u' Q u -$$ - -Subject to an initial condition for $z_0$, but not for $x_0$, the -Stackelberg leader wants to maximize - -```{math} -:label: maxeq - --\sum_{t=0}^\infty \beta^t r(y_t, u_t) -``` - -The Stackelberg leader faces the model - -```{math} -:label: new2 - -\begin{bmatrix} I & 0 \\ G_{21} & G_{22} \end{bmatrix} -\begin{bmatrix} z_{t+1} \\ x_{t+1} \end{bmatrix} -= \begin{bmatrix} \hat A_{11} & \hat A_{12} \\ \hat A_{21} & \hat A_{22} \end{bmatrix} \begin{bmatrix} z_t \\ x_t \end{bmatrix} + \hat B u_t -``` - -We assume that the matrix -$\begin{bmatrix} I & 0 \\ G_{21} & G_{22} \end{bmatrix}$ on the -left side of equation {eq}`new2` is invertible, so that we -can multiply both sides by its inverse to obtain - -```{math} -:label: new3 - -\begin{bmatrix} z_{t+1} \\ x_{t+1} \end{bmatrix} -= \begin{bmatrix} A_{11} & A_{12} \\ A_{21} & A_{22} \end{bmatrix} -\begin{bmatrix} z_t \\ x_t \end{bmatrix} + B u_t -``` - -or - -```{math} -:label: constrainteq - -y_{t+1} = A y_t + B u_t -``` - -### Interpretation of the Second Block of Equations - -The Stackelberg follower's best response mapping is summarized by the -second block of equations of {eq}`new3`. - -In particular, these equations are the first-order conditions of the -Stackelberg follower's optimization problem (i.e., its Euler equations). - -These Euler equations summarize the forward-looking aspect of the -follower's behavior and express how its time $t$ decision depends on -the leader's actions at times $s \geq t$. - -When combined with a stability condition to be imposed below, the Euler -equations summarize the follower’s best response to the sequence of -actions by the leader. - -The Stackelberg leader maximizes {eq}`maxeq` by -choosing sequences $\{u_t, x_t, z_{t+1}\}_{t=0}^{\infty}$ -subject to {eq}`constrainteq` and an initial condition for $z_0$. 
- -Note that we have an initial condition for $z_0$ but not for $x_0$. - -$x_0$ is among the variables to be chosen at time $0$ by the -Stackelberg leader. - -The Stackelberg leader uses its understanding of the responses -restricted by {eq}`constrainteq` to manipulate the follower's -decisions. - -### More Mechanical Details - -For any vector $a_t$, define $\vec a_t = [a_t, -a_{t+1} \ldots ]$. - -Define a feasible set of $(\vec y_1, \vec u_0)$ sequences - -$$ -\Omega(y_0) = \left\{ (\vec y_1, \vec u_0) : y_{t+1} = A y_t + B u_t, \forall t \geq 0 \right\} -$$ - -Please remember that the follower's Euler equation is embedded in the -system of dynamic equations $y_{t+1} = A y_t + B u_t$. - -Note that in the definition of $\Omega(y_0)$, $y_0$ -is taken as given. - -Although it is taken as given in $\Omega(y_0)$, -eventually, the $x_0$ component of $y_0$ will be chosen by the -Stackelberg leader. - -### Two Subproblems - -Once again we use backward induction. - -We express the Stackelberg problem in terms of **two subproblems**. - -Subproblem 1 is solved by a **continuation Stackelberg leader** at each -date $t \geq 0$. - -Subproblem 2 is solved the **Stackelberg leader** at $t=0$. - -The two subproblems are designed - -- to respect the protocol in which the follower chooses - $\vec q_1$ after seeing $\vec q_2$ chosen by the leader -- to make the leader choose $\vec q_2$ while respecting that - $\vec q_1$ will be the follower's best response to - $\vec q_2$ -- to represent the leader's problem recursively by artfully choosing - the state variables confronting and the control variables available - to the leader - -#### Subproblem 1 - -$$ -v(y_0) = \max_{(\vec y_1, \vec u_0) \in \Omega(y_0)} - \sum_{t=0}^\infty \beta^t r(y_t, u_t) -$$ - -#### Subproblem 2 - -$$ -w(z_0) = \max_{x_0} v(y_0) -$$ - -Subproblem 1 takes the vector of forward-looking variables $x_0$ as -given. - -Subproblem 2 optimizes over $x_0$. 
- -The value function $w(z_0)$ tells the value of the Stackelberg plan -as a function of the vector of natural state variables at time $0$, -$z_0$. - -### Two Bellman Equations - -We now describe Bellman equations for $v(y)$ and $w(z_0)$. - -#### Subproblem 1 - -The value function $v(y)$ in subproblem 1 satisfies the Bellman -equation - -```{math} -:label: bellman-stack - -v(y) = \max_{u, y^*} \left\{ - r(y,u) + \beta v(y^*) \right\} -``` - -where the maximization is subject to - -$$ -y^* = A y + B u -$$ - -and $y^*$ denotes next period’s value. - -Substituting $v(y) = - y'P y$ into Bellman equation {eq}`bellman-stack` gives - -$$ --y' P y = {\rm max}_{ u, y^*} \left\{ - y' R y - u'Q u - \beta y^{* \prime} P y^* \right\} -$$ - -which as in lecture [linear regulator](https://python-intro.quantecon.org/lqcontrol.html) gives -rise to the algebraic matrix Riccati equation - -$$ -P = R + \beta A' P A - \beta^2 A' P B ( Q + \beta B' P B)^{-1} B' P A -$$ - -and the optimal decision rule coefficient vector - -$$ -F = \beta( Q + \beta B' P B)^{-1} B' P A -$$ - -where the optimal decision rule is - -$$ -u_t = - F y_t -$$ - -#### Subproblem 2 - -We find an optimal $x_0$ by equating to zero the gradient of $v(y_0)$ -with respect to $x_0$: - -$$ --2 P_{21} z_0 - 2 P_{22} x_0 =0, -$$ - -which implies that - -$$ -x_0 = - P_{22}^{-1} P_{21} z_0 -$$ - -## Stackelberg Plan - -Now let's map our duopoly model into the above setup. - -We we'll formulate a state space system - -$$ -y_t = \begin{bmatrix} z_t \cr x_t \end{bmatrix} -$$ - -where in this instance $x_t = v_{1t}$, the time $t$ decision -of the follower firm 1. - -### Calculations to Prepare Duopoly Model - -Now we'll proceed to cast our duopoly model within the framework of the -more general linear-quadratic structure described above. - -That will allow us to compute a Stackelberg plan simply by enlisting a -Riccati equation to solve a linear-quadratic dynamic program. 
- -As emphasized above, firm 1 acts as if firm 2's decisions -$\{q_{2t+1}, v_{2t}\}_{t=0}^\infty$ are given and beyond its -control. - -### Firm 1's Problem - -We again formulate firm 1's optimum problem in terms of the Lagrangian - -$$ -L=\sum_{t=0}^{\infty}\beta^{t}\{a_{0}q_{1t}-a_{1}q_{1t}^{2}-a_{1}q_{1t}q_{2t}-\gamma v_{1t}^{2}+\lambda_{t}[q_{1t}+v_{1t}-q_{1t+1}]\} -$$ - -Firm 1 seeks a maximum with respect to -$\{q_{1t+1}, v_{1t} \}_{t=0}^\infty$ and a minimum with respect to -$\{ \lambda_t\}_{t=0}^\infty$. - -First-order conditions for this problem are - -$$ -\begin{aligned} -\frac{\partial L}{\partial q_{1t}} & = a_0 - 2 a_1 q_{1t} - a_1 q_{2t} + \lambda_t - \beta^{-1} - \lambda_{t-1} = 0 , \quad t \geq 1 \cr - \frac{\partial L}{\partial v_{1t}} & = -2 \gamma v_{1t} + \lambda_t = 0 , \quad t \geq 0 - \end{aligned} -$$ - -These first-order order conditions and the constraint $q_{1t+1} = -q_{1t} + v_{1t}$ can be rearranged to take the form - -$$ -\begin{aligned} -v_{1t} & = \beta v_{1t+1} + \frac{\beta a_0}{2 \gamma} - \frac{\beta a_1}{\gamma} q_{1t+1} - - \frac{\beta a_1}{2 \gamma} q_{2t+1} \cr - q_{t+1} & = q_{1t} + v_{1t} -\end{aligned} -$$ - -We use these two equations as components of the following linear system -that confronts a Stackelberg continuation leader at time $t$ - -$$ -\begin{bmatrix} 1 & 0 & 0 & 0 \cr - 0 & 1 & 0 & 0 \cr - 0 & 0 & 1 & 0 \cr - \frac{\beta a_0}{2 \gamma} & - \frac{\beta a_1}{2 \gamma} & -\frac{\beta a_1}{\gamma} & \beta \end{bmatrix} - \begin{bmatrix} 1 \cr q_{2t+1} \cr q_{1t+1} \cr v_{1t+1} \end{bmatrix} - = \begin{bmatrix} 1 & 0 & 0 & 0 \cr - 0 & 1 & 0 & 0 \cr - 0 & 0 & 1 & 1 \cr - 0 & 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} 1 \cr q_{2t} \cr q_{1t} \cr v_{1t} \end{bmatrix} - + \begin{bmatrix} 0 \cr 1 \cr 0 \cr 0 \end{bmatrix} v_{2t} -$$ - -Time $t$ revenues of firm 2 are -$\pi_{2t} = a_0 q_{2t} - a_1 q_{2t}^2 - a_1 q_{1t} q_{2t}$ which -evidently equal - -$$ -z_t' R_1 z_t \equiv \begin{bmatrix} 1 \cr q_{2t} \cr q_{1t} 
\end{bmatrix}' - \begin{bmatrix} 0 & \frac{a_0}{2}& 0 \cr - \frac{a_0}{2} & -a_1 & -\frac{a_1}{2}\cr - 0 & -\frac{a_1}{2} & 0 \end{bmatrix} -\begin{bmatrix} 1 \cr q_{2t} \cr q_{1t} \end{bmatrix} -$$ - -If we set $Q = \gamma$, then firm 2's period $t$ profits can -then be written - -$$ -y_t' R y_t - Q v_{2t}^2 -$$ - -where - -$$ -y_t = \begin{bmatrix} z_t \cr x_t \end{bmatrix} -$$ - -with $x_t = v_{1t}$ and - -$$ -R = -\begin{bmatrix} R_1 & 0 \cr 0 & 0 \end{bmatrix} -$$ - -We'll report results of implementing this code soon. - -But first we want to represent the Stackelberg leader's optimal choices -recursively. - -It is important to do this for several reasons: - -- properly to interpret a representation of the Stackelberg leaders's - choice as a sequence of history-dependent functions -- to formulate a recursive version of the follower's choice problem - -First let's get a recursive representation of the Stackelberg leader's -choice of $\vec q_2$ for our duopoly model. - -## Recursive Representation of Stackelberg Plan - -In order to attain an appropriate representation of the Stackelberg -leader's history-dependent plan, we will employ what amounts to a -version of the **Big K, little k** device often used in -macroeconomics by distinguishing $z_t$, which depends partly on -decisions $x_t$ of the followers, from another vector -$\check z_t$, which does not. - -We will use $\check z_t$ and its history $\check z^t -= [\check z_t, \check z_{t-1}, \ldots, \check z_0]$ to describe the -sequence of the Stackelberg leader's decisions that the Stackelberg -follower takes as given. - -Thus, we let -$\check y_t' = \begin{bmatrix}\check z_t' & \check x_t'\end{bmatrix}$ -with initial condition $\check z_0 = z_0$ given. - -That we distinguish $\check z_t$ from $z_t$ is part and -parcel of the **Big K, little k** device in this -instance. 
- -We have demonstrated that a Stackelberg plan for -$\{u_t\}_{t=0}^\infty$ has a recursive representation - -$$ -\begin{aligned} -\check x_0 & = - P_{22}^{-1} P_{21} z_0 \cr - u_t & = - F \check y_t, \quad t \geq 0 \cr - \check y_{t+1} & = (A - BF) \check y_t, \quad t \geq 0 -\end{aligned} -$$ - -From this representation we can deduce the sequence of functions -$\sigma = \{\sigma_t(\check z^t)\}_{t=0}^\infty$ that comprise a -Stackelberg plan. - -For convenience, let $\check A \equiv A - BF$ and partition -$\check A$ conformably to the partition -$y_t = \begin{bmatrix}\check z_t \cr \check x_t \end{bmatrix}$ as - -$$ -\begin{bmatrix}\check A_{11} & \check A_{12} \cr \check A_{21} & \check A_{22} \end{bmatrix} -$$ - -Let $H^0_0 \equiv - P_{22}^{-1} P_{21}$ so that -$\check x_0 = H^0_0 \check z_0$. - -Then iterations on $\check y_{t+1} = \check A \check y_t$ starting from initial -condition $\check y_0 = \begin{bmatrix}\check z_0 \cr H^0_0 \check z_0\end{bmatrix}$ -imply that for $t \geq 1$ - -$$ -x_t = \sum_{j=1}^t H_j^t \check z_{t-j} -$$ - -where - -$$ -\begin{aligned} -H^t_1 & = \check A_{21} \cr - H^t_2 & = \check A_{22} \check A_{21} \cr - \ \ \vdots \ \ & \ \ \quad \vdots \cr - H^t_{t-1} & = \check A_{22}^{t-2} \check A_{21} \cr - H^t_t & = \check A_{22}^{t-1}(\check A_{21} + \check A_{22} H^0_0 ) - \end{aligned} -$$ - -An optimal decision rule for the Stackelberg's choice of $u_t$ is - -$$ -u_t = - F \check y_t \equiv - \begin{bmatrix} F_z & F_x \cr \end{bmatrix} -\begin{bmatrix}\check z_t \cr x_t \cr \end{bmatrix} -$$ - -or - -```{math} -:label: finalrule - -u_t = - F_z \check z_t - F_x \sum_{j=1}^t H^t_j z_{t-j} = \sigma_t(\check z^t) -``` - -Representation {eq}`finalrule` confirms that whenever -$F_x \neq 0$, the typical situation, the time $t$ component -$\sigma_t$ of a Stackelberg plan is **history dependent**, meaning -that the Stackelberg leader's choice $u_t$ depends not just on -$\check z_t$ but on components of $\check z^{t-1}$. 
- -### Comments and Interpretations - -After all, at the end of the day, it will turn out that because we set -$\check z_0 = z_0$, it will be true that $z_t = \check z_t$ -for all $t \geq 0$. - -Then why did we distinguish $\check z_t$ from $z_t$? - -The answer is that if we want to present to the Stackelberg **follower** -a history-dependent representation of the Stackelberg **leader's** -sequence $\vec q_2$, we must use representation -{eq}`finalrule` cast in terms of the history -$\check z^t$ and **not** a corresponding representation cast in -terms of $z^t$. - -### Dynamic Programming and Time Consistency of **follower's** Problem - -Given the sequence $\vec q_2$ chosen by the Stackelberg leader in -our duopoly model, it turns out that the Stackelberg **follower's** -problem is recursive in the *natural* state variables that confront a -follower at any time $t \geq 0$. - -This means that the follower's plan is time consistent. - -To verify these claims, we'll formulate a recursive version of a -follower's problem that builds on our recursive representation of the -Stackelberg leader's plan and our use of the **Big K, little k** idea. - -### Recursive Formulation of a Follower’s Problem - -We now use what amounts to another “Big $K$, little $k$” trick (see -[rational expectations equilibrium](https://lectures.quantecon.org/py/rational_expectations.html)) -to formulate a recursive version of a follower’s problem cast in terms -of an ordinary Bellman equation. - -Firm 1, the follower, faces $\{q_{2t}\}_{t=0}^\infty$ as -a given quantity sequence chosen by the leader and believes that its -output price at $t$ satisfies - -$$ -p_t = a_0 - a_1 ( q_{1t} + q_{2t}) , \quad t \geq 0 -$$ - -Our challenge is to represent $\{q_{2t}\}_{t=0}^\infty$ as -a given sequence. 
- -To do so, recall that under the Stackelberg plan, firm 2 sets output -according to the $q_{2t}$ component of - -$$ -y_{t+1} = \begin{bmatrix} 1 \cr q_{2t} \cr q_{1t} \cr x_t \end{bmatrix} -$$ - -which is governed by - -$$ -y_{t+1} = (A - BF) y_t -$$ - -To obtain a recursive representation of a $\{q_{2t}\}$ sequence -that is exogenous to firm 1, we define a state $\tilde y_t$ - -$$ -\tilde y_t = \begin{bmatrix} 1 \cr q_{2t} \cr \tilde q_{1t} \cr \tilde x_t \end{bmatrix} -$$ - -that evolves according to - -$$ -\tilde y_{t+1} = (A - BF) \tilde y_t -$$ - -subject to the initial condition $\tilde q_{10} = q_{10}$ and -$\tilde x_0 = x_0$ where $x_0 = - P_{22}^{-1} P_{21}$ as -stated above. - -Firm 1's state vector is - -$$ -X_t = \begin{bmatrix} \tilde y_t \cr q_{1t} \end{bmatrix} -$$ - -It follows that the follower firm 1 faces law of motion - -```{math} -:label: law-motion - -\begin{bmatrix} \tilde y_{t+1} \\ -q_{1t+1} \end{bmatrix} = \begin{bmatrix} A - BF & 0 \\ -0 & 1 \end{bmatrix} \begin{bmatrix} \tilde y_{t} \\ -q_{1t} \end{bmatrix} + \begin{bmatrix} 0 \\ 1 \end{bmatrix} x_t -``` - -This specfification assures that from the point of the view of a firm 1, -$q_{2t}$ is an exogenous process. - -Here - -- $\tilde q_{1t}, \tilde x_t$ play the role of **Big K**. -- $q_{1t}, x_t$ play the role of **little k**. 
- -The time $t$ component of firm 1's objective is - -$$ -\tilde X_t' \tilde R x_t - x_t^2 \tilde Q = \begin{bmatrix} 1 \cr q_{2t} \cr \tilde q_{1t} \cr \tilde x_t \cr q_{1t} \end{bmatrix}' - \begin{bmatrix} 0 & 0 & 0 & 0 & \frac{a_0}{2} \cr - 0 & 0 & 0 & 0 & - \frac{a_1}{2} \cr - 0 & 0 & 0 & 0 & 0 \cr - 0 & 0 & 0 & 0 & 0 \cr - \frac{a_0}{2} & -\frac{a_1}{2} & 0 & 0 & - a_1 \end{bmatrix} - \begin{bmatrix} 1 \cr q_{2t} \cr \tilde q_{1t} \cr \tilde x_t \cr q_{1t} \end{bmatrix} - \gamma - x_t^2 -$$ - -Firm 1's optimal decision rule is - -$$ -x_t = - \tilde F X_t -$$ - -and it's state evolves according to - -$$ -\tilde X_{t+1} = (\tilde A - \tilde B \tilde F) X_t -$$ - -under its optimal decision rule. - -Later we shall compute $\tilde F$ and verify that when we set - -$$ -X_0 = \begin{bmatrix} 1 \cr q_{20} \cr q_{10} \cr x_0 \cr q_{10} \end{bmatrix} -$$ - -we recover - -$$ -x_0 = - \tilde F \tilde X_0 -$$ - -which will verify that we have properly set up a recursive -representation of the follower's problem facing the Stackelberg leader's -$\vec q_2$. - -### Time Consistency of Follower's Plan - -Since the follower can solve its problem using dynamic programming its -problem is recursive in what for it are the **natural state variables**, -namely - -$$ -\begin{bmatrix} 1 \cr q_{2t} \cr \tilde q_{10} \cr \tilde x_0 \end{bmatrix} -$$ - -It follows that the follower's plan is time consistent. 
- -## Computing the Stackelberg Plan - -Here is our code to compute a Stackelberg plan via a linear-quadratic -dynamic program as outlined above - - -```{code-cell} julia -using LaTeXStrings, QuantEcon, Plots, LinearAlgebra, Statistics, - Random - -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test -``` - -We define named tuples and default values for the model and solver settings, and -instantiate one copy of each - -```{code-cell} julia -function model(; a0 = 10, a1 = 2, beta = 0.96, gamma = 120.0, n = 300) - return (; a0, a1, beta, gamma, n) -end - -# things like tolerances, etc. -settings(; tol0 = 1e-8, tol1 = 1e-16, tol2 = 1e-2) = (; tol0, tol1, tol2) - -defaultModel = model(); -defaultSettings = settings(); -``` - -Now we can compute the actual policy using the LQ routine from QuantEcon.jl - -```{code-cell} julia -(; a0, a1, beta, gamma, n) = defaultModel -(; tol0, tol1, tol2) = defaultSettings - -betas = [beta^x for x in 0:(n - 1)] -Alhs = I + zeros(4, 4); -Alhs[4, :] = [ - beta * a0 / (2 * gamma), - -beta * a1 / (2 * gamma), - -beta * a1 / gamma, - beta, -] # Euler equation coefficients -Arhs = I + zeros(4, 4); -Arhs[3, 4] = 1.0; -Alhsinv = inv(Alhs); - -A = Alhsinv * Arhs; -B = Alhsinv * [0, 1, 0, 0]; -R = [0 -a0/2 0 0; -a0/2 a1 a1/2 0; 0 a1/2 0 0; 0 0 0 0]; -Q = gamma; -lq = QuantEcon.LQ(Q, R, A, B, bet = beta); -P, F, d = stationary_values(lq) - -P22 = P[4:end, 4:end]; -P21 = P[4:end, 1:3]; -P22inv = inv(P22); -H_0_0 = -P22inv * P21; - -# simulate forward -pi_leader = zeros(n); -z0 = [1, 1, 1]; -x0 = H_0_0 * z0; -y0 = vcat(z0, x0); - -Random.seed!(1) # for reproducibility -yt, ut = compute_sequence(lq, y0, n); -pi_matrix = R + F' * Q * F; - -for t in 1:n - pi_leader[t] = -(yt[:, t]' * pi_matrix * yt[:, t]) -end - -println("Computed policy for Stackelberg leader: $F") -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test F ≈ [-1.580044538772657, 0.29461312747031404, 0.6748093760774972, 6.539705936147515]' -``` - -### 
Implied Time Series for Price and Quantities - -The following code plots the price and quantities - -```{code-cell} julia -q_leader = yt[2, 1:end]; -q_follower = yt[3, 1:end]; -q = q_leader + q_follower; -p = a0 .- a1 * q; - -plot(1:(n + 1), [q_leader, q_follower, p], - title = "Output and Prices, Stackelberg Duopoly", - labels = ["leader output" "follower output" "price"], - xlabel = L"t") -``` - -### Value of Stackelberg Leader - -We'll compute the present value earned by the Stackelberg leader. - -We'll compute it two ways (they give identical answers -- just a check -on coding and thinking) - -```{code-cell} julia -v_leader_forward = sum(betas .* pi_leader); -v_leader_direct = -yt[:, 1]' * P * yt[:, 1]; - -println("v_leader_forward (forward sim) is $v_leader_forward") -println("v_leader_direct is $v_leader_direct") -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test v_leader_forward ≈ 150.0316212532547 -@test v_leader_direct ≈ 150.03237147548967 -``` - -```{code-cell} julia -# manually check whether P is an approximate fixed point -P_next = (R + F' * Q * F + beta * (A - B * F)' * P * (A - B * F)); -all(P - P_next .< tol0) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test all(P - P_next .< tol0) -``` - -```{code-cell} julia -# manually checks whether two different ways of computing the -# value function give approximately the same answer -v_expanded = -((y0' * R * y0 + ut[:, 1]' * Q * ut[:, 1] + - beta * (y0' * (A - B * F)' * P * (A - B * F) * y0))); -(v_leader_direct - v_expanded < tol0)[1, 1] -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test (v_leader_direct - v_expanded < tol0)[1, 1] -``` - -## Exhibiting Time Inconsistency of Stackelberg Plan - -In the code below we compare two values - -- the continuation value $- y_t P y_t$ earned by a continuation - Stackelberg leader who inherits state $y_t$ at $t$ -- the value of a **reborn Stackelberg leader** who inherits state - $z_t$ at $t$ and sets $x_t = - 
P_{22}^{-1} P_{21}$ - -The difference between these two values is a tell-tale time of the time -inconsistency of the Stackelberg plan - -```{code-cell} julia -# Compute value function over time with reset at time t -vt_leader = zeros(n); -vt_reset_leader = similar(vt_leader); - -yt_reset = copy(yt) -yt_reset[end, :] = (H_0_0 * yt[1:3, :]) - -for t in 1:n - vt_leader[t] = -yt[:, t]' * P * yt[:, t] - vt_reset_leader[t] = -yt_reset[:, t]' * P * yt_reset[:, t] -end - -p1 = plot(1:(n + 1), [(-F * yt)', (-F * yt_reset)'], - labels = ["Stackelberg Leader" L"Continuation Leader at $t$"], - title = "Leader Control Variable", xlabel = L"t"); -p2 = plot(1:(n + 1), [yt[4, :], yt_reset[4, :]], - title = "Follower Control Variable", xlabel = L"t", legend = false); -p3 = plot(1:n, [vt_leader, vt_reset_leader], legend = false, - xlabel = L"t", title = "Leader Value Function"); -plot(p1, p2, p3, layout = (3, 1), size = (800, 600)) -``` - -## Recursive Formulation of the Follower's Problem - -We now formulate and compute the recursive version of the follower's -problem. 
- -We check that the recursive **Big** $K$ **, little** $k$ formulation of the follower's problem produces the same output path -$\vec q_1$ that we computed when we solved the Stackelberg problem - -```{code-cell} julia -A_tilde = I + zeros(5, 5); -A_tilde[1:4, 1:4] .= A - B * F; -R_tilde = [0 0 0 0 -a0/2; 0 0 0 0 a1/2; 0 0 0 0 0; 0 0 0 0 0; -a0/2 a1/2 0 0 a1]; -Q_tilde = Q; -B_tilde = [0, 0, 0, 0, 1]; - -lq_tilde = QuantEcon.LQ(Q_tilde, R_tilde, A_tilde, B_tilde, bet = beta); -P_tilde, F_tilde, d_tilde = stationary_values(lq_tilde); -y0_tilde = vcat(y0, y0[3]); -yt_tilde = compute_sequence(lq_tilde, y0_tilde, n)[1]; -``` - -```{code-cell} julia -# checks that the recursive formulation of the follower's problem gives -# the same solution as the original Stackelberg problem -plot(1:(n + 1), [yt_tilde[5, :], yt_tilde[3, :]], labels = [L"\tilde{q}" L"q"]) -``` - -Note: Variables with `_tilde` are obtained from solving the follower's -problem -- those without are from the Stackelberg problem. - -```{code-cell} julia -# maximum absolute difference in quantities over time between the first and second solution methods -max(abs(yt_tilde[5] - yt_tilde[3])) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test max(abs(yt_tilde[5] - yt_tilde[3])) ≈ 0. atol = 1e-15 -``` - -```{code-cell} julia -# x0 == x0_tilde -yt[:, 1][end] - (yt_tilde[:, 2] - yt_tilde[:, 1])[end] < tol0 -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test yt[:, 1][end] - (yt_tilde[:, 2] - yt_tilde[:, 1])[end] < tol0 -``` - -### Explanation of Alignment - -If we inspect the coefficients in the decision rule $- \tilde F$, -we can spot the reason that the follower chooses to set $x_t = -\tilde x_t$ when it sets $x_t = - \tilde F X_t$ in -the recursive formulation of the follower problem. - -Can you spot what features of $\tilde F$ imply this? 
- -Hint: remember the components of $X_t$ - -```{code-cell} julia -F_tilde # policy function in the follower's problem -``` - -```{code-cell} julia -P # value function in the Stackelberg problem -``` - -```{code-cell} julia -P_tilde # value function in the follower's problem -``` - -```{code-cell} julia -# manually check that P is an approximate fixed point -all((P - ((R + F' * Q * F) + beta * (A - B * F)' * P * (A - B * F)) .< tol0)) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test all((P - ((R + F' * Q * F) + beta * (A - B * F)' * P * (A - B * F)) .< tol0)) -``` - -```{code-cell} julia -# compute `P_guess` using `F_tilde_star` -F_tilde_star = -[0, 0, 0, 1, 0]'; -P_guess = zeros(5, 5); - -for i in 1:1000 - P_guess = ((R_tilde + F_tilde_star' * Q_tilde * F_tilde_star) + - beta * (A_tilde - B_tilde * F_tilde_star)' * P_guess - * (A_tilde - B_tilde * F_tilde_star)) -end -``` - -```{code-cell} julia -# value function in the follower's problem --(y0_tilde' * P_tilde * y0_tilde)[1, 1] -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test -(y0_tilde' * P_tilde * y0_tilde)[1, 1] ≈ 112.65590740578173 -``` - -```{code-cell} julia -# value function using P_guess --(y0_tilde' * P_guess * y0_tilde)[1, 1] -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test -(y0_tilde' * P_guess * y0_tilde)[1, 1] ≈ 112.65590740578186 -``` - -```{code-cell} julia -# c policy using policy iteration algorithm -F_iter = (beta * inv(Q + beta * B_tilde' * P_guess * B_tilde) - * B_tilde' * P_guess * A_tilde); -P_iter = zeros(5, 5); -dist_vec = zeros(5, 5); - -for i in 1:100 - # compute P_iter - dist_vec = similar(P_iter) - for j in 1:1000 - P_iter = (R_tilde + F_iter' * Q * F_iter) + - beta * - (A_tilde - B_tilde * F_iter)' * P_iter * - (A_tilde - B_tilde * F_iter) - - # update F_iter - F_iter = beta * inv(Q + beta * B_tilde' * P_iter * B_tilde) * - B_tilde' * P_iter * A_tilde - - dist_vec = P_iter - ((R_tilde + F_iter' * Q * F_iter) + - beta * (A_tilde - 
B_tilde * F_iter)' * P_iter * - (A_tilde - B_tilde * F_iter)) - end -end - -if maximum(abs.(dist_vec)) < 1e-8 - dist_vec2 = F_iter - - (beta * inv(Q + beta * B_tilde' * P_iter * B_tilde) * B_tilde' * - P_iter * A_tilde) - if maximum(abs.(dist_vec2)) < 1e-8 - @show F_iter - else - println("The policy didn't converge: try increasing the number of outer loop iterations") - end -else - println("The policy didn't converge: try increasing the number of inner loop iterations") -end -``` - -```{code-cell} julia -yt_tilde_star = zeros(n, 5); -yt_tilde_star[1, :] = y0_tilde; - -for t in 1:(n - 1) - yt_tilde_star[t + 1, :] = (A_tilde - B_tilde * F_tilde_star) * - yt_tilde_star[t, :] -end - -plot([yt_tilde_star[:, 5], yt_tilde[3, :]], labels = [L"\tilde{q}" L"q"]) -``` - -```{code-cell} julia -maximum(abs.(yt_tilde_star[:, 5] - yt_tilde[3, 1:(end - 1)])) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test maximum(abs.(yt_tilde_star[:, 5] - yt_tilde[3, 1:end-1])) < 1e-15 -``` - -## Markov Perfect Equilibrium - -The **state** vector is - -$$ -z_t = \begin{bmatrix} 1 \cr q_{2t} \cr q_{1t} \end{bmatrix} -$$ - -and the state transition dynamics are - -$$ -z_{t+1} = A z_t + B_1 v_{1t} + B_2 v_{2t} -$$ - -where $A$ is a $3 \times 3$ identity matrix and - -$$ -B_1 = \begin{bmatrix} 0 \cr 0 \cr 1 \end{bmatrix} , -\quad B_2 = \begin{bmatrix} 0 \cr 1 \cr 0 \end{bmatrix} -$$ - -The Markov perfect decision rules are - -$$ -v_{1t} = - F_1 z_t , \quad v_{2t} = - F_2 z_t -$$ - -and in the Markov perfect equilibrium the state evolves according to - -$$ -z_{t+1} = (A - B_1 F_1 - B_2 F_2) z_t -$$ - -```{code-cell} julia -# in LQ form -A = I + zeros(3, 3); -B1 = [0, 0, 1]; -B2 = [0, 1, 0]; -R1 = [0 0 -a0/2; 0 0 a1/2; -a0/2 a1/2 a1]; -R2 = [0 -a0/2 0; -a0/2 a1 a1/2; 0 a1/2 0]; -Q1 = Q2 = gamma; -S1 = S2 = W1 = W2 = M1 = M2 = 0.0; - -# solve using nnash from QE -F1, F2, P1, P2 = nnash(A, B1, B2, R1, R2, Q1, Q2, - S1, S2, W1, W2, M1, M2, - beta = beta, - tol = tol1); - -# simulate 
forward -AF = A - B1 * F1 - B2 * F2; -z = zeros(3, n); -z[:, 1] .= 1; -for t in 1:(n - 1) - z[:, t + 1] = AF * z[:, t] -end - -println("Policy for F1 is $F1") -println("Policy for F2 is $F2") -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test round(F1[1], digits = 4) == -0.227 -@test round(F2[2], digits = 4) == 0.0945 -``` - -```{code-cell} julia -q1 = z[2, :]; -q2 = z[3, :]; -q = q1 + q2; # total output, MPE -p = a0 .- a1 * q; # total price, MPE -plot([q, p], labels = ["total ouput" "total price"], - title = "Output and prices, duopoly MPE", xlabel = L"t") -``` - -```{code-cell} julia -# computes the maximum difference in quantities across firms -maximum(abs.(q1 - q2)) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test maximum(abs.(q1 - q2)) < 1e-14 -``` - -```{code-cell} julia -# compute values -u1 = -F1 * z; -u2 = -F2 * z; -pi_1 = (p .* q1)' - gamma * u1 .^ 2; -pi_2 = (p .* q2)' - gamma * u2 .^ 2; - -v1_forward = pi_1 * betas; -v2_forward = pi_2 * betas; - -v1_direct = -z[:, 1]' * P1 * z[:, 1]; -v2_direct = -z[:, 1]' * P2 * z[:, 1]; - -println("Firm 1: Direct is $v1_direct, Forward is $(v1_forward[1])"); -println("Firm 2: Direct is $v2_direct, Forward is $(v2_forward[1])"); -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test round(v1_direct, digits = 3) == 133.329 -@test round(v2_direct, digits = 3) == 133.329 -@test round(v1_forward[1], digits = 3) == 133.330 -@test round(v2_forward[1], digits = 3) == 133.330 -``` - -```{code-cell} julia -# sanity check -Lambda_1 = A - B2 * F2; -lq1 = QuantEcon.LQ(Q1, R1, Lambda_1, B1, bet = beta); -P1_ih, F1_ih, d = stationary_values(lq1); - -v2_direct_alt = -z[:, 1]' * P1_ih * z[:, 1] + d; -all(abs.(v2_direct - v2_direct_alt) < tol2) -``` - -## MPE vs. 
Stackelberg - -```{code-cell} julia -vt_MPE = zeros(n); -vt_follower = zeros(n); - -for t in 1:n - vt_MPE[t] = -z[:, t]' * P1 * z[:, t] - vt_follower[t] = -yt_tilde[:, t]' * P_tilde * yt_tilde[:, t] -end - -plot([vt_MPE, vt_leader, vt_follower], - labels = ["MPE" "Stackelberg leader" "Stackelberg follower"], - title = "MPE vs Stackelberg Values", - xlabel = L"t", - legend = :outertopright) -``` - -```{code-cell} julia -# display values -println("vt_leader(y0) = $(vt_leader[1])"); -println("vt_follower(y0) = $(vt_follower[1])") -println("vt_MPE(y0) = $(vt_MPE[1])"); -``` - -```{code-cell} julia -# total difference in value b/t Stackelberg and MPE -vt_leader[1] + vt_follower[1] - 2 * vt_MPE[1] -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test round(vt_leader[1] + vt_follower[1] - 2*vt_MPE[1], digits = 4) == -3.9707 -``` - diff --git a/lectures/dynamic_programming_squared/lqramsey.md b/lectures/dynamic_programming_squared/lqramsey.md deleted file mode 100644 index 69a74665..00000000 --- a/lectures/dynamic_programming_squared/lqramsey.md +++ /dev/null @@ -1,984 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(lqramsey)= -```{raw} html - -``` - -# {index}`Optimal Taxation in an LQ Economy ` - -```{index} single: Ramsey Problem; Optimal Taxation -``` - -```{contents} Contents -:depth: 2 -``` - -## Overview - -In this lecture we study optimal fiscal policy in a linear quadratic setting. - -We slightly modify a well-known model of Robert Lucas and Nancy Stokey {cite}`LucasStokey1983` so that convenient formulas for -solving linear-quadratic models can be applied to simplify the calculations. - -The economy consists of a representative household and a benevolent government. - -The government finances an exogenous stream of government purchases with state-contingent loans and a linear tax on labor income. 
- -A linear tax is sometimes called a flat-rate tax. - -The household maximizes utility by choosing paths for consumption and labor, taking prices and the government's tax rate and borrowing plans as given. - -Maximum attainable utility for the household depends on the government's tax and borrowing plans. - -The *Ramsey problem* {cite}`Ramsey1927` is to choose tax and borrowing plans that maximize the household's welfare, taking the household's optimizing behavior as given. - -There is a large number of competitive equilibria indexed by different government fiscal policies. - -The Ramsey planner chooses the best competitive equilibrium. - -We want to study the dynamics of tax rates, tax revenues, government debt under a Ramsey plan. - -Because the Lucas and Stokey model features state-contingent government debt, the government debt dynamics differ substantially from those in a model of Robert Barro {cite}`Barro1979`. - -```{only} html -The treatment given here closely follows this manuscript, prepared -by Thomas J. Sargent and Francois R. Velde. -``` - -```{only} latex -The treatment given here closely follows [this manuscript](https://lectures.quantecon.org/_downloads/firenze.pdf), prepared -by Thomas J. Sargent and Francois R. Velde. -``` - -We cover only the key features of the problem in this lecture, leaving you to refer to that source for additional results and intuition. - -### Model Features - -* Linear quadratic (LQ) model -* Representative household -* Stochastic dynamic programming over an infinite horizon -* Distortionary taxation - - -```{code-cell} julia -using LinearAlgebra, Statistics -``` - -## The Ramsey Problem - -We begin by outlining the key assumptions regarding technology, households and the government sector. - -### Technology - -Labor can be converted one-for-one into a single, non-storable consumption good. - -In the usual spirit of the LQ model, the amount of labor supplied in each period is unrestricted. 
- -This is unrealistic, but helpful when it comes to solving the model. - -Realistic labor supply can be induced by suitable parameter values. - -### Households - -Consider a representative household who chooses a path $\{\ell_t, c_t\}$ -for labor and consumption to maximize - -```{math} -:label: lq_hu - --\mathbb E \frac{1}{2} \sum_{t=0}^{\infty} \beta^t -\left[ - (c_t - b_t)^2 + \ell_t^2 -\right] -``` - -subject to the budget constraint - -```{math} -:label: lq_hc - -\mathbb E \sum_{t=0}^{\infty} \beta^t p^0_t -\left[ - d_t + (1 - \tau_t) \ell_t + s_t - c_t -\right] = 0 -``` - -Here - -* $\beta$ is a discount factor in $(0, 1)$ -* $p_t^0$ is a scaled Arrow-Debreu price at time $0$ of history contingent goods at time $t+j$ -* $b_t$ is a stochastic preference parameter -* $d_t$ is an endowment process -* $\tau_t$ is a flat tax rate on labor income -* $s_t$ is a promised time-$t$ coupon payment on debt issued by the government - -The scaled Arrow-Debreu price $p^0_t$ is related to the unscaled Arrow-Debreu price as follows. - -If we let $\pi^0_t(x^t)$ -denote the probability (density) of a history $x^t = [x_t, x_{t-1}, \ldots, x_0]$ of the state $x^t$, then -the Arrow-Debreu time $0$ price of a claim on one unit of consumption at date $t$, history $x^t$ would be - -$$ -\frac{\beta^t p^0_t} {\pi_t^0(x^t)} -$$ - -Thus, our scaled Arrow-Debreu price is the ordinary Arrow-Debreu price multiplied by the discount factor $\beta^t$ and divided -by an appropriate probability. - -The budget constraint {eq}`lq_hc` requires that the present value of consumption be restricted to equal the present value of endowments, labor income and coupon payments on bond holdings. - -### Government - -The government imposes a linear tax on labor income, fully committing to a stochastic path of tax rates at time zero. - -The government also issues state-contingent debt. - -Given government tax and borrowing plans, we can construct a competitive equilibrium with distorting government taxes. 
- -Among all such competitive equilibria, the Ramsey plan is the one that maximizes the welfare of the representative consumer. - -### Exogenous Variables - -Endowments, government expenditure, the preference shock process $b_t$, and -promised coupon payments on initial government debt $s_t$ are all exogenous, and given by - -* $d_t = S_d x_t$ -* $g_t = S_g x_t$ -* $b_t = S_b x_t$ -* $s_t = S_s x_t$ - -The matrices $S_d, S_g, S_b, S_s$ are primitives and $\{x_t\}$ is -an exogenous stochastic process taking values in $\mathbb R^k$. - -We consider two specifications for $\{x_t\}$. - -(lq_twospec)= -1. Discrete case: $\{x_t\}$ is a discrete state Markov chain with transition matrix $P$. -1. VAR case: $\{x_t\}$ obeys $x_{t+1} = A x_t + C w_{t+1}$ where $\{w_t\}$ is independent zero mean Gaussian with identify covariance matrix. - -### Feasibility - -The period-by-period feasibility restriction for this economy is - -```{math} -:label: lq_feasible - -c_t + g_t = d_t + \ell_t -``` - -A labor-consumption process $\{\ell_t, c_t\}$ is called *feasible* if {eq}`lq_feasible` holds for all $t$. - -### Government budget constraint - -Where $p_t^0$ is again a scaled Arrow-Debreu price, the time zero government budget constraint is - -```{math} -:label: lq_gc - -\mathbb E \sum_{t=0}^{\infty} \beta^t p^0_t -(s_t + g_t - \tau_t \ell_t ) = 0 -``` - -### Equilibrium - -An *equilibrium* is a feasible allocation $\{\ell_t, c_t\}$, a sequence -of prices $\{p_t^0\}$, and a tax system $\{\tau_t\}$ such that - -1. The allocation $\{\ell_t, c_t\}$ is optimal for the household given $\{p_t^0\}$ and $\{\tau_t\}$. -1. The government's budget constraint {eq}`lq_gc` is satisfied. - -The *Ramsey problem* is to choose the equilibrium $\{\ell_t, c_t, \tau_t, p_t^0\}$ that maximizes the -household's welfare. - -If $\{\ell_t, c_t, \tau_t, p_t^0\}$ solves the Ramsey problem, -then $\{\tau_t\}$ is called the *Ramsey plan*. - -The solution procedure we adopt is - -1. 
Use the first-order conditions from the household problem to pin down - prices and allocations given $\{\tau_t\}$. -1. Use these expressions to rewrite the government budget constraint - {eq}`lq_gc` in terms of exogenous variables and allocations. -1. Maximize the household's objective function {eq}`lq_hu` subject to the - constraint constructed in step 2 and the feasibility constraint - {eq}`lq_feasible`. - -The solution to this maximization problem pins down all quantities of interest. - -### Solution - -Step one is to obtain the first-conditions for the household's problem, -taking taxes and prices as given. - -Letting $\mu$ be the Lagrange multiplier on {eq}`lq_hc`, the first-order -conditions are $p_t^0 = (c_t - b_t) / \mu$ and $\ell_t = (c_t - b_t) -(1 - \tau_t)$. - -Rearranging and normalizing at $\mu = b_0 - c_0$, we can write these -conditions as - -```{math} -:label: lq_hfoc - -p_t^0 = \frac{b_t - c_t}{b_0 - c_0} -\quad \text{and} \quad -\tau_t = 1 - \frac{\ell_t}{b_t - c_t} -``` - -Substituting {eq}`lq_hfoc` into the government's budget constraint {eq}`lq_gc` -yields - -```{math} -:label: lq_gc2 - -\mathbb E \sum_{t=0}^{\infty} \beta^t -\left[ (b_t - c_t)(s_t + g_t - \ell_t) + \ell_t^2 \right] = 0 -``` - -The Ramsey problem now amounts to maximizing {eq}`lq_hu` subject to -{eq}`lq_gc2` and {eq}`lq_feasible`. 
- -The associated Lagrangian is - -```{math} -:label: lq_rp - -\mathscr L = -\mathbb E \sum_{t=0}^{\infty} \beta^t -\left\{ --\frac{1}{2} \left[ (c_t - b_t)^2 + \ell_t^2 \right] + -\lambda -\left[ (b_t - c_t)(\ell_t - s_t - g_t) - \ell_t^2 \right] + -\mu_t -[d_t + \ell_t - c_t - g_t] -\right\} -``` - -The first order conditions associated with $c_t$ and $\ell_t$ are - -$$ --(c_t - b_t ) + \lambda [- \ell_t + (g_t + s_t )] = \mu_t -$$ - -and - -$$ -\ell_t - \lambda [(b_t - c_t) - 2 \ell_t ] = \mu_t -$$ - -Combining these last two equalities with {eq}`lq_feasible` and working -through the algebra, one can show that - -```{math} -:label: lq_lcex - -\ell_t = \bar \ell_t - \nu m_t -\quad \text{and} \quad -c_t = \bar c_t - \nu m_t -``` - -where - -* $\nu := \lambda / (1 + 2 \lambda)$ -* $\bar \ell_t := (b_t - d_t + g_t) / 2$ -* $\bar c_t := (b_t + d_t - g_t) / 2$ -* $m_t := (b_t - d_t - s_t ) / 2$ - -Apart from $\nu$, all of these quantities are expressed in terms of exogenous variables. - -To solve for $\nu$, we can use the government's budget constraint again. - -The term inside the brackets in {eq}`lq_gc2` is $(b_t - c_t)(s_t + g_t) - (b_t - c_t) \ell_t + \ell_t^2$. - -Using {eq}`lq_lcex`, the definitions above and the fact that $\bar \ell -= b - \bar c$, this term can be rewritten as - -$$ -(b_t - \bar c_t) (g_t + s_t ) + 2 m_t^2 ( \nu^2 - \nu) -$$ - -Reinserting into {eq}`lq_gc2`, we get - -```{math} -:label: lq_gc22 - -\mathbb E -\left\{ -\sum_{t=0}^{\infty} \beta^t -(b_t - \bar c_t) (g_t + s_t ) -\right\} -+ -( \nu^2 - \nu) \mathbb E -\left\{ -\sum_{t=0}^{\infty} \beta^t 2 m_t^2 -\right\} -= 0 -``` - -Although it might not be clear yet, we are nearly there because: - -* The two expectations terms in {eq}`lq_gc22` can be solved for in terms of model primitives. -* This in turn allows us to solve for the Lagrange multiplier $\nu$. -* With $\nu$ in hand, we can go back and solve for the allocations via {eq}`lq_lcex`. 
-* Once we have the allocations, prices and the tax system can be derived from - {eq}`lq_hfoc`. - -### Computing the Quadratic Term - -Let's consider how to obtain the term $\nu$ in {eq}`lq_gc22`. - -If we can compute the two expected geometric sums - -```{math} -:label: lq_gc3 - -b_0 := \mathbb E -\left\{ -\sum_{t=0}^{\infty} \beta^t -(b_t - \bar c_t) (g_t + s_t ) -\right\} -\quad \text{and} \quad -a_0 := \mathbb E -\left\{ -\sum_{t=0}^{\infty} \beta^t 2 m_t^2 -\right\} -``` - -then the problem reduces to solving - -$$ -b_0 + a_0 (\nu^2 - \nu) = 0 -$$ - -for $\nu$. - -Provided that $4 b_0 < a_0$, there is a unique solution $\nu \in -(0, 1/2)$, and a unique corresponding $\lambda > 0$. - -Let's work out how to compute mathematical expectations in {eq}`lq_gc3`. - -For the first one, the random variable $(b_t - \bar c_t) (g_t + s_t )$ inside the summation can be expressed as - -$$ -\frac{1}{2} x_t' (S_b - S_d + S_g)' (S_g + S_s) x_t -$$ - -For the second expectation in {eq}`lq_gc3`, the random variable $2 m_t^2$ can be written as - -$$ -\frac{1}{2} x_t' (S_b - S_d - S_s)' (S_b - S_d - S_s) x_t -$$ - -It follows that both objects of interest are special cases of the expression - -```{math} -:label: lq_eqs - -q(x_0) = \mathbb E \sum_{t=0}^{\infty} \beta^t x_t' H x_t -``` - -where $H$ is a matrix conformable to $x_t$ and $x_t'$ is the transpose of column vector $x_t$. - -Suppose first that $\{x_t\}$ is the Gaussian VAR described {ref}`above `. - -In this case, the formula for computing $q(x_0)$ is known to be $q(x_0) = x_0' Q x_0 + v$, where - -* $Q$ is the solution to $Q = H + \beta A' Q A$, and -* $v = \text{trace} \, (C' Q C) \beta / (1 - \beta)$ - -The first equation is known as a discrete Lyapunov equation, and can be solved -using [this function](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/matrix_eqn.jl#L6). - -### Finite state Markov case - -Next suppose that $\{x_t\}$ is the discrete Markov process described {ref}`above `. 
- -Suppose further that each $x_t$ takes values in the state space $\{x^1, \ldots, x^N\} \subset \mathbb R^k$. - -Let $h \colon \mathbb R^k \to \mathbb R$ be a given function, and suppose that we -wish to evaluate - -$$ -q(x_0) = \mathbb E \sum_{t=0}^{\infty} \beta^t h(x_t) -\quad \text{given} \quad x_0 = x^j -$$ - -For example, in the discussion above, $h(x_t) = x_t' H x_t$. - -It is legitimate to pass the expectation through the sum, leading to - -```{math} -:label: lq_ise - -q(x_0) = \sum_{t=0}^{\infty} \beta^t (P^t h)[j] -``` - -Here - -* $P^t$ is the $t$-th power of the transition matrix $P$ -* $h$ is, with some abuse of notation, the vector $(h(x^1), \ldots, h(x^N))$ -* $(P^t h)[j]$ indicates the $j$-th element of $P^t h$ - -It can be show that {eq}`lq_ise` is in fact equal to the $j$-th element of -the vector $(I - \beta P)^{-1} h$. - -This last fact is applied in the calculations below. - -### Other Variables - -We are interested in tracking several other variables besides the ones -described above. - -To prepare the way for this, we define - -$$ -p^t_{t+j} = \frac{b_{t+j}- c_{t+j}}{b_t - c_t} -$$ - -as the scaled Arrow-Debreu time $t$ price of a history contingent claim on one unit of consumption at time $t+j$. - -These are prices that would prevail at time $t$ if market were reopened at time $t$. - -These prices are constituents of the present value of government obligations outstanding at time $t$, which can be expressed as - -```{math} -:label: lq_cb - -B_t := -\mathbb E_t \sum_{j=0}^{\infty} \beta^j p^t_{t+j} -(\tau_{t+j} \ell_{t+j} - g_{t+j}) -``` - -Using our expression for prices and the Ramsey plan, we can also write -$B_t$ as - -$$ -B_t = -\mathbb E_t \sum_{j=0}^{\infty} \beta^j -\frac{ (b_{t+j} - c_{t+j})(\ell_{t+j} - g_{t+j}) - \ell^2_{t+j} } -{ b_t - c_t } -$$ - -This version is more convenient for computation. 
- -Using the equation - -$$ -p^t_{t+j} = p^t_{t+1} p^{t+1}_{t+j} -$$ - -it is possible to verity that {eq}`lq_cb` implies that - -$$ -B_t = (\tau_t \ell_t - g_t) + E_t \sum_{j=1}^\infty p^t_{t+j} (\tau_{t+j} \ell_{t+j} - g_{t+j}) -$$ - -and - -```{math} -:label: lq_cb22 - -B_t = (\tau_t \ell_t - g_t) + \beta E_t p^t_{t+1} B_{t+1} -``` - -Define - -```{math} -:label: lq_rfr - -R^{-1}_{t} := \mathbb E_t \beta^j p^t_{t+1} -``` - -$R_{t}$ is the gross $1$-period risk-free rate for loans -between $t$ and $t+1$. - -### A Martingale - -We now want to study the following two objects, namely, - -$$ -\pi_{t+1} := B_{t+1} - R_t [B_t - (\tau_t \ell_t - g_t)] -$$ - -and the cumulation of $\pi_t$ - -$$ -\Pi_t := \sum_{s=0}^t \pi_t -$$ - -The term $\pi_{t+1}$ is the difference between two quantities: - -* $B_{t+1}$, the value of government debt at the start of period $t+1$. -* $R_t [B_t + g_t - \tau_t ]$, which is what the government would have owed at the beginning of - period $t+1$ if it had simply borrowed at the one-period risk-free rate rather than selling state-contingent securities. - -> - -Thus, $\pi_{t+1}$ is the excess payout on the actual portfolio of state contingent government debt relative to an alternative -portfolio sufficient to finance $B_t + g_t - \tau_t \ell_t$ and consisting entirely of risk-free one-period bonds. 
- -Use expressions {eq}`lq_cb22` and {eq}`lq_rfr` to obtain - -$$ -\pi_{t+1} = B_{t+1} - \frac{1}{\beta E_t p^t_{t+1}} \left[\beta E_t p^t_{t+1} B_{t+1} \right] -$$ - -or - -```{math} -:label: lq_pidist - -\pi_{t+1} = B_{t+1} - \tilde E_t B_{t+1} -``` - -where $\tilde E_t$ is the conditional mathematical expectation taken with respect to a one-step transition density -that has been formed by multiplying the original transition density with the likelihood ratio - -$$ -m^t_{t+1} = \frac{p^t_{t+1}}{E_t p^t_{t+1}} -$$ - -It follows from equation {eq}`lq_pidist` that - -$$ -\tilde E_t \pi_{t+1} = \tilde E_t B_{t+1} - \tilde E_t B_{t+1} = 0 -$$ - -which asserts that $\{\pi_{t+1}\}$ is a martingale difference sequence under the distorted probability measure, and -that $\{\Pi_t\}$ is a martingale under the distorted probability measure. - -In the tax-smoothing model of Robert Barro {cite}`Barro1979`, government debt is a random walk. - -In the current model, government debt $\{B_t\}$ is not a random walk, but the `excess payoff` $\{\Pi_t\}$ on it is. - -## Implementation - -The following code provides functions for - -1. Solving for the Ramsey plan given a specification of the economy. -1. Simulating the dynamics of the major variables. 
- -Description and clarifications are given below - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test -``` - -```{code-cell} julia -using LaTeXStrings, QuantEcon, Plots, LinearAlgebra - -abstract type AbstractStochProcess end - -struct ContStochProcess{TF <: AbstractFloat} <: AbstractStochProcess - A::Matrix{TF} - C::Matrix{TF} -end - -struct DiscreteStochProcess{TF <: AbstractFloat} <: AbstractStochProcess - P::Matrix{TF} - x_vals::Matrix{TF} -end - -struct Economy{TF <: AbstractFloat, SP <: AbstractStochProcess} - beta::TF - Sg::Matrix{TF} - Sd::Matrix{TF} - Sb::Matrix{TF} - Ss::Matrix{TF} - proc::SP -end - -function compute_exog_sequences(econ, x) - # compute exogenous variable sequences - Sg, Sd, Sb, Ss = econ.Sg, econ.Sd, econ.Sb, econ.Ss - g, d, b, s = [dropdims(S * x, dims = 1) for S in (Sg, Sd, Sb, Ss)] - - #= solve for Lagrange multiplier in the govt budget constraint - In fact we solve for nu = lambda / (1 + 2*lambda). Here nu is the - solution to a quadratic equation a(nu^2 - nu) + b = 0 where - a and b are expected discounted sums of quadratic forms of the state. 
=# - Sm = Sb - Sd - Ss - - return g, d, b, s, Sm -end - -function compute_allocation(econ, Sm, nu, x, b) - (; Sg, Sd, Sb, Ss) = econ - # solve for the allocation given nu and x - Sc = 0.5 .* (Sb + Sd - Sg - nu .* Sm) - Sl = 0.5 .* (Sb - Sd + Sg - nu .* Sm) - c = dropdims(Sc * x, dims = 1) - l = dropdims(Sl * x, dims = 1) - p = dropdims((Sb - Sc) * x, dims = 1) # Price without normalization - tau = 1 .- l ./ (b .- c) - rvn = l .* tau - - return Sc, Sl, c, l, p, tau, rvn -end - -function compute_nu(a0, b0) - disc = a0^2 - 4a0 * b0 - - if disc >= 0 - nu = 0.5 * (a0 - sqrt(disc)) / a0 - else - println("There is no Ramsey equilibrium for these parameters.") - error("Government spending (economy.g) too low") - end - - # Test that the Lagrange multiplier has the right sign - if nu * (0.5 - nu) < 0 - print("Negative multiplier on the government budget constraint.") - error("Government spending (economy.g) too low") - end - - return nu -end - -function compute_Pi(B, R, rvn, g, xi) - pi = B[2:end] - R[1:(end - 1)] .* B[1:(end - 1)] - rvn[1:(end - 1)] + - g[1:(end - 1)] - Pi = cumsum(pi .* xi) - return pi, Pi -end - -function compute_paths(econ::Economy{<:AbstractFloat, <:DiscreteStochProcess}, - T) - # simplify notation - (; beta, Sg, Sd, Sb, Ss) = econ - (; P, x_vals) = econ.proc - - mc = MarkovChain(P) - state = simulate(mc, T, init = 1) - x = x_vals[:, state] - - # Compute exogenous sequence - g, d, b, s, Sm = compute_exog_sequences(econ, x) - - # compute a0, b0 - ns = size(P, 1) - F = I - beta .* P - a0 = (F \ ((Sm * x_vals)' .^ 2))[1] ./ 2 - H = ((Sb - Sd + Sg) * x_vals) .* ((Sg - Ss) * x_vals) - b0 = (F \ H')[1] ./ 2 - - # compute lagrange multiplier - nu = compute_nu(a0, b0) - - # Solve for the allocation given nu and x - Sc, Sl, c, l, p, tau, rvn = compute_allocation(econ, Sm, nu, x, b) - - # compute remaining variables - H = ((Sb - Sc) * x_vals) .* ((Sl - Sg) * x_vals) - (Sl * x_vals) .^ 2 - temp = dropdims(F * H', dims = 2) - B = temp[state] ./ p - H = 
dropdims(P[state, :] * ((Sb - Sc) * x_vals)', dims = 2) - R = p ./ (beta .* H) - temp = dropdims(P[state, :] * ((Sb - Sc) * x_vals)', dims = 2) - xi = p[2:end] ./ temp[1:(end - 1)] - - # compute pi - pi, Pi = compute_Pi(B, R, rvn, g, xi) - - return (; g, d, b, s, c, l, p, tau, rvn, B, R, pi, Pi, xi) -end - -function compute_paths(econ::Economy{<:AbstractFloat, <:ContStochProcess}, T) - # simplify notation - (; beta, Sg, Sd, Sb, Ss) = econ - (; A, C) = econ.proc - - # generate an initial condition x0 satisfying x0 = A x0 - nx, nx = size(A) - x0 = nullspace(I - A) - x0 = x0[end] < 0 ? -x0 : x0 - x0 = x0 ./ x0[end] - x0 = dropdims(x0, dims = 2) - - # generate a time series x of length T starting from x0 - nx, nw = size(C) - x = zeros(nx, T) - w = randn(nw, T) - x[:, 1] = x0 - for t in 2:T - x[:, t] = A * x[:, t - 1] + C * w[:, t] - end - - # compute exogenous sequence - g, d, b, s, Sm = compute_exog_sequences(econ, x) - - # compute a0 and b0 - H = Sm'Sm - a0 = 0.5 * var_quadratic_sum(A, C, H, beta, x0) - H = (Sb - Sd + Sg)' * (Sg + Ss) - b0 = 0.5 * var_quadratic_sum(A, C, H, beta, x0) - - # compute lagrange multiplier - nu = compute_nu(a0, b0) - - # solve for the allocation given nu and x - Sc, Sl, c, l, p, tau, rvn = compute_allocation(econ, Sm, nu, x, b) - - # compute remaining variables - H = Sl'Sl - (Sb - Sc)' * (Sl - Sg) - L = zeros(T) - for t in eachindex(L) - L[t] = var_quadratic_sum(A, C, H, beta, x[:, t]) - end - B = L ./ p - Rinv = dropdims(beta .* (Sb - Sc) * A * x, dims = 1) ./ p - R = 1 ./ Rinv - AF1 = (Sb - Sc) * x[:, 2:end] - AF2 = (Sb - Sc) * A * x[:, 1:(end - 1)] - xi = AF1 ./ AF2 - xi = dropdims(xi, dims = 1) - - # compute pi - pi, Pi = compute_Pi(B, R, rvn, g, xi) - - return (; g, d, b, s, c, l, p, tau, rvn, B, R, pi, Pi, xi) -end - -function gen_fig_1(path) - T = length(path.c) - - plt_1 = plot(path.rvn, lw = 2, label = L"\tau_t l_t") - plot!(plt_1, path.g, lw = 2, label = L"g_t") - plot!(plt_1, path.c, lw = 2, label = L"c_t") - plot!(xlabel = 
"Time", grid = true) - - plt_2 = plot(path.rvn, lw = 2, label = L"\tau_t l_t") - plot!(plt_2, path.g, lw = 2, label = L"g_t") - plot!(plt_2, path.B[2:end], lw = 2, label = L"B_{t+1}") - plot!(xlabel = "Time", grid = true) - - plt_3 = plot(path.R, lw = 2, label = L"R_{t-1}") - plot!(plt_3, xlabel = "Time", grid = true) - - plt_4 = plot(path.rvn, lw = 2, label = L"\tau_t l_t") - plot!(plt_4, path.g, lw = 2, label = L"g_t") - plot!(plt_4, path.pi, lw = 2, label = L"\pi_t") - plot!(plt_4, xlabel = "Time", grid = true) - - plot(plt_1, plt_2, plt_3, plt_4, layout = (2, 2), size = (800, 600)) -end - -function gen_fig_2(path) - T = length(path.c) - - paths = [path.xi, path.Pi] - labels = [L"\xi_t", L"\Pi_t"] - plt_1 = plot() - plt_2 = plot() - plots = [plt_1, plt_2] - - for (plot, path, label) in zip(plots, paths, labels) - plot!(plot, 2:T, path, lw = 2, label = label, xlabel = "Time", - grid = true) - end - plot(plt_1, plt_2, layout = (2, 1), size = (600, 500)) -end -``` - -### Comments on the Code - -The function `var_quadratic_sum` From `QuantEcon.jl` is for computing the value of {eq}`lq_eqs` -when the exogenous process $\{ x_t \}$ is of the VAR type described {ref}`above `. - -This code defines two Types: `Economy` and `Path`. - -The first is used to collect all the parameters and primitives of a given LQ -economy, while the second collects output of the computations. - -## Examples - -Let's look at two examples of usage. - -(lq_cc)= -### The Continuous Case - -Our first example adopts the VAR specification described {ref}`above `. - -Regarding the primitives, we set - -* $\beta = 1 / 1.05$ -* $b_t = 2.135$ and $s_t = d_t = 0$ for all $t$ - -Government spending evolves according to - -$$ -g_{t+1} - \mu_g = \rho (g_t - \mu_g) + C_g w_{g, t+1} -$$ - -with $\rho = 0.7$, $\mu_g = 0.35$ and $C_g = \mu_g \sqrt{1 - \rho^2} / 10$. 
- -Here's the code - -```{code-cell} julia -# for reproducible results -using Random -Random.seed!(42) - -# parameters -beta = 1 / 1.05 -rho, mg = 0.7, 0.35 -A = [rho mg*(1 - rho); 0.0 1.0] -C = [sqrt(1 - rho^2) * mg/10 0.0; 0 0] -Sg = [1.0 0.0] -Sd = [0.0 0.0] -Sb = [0 2.135] -Ss = [0.0 0.0] -proc = ContStochProcess(A, C) - -econ = Economy(beta, Sg, Sd, Sb, Ss, proc) -T = 50 -path = compute_paths(econ, T) - -gen_fig_1(path) -``` - - -The legends on the figures indicate the variables being tracked. - -Most obvious from the figure is tax smoothing in the sense that tax revenue is -much less variable than government expenditure - -```{code-cell} julia -gen_fig_2(path) -``` - -```{only} html -See the original manuscript for comments and interpretation -``` - -```{only} latex -See the original [manuscript](https://lectures.quantecon.org/_downloads/firenze.pdf) for comments and interpretation -``` - -### The Discrete Case - -Our second example adopts a discrete Markov specification for the exogenous process - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42); -``` - -```{code-cell} julia -# Parameters -beta = 1 / 1.05 -P = [0.8 0.2 0.0 - 0.0 0.5 0.5 - 0.0 0.0 1.0] - -# Possible states of the world -# Each column is a state of the world. 
The rows are [g d b s 1] -x_vals = [0.5 0.5 0.25; - 0.0 0.0 0.0; - 2.2 2.2 2.2; - 0.0 0.0 0.0; - 1.0 1.0 1.0] -Sg = [1.0 0.0 0.0 0.0 0.0] -Sd = [0.0 1.0 0.0 0.0 0.0] -Sb = [0.0 0.0 1.0 0.0 0.0] -Ss = [0.0 0.0 0.0 1.0 0.0] -proc = DiscreteStochProcess(P, x_vals) - -econ = Economy(beta, Sg, Sd, Sb, Ss, proc) -T = 15 -path = compute_paths(econ, T) - -gen_fig_1(path) -``` - - -The call `gen_fig_2(path)` generates - -```{code-cell} julia -gen_fig_2(path) -``` - -```{only} html -See the original manuscript for comments and interpretation -``` - -```{only} latex -See the original [manuscript](https://lectures.quantecon.org/_downloads/firenze.pdf) for comments and interpretation -``` - -## Exercises - -(lqramsey_ex1)= -### Exercise 1 - -Modify the VAR example {ref}`given above `, setting - -$$ -g_{t+1} - \mu_g = \rho (g_{t-3} - \mu_g) + C_g w_{g, t+1} -$$ - -with $\rho = 0.95$ and $C_g = 0.7 \sqrt{1 - \rho^2}$. - -Produce the corresponding figures. - -## Solutions - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42); -``` - -```{code-cell} julia ---- -tags: [hide-output] ---- -# parameters -beta = 1 / 1.05 -rho, mg = .95, .35 -A = [0. 0. 0. rho mg*(1-rho); - 1. 0. 0. 0. 0.; - 0. 1. 0. 0. 0.; - 0. 0. 1. 0. 0.; - 0. 0. 0. 0. 1.] -C = zeros(5, 5) -C[1, 1] = sqrt(1 - rho^2) * mg / 8 -Sg = [1. 0. 0. 0. 0.] -Sd = [0. 0. 0. 0. 0.] -Sb = [0. 0. 0. 0. 2.135] -Ss = [0. 0. 0. 0. 0.] 
-proc = ContStochProcess(A, C) -econ = Economy(beta, Sg, Sd, Sb, Ss, proc) - -T = 50 -path = compute_paths(econ, T) -``` - -```{code-cell} julia -gen_fig_1(path) -``` - -```{code-cell} julia -gen_fig_2(path) -``` - diff --git a/lectures/dynamic_programming_squared/opt_tax_recur.md b/lectures/dynamic_programming_squared/opt_tax_recur.md deleted file mode 100644 index 3637df51..00000000 --- a/lectures/dynamic_programming_squared/opt_tax_recur.md +++ /dev/null @@ -1,1893 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(opt_tax_recur)= -```{raw} html - -``` - -# Optimal Taxation with State-Contingent Debt - -```{contents} Contents -:depth: 2 -``` - -## Overview - -This lecture describes a celebrated model of optimal fiscal policy by Robert E. -Lucas, Jr., and Nancy Stokey {cite}`LucasStokey1983`. - -The model revisits classic issues about how to pay for a war. - -Here a *war* means a more or less temporary surge in an exogenous government expenditure process. - -The model features - -* a government that must finance an exogenous stream of government expenditures with either - * a flat rate tax on labor, or - * purchases and sales from a full array of Arrow state-contingent securities -* a representative household that values consumption and leisure -* a linear production function mapping labor into a single good -* a Ramsey planner who at time $t=0$ chooses a plan for taxes and - trades of [Arrow securities](https://en.wikipedia.org/wiki/Arrow_security) for all $t \geq 0$ - -After first presenting the model in a space of sequences, we shall represent it -recursively in terms of two Bellman equations formulated along lines that we -encountered in {doc}`Dynamic Stackelberg models <../dynamic_programming_squared/dyn_stack>`. 
- -As in {doc}`Dynamic Stackelberg models <../dynamic_programming_squared/dyn_stack>`, to apply dynamic programming -we shall define the state vector artfully. - -In particular, we shall include forward-looking variables that summarize optimal -responses of private agents to a Ramsey plan. - -See {doc}`Optimal taxation <../dynamic_programming_squared/lqramsey>` for an analysis within a linear-quadratic setting. - - -```{code-cell} julia -using LinearAlgebra, Statistics -using QuantEcon, NLsolve, NLopt, Interpolations -``` - -## A Competitive Equilibrium with Distorting Taxes - -For $t \geq 0$, a history $s^t = [s_t, s_{t-1}, \ldots, s_0]$ of an -exogenous state $s_t$ has joint probability density $\pi_t(s^t)$. - -We begin by assuming that government purchases $g_t(s^t)$ at time $t \geq 0$ depend on $s^t$. - -Let $c_t(s^t)$, $\ell_t(s^t)$, and $n_t(s^t)$ denote consumption, -leisure, and labor supply, respectively, at history $s^t$ and date $t$. - -A representative household is endowed with one unit of time that can be divided -between leisure $\ell_t$ and labor $n_t$: - -```{math} -:label: feas1_opt_tax - -n_t(s^t) + \ell_t(s^t) = 1 -``` - -Output equals $n_t(s^t)$ and can be divided between $c_t(s^t)$ and $g_t(s^t)$ - -```{math} -:label: TSs_techr_opt_tax - -c_t(s^t) + g_t(s^t) = n_t(s^t) -``` - -A representative household's preferences over $\{c_t(s^t), \ell_t(s^t)\}_{t=0}^\infty$ are ordered by - -```{math} -:label: TS_prefr_opt_tax - -\sum_{t=0}^\infty \sum_{s^t} \beta^t \pi_t(s^t) u[c_t(s^t), \ell_t(s^t)] -``` - -where the utility function $u$ is increasing, strictly concave, and three -times continuously differentiable in both arguments. - -The technology pins down a pre-tax wage rate to unity for all $t, s^t$. - -The government imposes a flat-rate tax $\tau_t(s^t)$ on labor income at -time $t$, history $s^t$. - -There are complete markets in one-period Arrow securities. 
- -One unit of an Arrow security issued at time $t$ at history $s^t$ -and promising to pay one unit of time $t+1$ consumption in state $s_{t+1}$ -costs $p_{t+1}(s_{t+1}|s^t)$. - -The government issues one-period Arrow securities each period. - -The government has a sequence of budget constraints whose time $t \geq 0$ component is - -```{math} -:label: TS_govr - -g_t(s^t) = \tau_t(s^t) n_t(s^t) + \sum_{s_{t+1}} p_{t+1}(s_{t+1} | s^t) b_{t+1}(s_{t+1} | s^t) - -b_t(s_t | s^{t-1}) -``` - -where - -* $p_{t+1}(s_{t+1}|s^t)$ is a competitive equilibrium price of one unit of - consumption at date $t+1$ in state $s_{t+1}$ at date $t$ and history $s^t$ -* $b_t(s_t|s^{t-1})$ is government debt falling due at time $t$, history $s^t$. - -Government debt $b_0(s_0)$ is an exogenous initial condition. - -The representative household has a sequence of budget constraints whose time $t\geq 0$ component is - -```{math} -:label: TS_bcr - -c_t(s^t) + \sum_{s_{t+1}} p_t(s_{t+1} | s^t) b_{t+1}(s_{t+1} | s^t) -= \left[1-\tau_t(s^t)\right] n_t(s^t) + b_t(s_t | s^{t-1}) \quad \forall t \geq 0. -``` - -A **government policy** is an exogenous sequence $\{g(s_t)\}_{t=0}^\infty$, -a tax rate sequence $\{\tau_t(s^t)\}_{t=0}^\infty$, and a government debt sequence $\{b_{t+1}(s^{t+1})\}_{t=0}^\infty$. - -A **feasible allocation** is a consumption-labor supply plan $\{c_t(s^t), n_t(s^t)\}_{t=0}^\infty$ -that satisfies {eq}`TSs_techr_opt_tax` at all $t, s^t$. - -A **price system** is a sequence of Arrow security prices $\{p_{t+1}(s_{t+1} | s^t) \}_{t=0}^\infty$. - -The household faces the price system as a price-taker and takes the government policy as given. - -The household chooses $\{c_t(s^t), \ell_t(s^t)\}_{t=0}^\infty$ to maximize {eq}`TS_prefr_opt_tax` subject to {eq}`TS_bcr` and {eq}`feas1_opt_tax` for all $t, s^t$. 
- -A **competitive equilibrium with distorting taxes** is a feasible allocation, -a price system, and a government policy such that - -* Given the price system and the government policy, the allocation solves the - household's optimization problem. -* Given the allocation, government policy, and price system, the government's - budget constraint is satisfied for all $t, s^t$. - -Note: There are many competitive equilibria with distorting taxes. - -They are indexed by different government policies. - -The **Ramsey problem** or **optimal taxation problem** is to choose a competitive -equilibrium with distorting taxes that maximizes {eq}`TS_prefr_opt_tax`. - -### Arrow-Debreu Version of Price System - -We find it convenient sometimes to work with the Arrow-Debreu price system that is -implied by a sequence of Arrow securities prices. - -Let $q_t^0(s^t)$ be the price at time $0$, measured in time $0$ -consumption goods, of one unit of consumption at time $t$, -history $s^t$. - -The following recursion relates Arrow-Debreu prices $\{q_t^0(s^t)\}_{t=0}^\infty$ -to Arrow securities prices $\{p_{t+1}(s_{t+1}|s^t)\}_{t=0}^\infty$ - -```{math} -:label: TS_foc - -q^0_{t+1}(s^{t+1}) = p_{t+1}(s_{t+1}|s^t) q^0_t(s^t) \quad s.t. \quad q_0^0(s^0) = 1 -``` - -Arrow-Debreu prices are useful when we want to compress a sequence of budget -constraints into a single intertemporal budget constraint, as we shall find it -convenient to do below. - -### Primal Approach - -We apply a popular approach to solving a Ramsey problem, called the *primal approach*. - -The idea is to use first-order conditions for household optimization to -eliminate taxes and prices in favor of quantities, then pose an optimization problem -cast entirely in terms of quantities. - -After Ramsey quantities have been found, taxes and prices can then be unwound -from the allocation. 
- -The primal approach uses four steps: - -* This intertemporal constraint involves only the allocation and is regarded - as an *implementability constraint*. - -> - -3. Find the allocation that maximizes the utility of the representative household -{eq}`TS_prefr_opt_tax` subject to the feasibility constraints {eq}`feas1_opt_tax` -and {eq}`TSs_techr_opt_tax` and the implementability condition derived in step 2. - -* This optimal allocation is called the **Ramsey allocation**. - -> - -4. Use the Ramsey allocation together with the formulas from step 1 to find -taxes and prices. - - - -### The Implementability Constraint - -By sequential substitution of one one-period budget constraint {eq}`TS_bcr` into -another, we can obtain the household's present-value budget constraint: - -```{math} -:label: TS_bcPV2 - -\sum_{t=0}^\infty \sum_{s^t} q^0_t(s^t) c_t(s^t) = -\sum_{t=0}^\infty \sum_{s^t} q^0_t(s^t) [1-\tau_t(s^t)] n_t(s^t) + -b_0 -``` - -$\{q^0_t(s^t)\}_{t=1}^\infty$ can be interpreted as a time $0$ -Arrow-Debreu price system. - -To approach the Ramsey problem, we study the household's optimization problem. - -First-order conditions for the household’s problem for $\ell_t(s^t)$ -and $b_t(s_{t+1}| s^t)$, respectively, imply - -```{math} -:label: LSA_taxr - -(1 - \tau_t(s^t)) = {\frac{u_l(s^t)}{u_c(s^t)}} -``` - -and - -```{math} -:label: LS101 - -p_{t+1}(s_{t+1}| s^t) = \beta \pi(s_{t+1} | s^t) \left({\frac{u_c(s^{t+1})}{u_c({s^t})}} \right) -``` - -where $\pi(s_{t+1} | s^t)$ is the probability distribution of $s_{t+1}$ -conditional on history $s^t$. 
- -Equation {eq}`LS101` implies that the Arrow-Debreu price system satisfies - -```{math} -:label: LS102 - -q_t^0(s^t) = \beta^{t} \pi_{t}(s^{t}) - {u_c(s^{t}) \over u_c(s^0)} -``` - -Using the first-order conditions {eq}`LSA_taxr` and {eq}`LS101` to eliminate -taxes and prices from {eq}`TS_bcPV2`, we derive the *implementability condition* - -```{math} -:label: TSs_cham1 - -\sum_{t=0}^\infty \sum_{s^t} \beta^t \pi_t(s^t) - [u_c(s^t) c_t(s^t) - u_\ell(s^t) n_t(s^t)] - - u_c(s^0) b_0 = 0. -``` - -The **Ramsey problem** is to choose a feasible allocation that maximizes - -```{math} -:label: TS_prefr2 - -\sum_{t=0}^\infty \sum_{s^t} \beta^t \pi_t(s^t) u[c_t(s^t), 1 - n_t(s^t)] -``` - -subject to {eq}`TSs_cham1`. - -### Solution Details - -First define a "pseudo utility function" - -```{math} -:label: TS_cham17 - -V\left[c_t(s^t), n_t(s^t), \Phi\right] = -u[c_t(s^t),1-n_t(s^t)] + -\Phi \left[ u_c(s^t) c_t(s^t) - -u_\ell(s^t) n_t(s^t) \right] -``` - -where $\Phi$ is a Lagrange multiplier on the implementability condition {eq}`TS_bcPV2`. - -Next form the Lagrangian - -```{math} -:label: TS_chamlag - -J = \sum_{t=0}^\infty -\sum_{s^t} \beta^t \pi_t(s^t) -\Bigl\{ - V[c_t(s^t), n_t(s^t), \Phi] + \theta_t(s^t) - \Bigl[ n_t(s^t) - c_t(s^t) - g_t(s_t) \Bigr] -\Bigr\} - \Phi u_c(0) b_0 -``` - -where $\{\theta_t(s^t); \forall s^t\}_{t\geq0}$ is a sequence of Lagrange -multipliers on the feasible conditions {eq}`TSs_techr_opt_tax`. - -Given an initial government debt $b_0$, we want to maximize $J$ -with respect to $\{c_t(s^t), n_t(s^t); \forall s^t \}_{t\geq0}$ and to minimize with respect -to $\{\theta(s^t); \forall s^t \}_{t\geq0}$. 
- -The first-order conditions for the Ramsey problem for periods $t \geq 1$ and $t=0$, respectively, are - -```{math} -:label: eqFONCRamsey1 - -\begin{aligned} - c_t(s^t)\rm{:} & - \; (1+\Phi) u_c(s^t) + \Phi \left[u_{cc}(s^t) c_t(s^t) - - u_{\ell c}(s^t) n_t(s^t) \right] - \theta_t(s^t) = 0, \quad t \geq 1 - \\ - n_t(s^t)\rm{:} & - \; -(1+\Phi) u_{\ell}(s^t) - \Phi \left[u_{c\ell}(s^t) c_t(s^t) - - u_{\ell \ell}(s^t) n_t(s^t) \right] + \theta_t(s^t) = 0, \quad t \geq 1 -\end{aligned} -``` - -and - -```{math} -:label: eqFONCRamsey0 - -\begin{aligned} - c_0(s^0, b_0)\rm{:} & - \; (1+\Phi) u_c(s^0, b_0) + \Phi \left[u_{cc}(s^0, b_0) c_0(s^0, b_0) - - u_{\ell c}(s^0, b_0) n_0(s^0, b_0) \right] - \theta_0(s^0, b_0) \\ - & \quad \quad \quad \quad \quad \quad - \Phi u_{cc}(s^0, b_0) b_0 = 0 - \\ - n_0(s^0, b_0)\rm{:} & - \; -(1+\Phi) u_{\ell}(s^0, b_0) - \Phi \left[u_{c\ell}(s^0, b_0) c_0(s^0, b_0) - - u_{\ell \ell}(s^0, b_0) n_0(s^0, b_0) \right] + \theta_0(s^0, b_0) \\ - & \quad \quad \quad \quad \quad \quad + \Phi u_{c \ell}(s^0, b_0) b_0 = 0 -\end{aligned} -``` - -Please note how these first-order conditions differ between $t=0$ and $t \geq 1$. - -It is instructive to use first-order conditions {eq}`eqFONCRamsey1` for -$t \geq 1$ to eliminate the multipliers $\theta_t(s^t)$. - -For convenience, we suppress the time subscript and the index $s^t$ and obtain - -```{math} -:label: TS_barg - -\begin{aligned} - (1+\Phi) &u_c(c,1-c-g) + \Phi \bigl[c u_{cc}(c,1-c-g) - - (c+g) u_{\ell c}(c,1-c-g) \bigr] - \\ - &= (1+\Phi) u_{\ell}(c,1-c-g) + \Phi \bigl[c u_{c\ell}(c,1-c-g) - - (c+g) u_{\ell \ell}(c,1-c-g) \bigr] -\end{aligned} -``` - -where we have imposed conditions {eq}`feas1_opt_tax` and {eq}`TSs_techr_opt_tax`. - -Equation {eq}`TS_barg` is one equation that can be solved to express the -unknown $c$ as a function of the exogenous variable $g$. 
- -We also know that time $t=0$ quantities $c_0$ and $n_0$ satisfy - -```{math} -:label: TS_barg_aust - -\begin{aligned} - (1+\Phi) &u_c(c,1-c-g) + \Phi \bigl[c u_{cc}(c,1-c-g) - - (c+g) u_{\ell c}(c,1-c-g) \bigr] - \\ - &= (1+\Phi) u_{\ell}(c,1-c-g) + \Phi \bigl[c u_{c\ell}(c,1-c-g) - - (c+g) u_{\ell \ell}(c,1-c-g) \bigr] + \Phi (u_{cc} - u_{c,\ell}) b_0 -\end{aligned} -``` - -Notice that a counterpart to $b_0$ does *not* appear -in {eq}`TS_barg`, so $c$ does not depend on it for $t \geq 1$. - -But things are different for time $t=0$. - -An analogous argument for the $t=0$ equations {eq}`eqFONCRamsey0` leads -to one equation that can be solved for $c_0$ as a function of the -pair $(g(s_0), b_0)$. - -These outcomes mean that the following statement would be true even when -government purchases are history-dependent functions $g_t(s^t)$ of the -history of $s^t$. - -**Proposition:** -If government purchases are equal after two histories -$s^t$ and $\tilde s^\tau$ for $t,\tau\geq0$, i.e., if - -$$ -g_t(s^t) = g^\tau(\tilde s^\tau) = g -$$ - -then it follows from {eq}`TS_barg` that the Ramsey choices of consumption and leisure, -$(c_t(s^t),\ell_t(s^t))$ and $(c_j(\tilde s^\tau),\ell_j(\tilde -s^\tau))$, are identical. - -The proposition asserts that the optimal allocation is a function of the -currently realized quantity of government purchases $g$ only and does -*not* depend on the specific history that preceded that realization of $g$. - -### The Ramsey Allocation for a Given $\Phi$ - -Temporarily take $\Phi$ as given. - -We shall compute $c_0(s^0, b_0)$ and $n_0(s^0, b_0)$ from the first-order -conditions {eq}`eqFONCRamsey0`. - -Evidently, for $t \geq 1$, $c$ and -$n$ depend on the time $t$ realization of $g$ only. - -But for $t=0$, $c$ and $n$ depend on both $g_0$ and the -government’s initial debt $b_0$. - -Thus, while $b_0$ -influences $c_0$ and $n_0$, there appears no analogous -variable $b_t$ that influences $c_t$ and $n_t$ for -$t \geq 1$. 
- -The absence of $b_t$ as a determinant of the Ramsey allocation for -$t \geq 1$ and its presence for $t=0$ is a symptom of the -*time-inconsistency* of a Ramsey plan. - -$\Phi$ has to take a value that assures that -the household and the government’s budget constraints are both -satisfied at a candidate Ramsey allocation and price system associated -with that $\Phi$. - -### Further Specialization - -At this point, it is useful to specialize the model in the following ways. - -We assume that $s$ is governed by a finite state Markov chain with states -$s\in [1, \ldots, S]$ and transition matrix $\Pi$, where - -$$ -\Pi(s'|s) = {\rm Prob}(s_{t+1} = s'| s_t =s) -$$ - -Also, assume that government purchases $g$ are an exact time-invariant function -$g(s)$ of $s$. - -We maintain these assumptions throughout the remainder of this lecture. - -### Determining $\Phi$ - -We complete the Ramsey plan by computing the Lagrange multiplier $\Phi$ -on the implementability constraint {eq}`TSs_cham1`. - -Government budget balance restricts $\Phi$ via the following line of reasoning. - -The household's first-order conditions imply - -```{math} -:label: LSA_ta - -(1 - \tau_t(s^t)) = {\frac{u_l(s^t)}{u_c(s^t)} } -``` - -and the implied one-period Arrow securities prices - -```{math} -:label: LSA_Arro - -p_{t+1}(s_{t+1}| s^t) = \beta \Pi(s_{t+1} | s_t) {\frac{u_c(s^{t+1})}{u_c({s^t})}} -``` - -Substituting from {eq}`LSA_ta`, {eq}`LSA_Arro`, and the feasibility -condition {eq}`TSs_techr_opt_tax` into the recursive version {eq}`TS_bcr` of -the household budget constraint gives - -```{math} -:label: LSA_budget - -\begin{aligned} - u_c(s^t) [ n_t(s^t) - g_t(s^t)] + - \beta \sum_{s_{t+1}} \Pi (s_{t+1}| s_t) u_c(s^{t+1}) b_{t+1}(s_{t+1} | s^t) = \\ - u_l (s^t) n_t(s^t) + u_c(s^t) b_t(s_t | s^{t-1}) -\end{aligned} -``` - -Define $x_t(s^t) = u_c(s^t) b_t(s_t | s^{t-1})$. 
- -Notice that $x_t(s^t)$ appears on the right side of {eq}`LSA_budget` while -$\beta$ times the conditional expectation of -$x_{t+1}(s^{t+1})$ appears on the left side. - -Hence the equation shares much of the structure of a simple asset pricing equation with -$x_t$ being analogous to the price of the asset at time $t$. - -We learned earlier that for a Ramsey allocation -$c_t(s^t), n_t(s^t)$ and $b_t(s_t|s^{t-1})$, and therefore -also $x_t(s^t)$, are each functions of $s_t$ only, being -independent of the history $s^{t-1}$ for $t \geq 1$. - -That means that we can express equation {eq}`LSA_budget` as - -```{math} -:label: LSA_budget2 - -u_c(s) -[ n(s) - g(s)] + \beta -\sum_{s'} \Pi(s' | s) x'(s') = u_l(s) n(s) + x(s) -``` - -where $s'$ denotes a next period value of $s$ and -$x'(s')$ denotes a next period value of $x$. - -Equation {eq}`LSA_budget2` is easy to solve for $x(s)$ for -$s = 1, \ldots , S$. - -If we let $\vec n, \vec g, \vec x$ -denote $S \times 1$ vectors whose $i$th elements are the -respective $n, g$, and $x$ values when $s=i$, and let -$\Pi$ be the transition matrix for the Markov state $s$, -then we can express {eq}`LSA_budget2` as the matrix equation - -```{math} -:label: LSA_budget20 - -\vec u_c(\vec n - \vec g) + \beta \Pi \vec x = \vec u_l \vec n + \vec x -``` - -This is a system of $S$ linear equations in the $S \times 1$ -vector $x$, whose solution is - -```{math} -:label: LSA_xsol - -\vec x= (I - \beta \Pi )^{-1} [ \vec u_c (\vec n-\vec g) - \vec u_l \vec n] -``` - -In these equations, by $\vec u_c \vec n$, for example, we mean -element-by-element multiplication of the two vectors. - -After solving for $\vec x$, we can find $b(s_t|s^{t-1})$ in Markov -state $s_t=s$ from $b(s) = {\frac{x(s)}{u_c(s)}}$ or the matrix equation - -```{math} -:label: LSA_bsol - -\vec b = {\frac{ \vec x }{\vec u_c}} -``` - -where division here means element-by-element division of the respective -components of the $S \times 1$ vectors $\vec x$ and -$\vec u_c$. 
- -Here is a computational algorithm: - -1. Start with a guess for the value for $\Phi$, then use the - first-order conditions and the feasibility conditions to compute - $c(s_t), n(s_t)$ for $s \in [1,\ldots, S]$ and - $c_0(s_0,b_0)$ and $n_0(s_0, b_0)$, given $\Phi$ - * these are $2 (S+1)$ equations in $2 (S+1)$ unknowns -1. Solve the $S$ equations {eq}`LSA_xsol` for the $S$ elements - of $\vec x$ - * these depend on $\Phi$ -1. Find a $\Phi$ that satisfies - - ```{math} - :label: Bellman2cons - - u_{c,0} b_0 = u_{c,0} (n_0 - g_0) - u_{l,0} n_0 + \beta \sum_{s=1}^S \Pi(s | s_0) x(s) - ``` - by gradually raising $\Phi$ if the left side of {eq}`Bellman2cons` - exceeds the right side and lowering $\Phi$ if the left side is less than the right side. -1. After computing a Ramsey allocation, recover the flat tax rate on - labor from {eq}`LSA_taxr` and the implied one-period Arrow securities - prices from {eq}`LS101`. - -In summary, when $g_t$ is a time invariant function of a Markov state -$s_t$, a Ramsey plan can be constructed by solving $3S +3$ -equations in $S$ components each of $\vec c$, $\vec n$, and -$\vec x$ together with $n_0, c_0$, and $\Phi$. - -### Time Inconsistency - -Let $\{\tau_t(s^t)\}_{t=0}^\infty, \{b_{t+1}(s_{t+1}| s^t)\}_{t=0}^\infty$ -be a time $0$, state $s_0$ Ramsey plan. - -Then $\{\tau_j(s^j)\}_{j=t}^\infty, \{b_{j+1}(s_{j+1}| s^j)\}_{j=t}^\infty$ -is a time $t$, history $s^t$ continuation of a time -$0$, state $s_0$ Ramsey plan. - -A time $t$, history $s^t$ Ramsey plan is a Ramsey plan that -starts from initial conditions $s^t, b_t(s_t|s^{t-1})$. - -A time $t$, history $s^t$ -continuation of a time $0$, state $0$ Ramsey plan is -*not* a time $t$, history $s^t$ Ramsey plan. - -The means that a Ramsey plan is *not time consistent*. - -Another way to say the same thing is that a Ramsey plan is *time inconsistent*. - -The reason is that a continuation Ramsey plan takes $u_{ct} b_t(s_t|s^{t-1})$ as given, not -$b_t(s_t|s^{t-1})$. 
- -We shall discuss this more below. - -### Specification with CRRA Utility - -In our calculations below and in a {doc}`subsequent lecture <../dynamic_programming_squared/amss>` based on an extension of the Lucas-Stokey model -by Aiyagari, Marcet, Sargent, and Seppälä (2002) {cite}`amss2002`, we shall modify the one-period utility function assumed above. - -(We adopted the preceding utility specification because it was the one used in the original {cite}`LucasStokey1983` paper) - -We will modify their specification by instead assuming that the representative agent has utility function - -$$ -u(c,n) = {\frac{c^{1-\sigma}}{1-\sigma}} - {\frac{n^{1+\gamma}}{1+\gamma}} -$$ - -where $\sigma > 0$, $\gamma >0$. - -We continue to assume that - -$$ -c_t + g_t = n_t -$$ - -We eliminate leisure from the model. - -We also eliminate Lucas and Stokey's restriction that $\ell_t + n_t \leq 1$. - -We replace these two things with the assumption that -labor $n_t \in [0, +\infty]$. - -With these adjustments, the analysis of Lucas and Stokey prevails once we make the following replacements - -$$ -\begin{aligned} -u_\ell(c, \ell) &\sim - u_n(c, n) \\ -u_c(c,\ell) &\sim u_c(c,n) \\ -u_{\ell,\ell}(c,\ell) &\sim u_{nn}(c,n) \\ -u_{c,c}(c,\ell)& \sim u_{c,c}(c,n) \\ -u_{c,\ell} (c,\ell) &\sim 0 \\ -\end{aligned} -$$ - -With these understandings, equations {eq}`TS_barg` and {eq}`TS_barg_aust` simplify in the case of the CRRA utility function. - -They become - -```{math} -:label: TS_barg10 - -(1+\Phi) [u_c(c) + u_n(c+g)] + \Phi[c u_{cc}(c) + (c+g) u_{nn}(c+g)] = 0 -``` - -and - -```{math} -:label: TS_barg11 - -(1+\Phi) [u_c(c_0) + u_n(c_0+g_0)] + \Phi[c_0 u_{cc}(c_0) + (c_0+g_0) u_{nn}(c_0+g_0)] - \Phi u_{cc}(c_0) b_0 = 0 -``` - -In equation {eq}`TS_barg10`, it is understood that $c$ and $g$ are each functions of the Markov state $s$. 
- -In addition, the time $t=0$ budget constraint is satisfied at $c_0$ and initial government debt -$b_0$: - -```{math} -:label: opt_tax_eqn_10 - -b_0 + g_0 = \tau_0 (c_0 + g_0) + \frac{\bar b}{R_0} -``` - -where $R_0$ is the gross interest rate for the Markov state $s_0$ that is assumed to prevail at time $t =0$ -and $\tau_0$ is the time $t=0$ tax rate. - -In equation {eq}`opt_tax_eqn_10`, it is understood that - -```{math} - -\begin{aligned} -\tau_0 = 1 - \frac{u_{l,0}}{u_{c,0}} \\ -R_0 = \beta \sum_{s=1}^S \Pi(s | s_0) \frac{u_c(s)}{u_{c,0}} -\end{aligned} -``` - -### Sequence Implementation - -The above steps are implemented in a type called SequentialAllocation - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test -``` - -```{code-cell} julia ---- -tags: [output_scroll] ---- -using QuantEcon, NLsolve, NLopt, LinearAlgebra, Interpolations - -import QuantEcon: simulate - - mutable struct Model{TF <: AbstractFloat, - TM <: AbstractMatrix{TF}, - TV <: AbstractVector{TF}} - beta::TF - Pi::TM - G::TV - Theta::TV - transfers::Bool - U::Function - Uc::Function - Ucc::Function - Un::Function - Unn::Function - n_less_than_one::Bool - end - - struct SequentialAllocation{TP <: Model, - TI <: Integer, - TV <: AbstractVector} - model::TP - mc::MarkovChain - S::TI - cFB::TV - nFB::TV - XiFB::TV - zFB::TV - end - -function SequentialAllocation(model) - beta, Pi, G, Theta = model.beta, model.Pi, model.G, model.Theta - mc = MarkovChain(Pi) - S = size(Pi, 1) # Number of states - # now find the first best allocation - cFB, nFB, XiFB, zFB = find_first_best(model, S, 1) - - return SequentialAllocation(model, mc, S, cFB, nFB, XiFB, zFB) -end - -function find_first_best(model, S, version) - if version != 1 && version != 2 - throw(ArgumentError("version must be 1 or 2")) - end - beta, Theta, Uc, Un, G, Pi = - model.beta, model.Theta, model.Uc, model.Un, model.G, model.Pi - function res!(out, z) - c = z[1:S] - n = z[S+1:end] - out[1:S] = Theta .* Uc(c, n) + Un(c, n) - 
out[S+1:end] = Theta .* n - c - G - end - res = nlsolve(res!, 0.5 * ones(2 * S)) - - if converged(res) == false - error("Could not find first best") - end - - if version == 1 - cFB = res.zero[1:S] - nFB = res.zero[S+1:end] - XiFB = Uc(cFB, nFB) # Multiplier on the resource constraint - zFB = vcat(cFB, nFB, XiFB) - return cFB, nFB, XiFB, zFB - elseif version == 2 - cFB = res.zero[1:S] - nFB = res.zero[S+1:end] - IFB = Uc(cFB, nFB) .* cFB + Un(cFB, nFB) .* nFB - xFB = \(I - beta * Pi, IFB) - zFB = [vcat(cFB[s], xFB[s], xFB) for s in 1:S] - return cFB, nFB, IFB, xFB, zFB - end -end - -function time1_allocation(pas::SequentialAllocation, mu) - model, S = pas.model, pas.S - Theta, beta, Pi, G, Uc, Ucc, Un, Unn = - model.Theta, model.beta, model.Pi, model.G, - model.Uc, model.Ucc, model.Un, model.Unn - function FOC!(out, z) - c = z[1:S] - n = z[S+1:2S] - Xi = z[2S+1:end] - out[1:S] = Uc(c, n) .- mu * (Ucc(c, n) .* c .+ Uc(c, n)) .- Xi # FOC c - out[S+1:2S] = Un(c, n) .- mu * (Unn(c, n) .* n .+ Un(c, n)) + Theta .* Xi # FOC n - out[2S+1:end] = Theta .* n - c - G # Resource constraint - return out - end - # Find the root of the FOC - res = nlsolve(FOC!, pas.zFB) - if res.f_converged == false - error("Could not find LS allocation.") - end - z = res.zero - c, n, Xi = z[1:S], z[S+1:2S], z[2S+1:end] - # Now compute x - Inv = Uc(c, n) .* c + Un(c, n) .* n - x = \(I - beta * model.Pi, Inv) - return c, n, x, Xi -end - -function time0_allocation(pas::SequentialAllocation, B_, s_0) - model = pas.model - Pi, Theta, G, beta = model.Pi, model.Theta, model.G, model.beta - Uc, Ucc, Un, Unn = - model.Uc, model.Ucc, model.Un, model.Unn - - # First order conditions of planner's problem - function FOC!(out, z) - mu, c, n, Xi = z[1], z[2], z[3], z[4] - xprime = time1_allocation(pas, mu)[3] - out .= vcat( - Uc(c, n) .* (c - B_) .+ Un(c, n) .* n + beta * dot(Pi[s_0, :], xprime), - Uc(c, n) .- mu * (Ucc(c, n) .* (c - B_) + Uc(c, n)) - Xi, - Un(c, n) .- mu * (Unn(c, n) .* n .+ Un(c, n)) + 
Theta[s_0] .* Xi, - (Theta .* n .- c - G)[s_0] - ) - end - - # Find root - res = nlsolve(FOC!, [0.0, pas.cFB[s_0], pas.nFB[s_0], pas.XiFB[s_0]]) - if res.f_converged == false - error("Could not find time 0 LS allocation.") - end - return (res.zero...,) -end - -function time1_value(pas::SequentialAllocation, mu) - model = pas.model - c, n, x, Xi = time1_allocation(pas, mu) - U_val = model.U.(c, n) - V = \(I - model.beta*model.Pi, U_val) - return c, n, x, V -end - -function Τ(model, c, n) - Uc, Un = model.Uc.(c, n), model.Un.(c, n) - return 1 .+ Un ./ (model.Theta .* Uc) -end - -function simulate(pas::SequentialAllocation, B_, s_0, T, sHist = nothing) - - model = pas.model - Pi, beta, Uc = model.Pi, model.beta, model.Uc - - if isnothing(sHist) - sHist = QuantEcon.simulate(pas.mc, T, init=s_0) - end - cHist = zeros(T) - nHist = similar(cHist) - Bhist = similar(cHist) - ΤHist = similar(cHist) - muHist = similar(cHist) - RHist = zeros(T-1) - # time 0 - mu, cHist[1], nHist[1], _ = time0_allocation(pas, B_, s_0) - ΤHist[1] = Τ(pas.model, cHist[1], nHist[1])[s_0] - Bhist[1] = B_ - muHist[1] = mu - # time 1 onward - for t in 2:T - c, n, x, Xi = time1_allocation(pas,mu) - u_c = Uc(c,n) - s = sHist[t] - ΤHist[t] = Τ(pas.model, c, n)[s] - Eu_c = dot(Pi[sHist[t-1],:], u_c) - cHist[t], nHist[t], Bhist[t] = c[s], n[s], x[s] / u_c[s] - RHist[t-1] = Uc(cHist[t-1], nHist[t-1]) / (beta * Eu_c) - muHist[t] = mu - end - return cHist, nHist, Bhist, ΤHist, sHist, muHist, RHist -end - - mutable struct BellmanEquation{TP <: Model, - TI <: Integer, - TV <: AbstractVector, - TM <: AbstractMatrix{TV}, - TVV <: AbstractVector{TV}} - model::TP - S::TI - xbar::TV - time_0::Bool - z0::TM - cFB::TV - nFB::TV - xFB::TV - zFB::TVV - end - -function BellmanEquation(model, xgrid, policies0) - S = size(model.Pi, 1) # Number of states - xbar = collect(extrema(xgrid)) - time_0 = false - cf, nf, xprimef = policies0 - z0 = [vcat(cf[s](x), nf[s](x), [xprimef[s, sprime](x) - for sprime in 1:S]) - for x in 
xgrid, s in 1:S] - cFB, nFB, IFB, xFB, zFB = find_first_best(model, S, 2) - return BellmanEquation(model, S, xbar, time_0, z0, cFB, nFB, xFB, zFB) -end - -function get_policies_time1(T, i_x, x, s, Vf) - model, S = T.model, T.S - beta, Theta, G, Pi = model.beta, model.Theta, model.G, model.Pi - U, Uc, Un = model.U, model.Uc, model.Un - - function objf(z, grad) - c, xprime = z[1], z[2:end] - n = c + G[s] - Vprime = [Vf[sprime](xprime[sprime]) for sprime in 1:S] - return -(U(c, n) + beta * dot(Pi[s, :], Vprime)) - end - function cons(z, grad) - c, xprime = z[1], z[2:end] - n = c+G[s] - return x - Uc(c, n) * c - Un(c, n) * n - beta * dot(Pi[s, :], xprime) - end - lb = vcat(0, T.xbar[1] * ones(S)) - ub = vcat(1 - G[s], T.xbar[2] * ones(S)) - opt = Opt(:LN_COBYLA, length(T.z0[i_x, s])-1) - min_objective!(opt, objf) - equality_constraint!(opt, cons) - lower_bounds!(opt, lb) - upper_bounds!(opt, ub) - maxeval!(opt, 300) - maxtime!(opt, 10) - init = vcat(T.z0[i_x, s][1], T.z0[i_x, s][3:end]) - for (i, val) in enumerate(init) - if val > ub[i] - init[i] = ub[i] - elseif val < lb[i] - init[i] = lb[i] - end - end - (minf, minx, ret) = optimize(opt, init) - T.z0[i_x, s] = vcat(minx[1], minx[1] + G[s], minx[2:end]) - return vcat(-minf, T.z0[i_x, s]) -end - -function get_policies_time0(T, B_, s0, Vf) - model, S = T.model, T.S - beta, Theta, G, Pi = model.beta, model.Theta, model.G, model.Pi - U, Uc, Un = model.U, model.Uc, model.Un - function objf(z, grad) - c, xprime = z[1], z[2:end] - n = c + G[s0] - Vprime = [Vf[sprime](xprime[sprime]) for sprime in 1:S] - return -(U(c, n) + beta * dot(Pi[s0, :], Vprime)) - end - function cons(z, grad) - c, xprime = z[1], z[2:end] - n = c + G[s0] - return -Uc(c, n) * (c - B_) - Un(c, n) * n - beta * dot(Pi[s0, :], xprime) - end - lb = vcat(0, T.xbar[1] * ones(S)) - ub = vcat(1-G[s0], T.xbar[2] * ones(S)) - opt = Opt(:LN_COBYLA, length(T.zFB[s0])-1) - min_objective!(opt, objf) - equality_constraint!(opt, cons) - lower_bounds!(opt, lb) - 
upper_bounds!(opt, ub) - maxeval!(opt, 300) - maxtime!(opt, 10) - init = vcat(T.zFB[s0][1], T.zFB[s0][3:end]) - for (i, val) in enumerate(init) - if val > ub[i] - init[i] = ub[i] - elseif val < lb[i] - init[i] = lb[i] - end - end - (minf, minx, ret) = optimize(opt, init) - return vcat(-minf, vcat(minx[1], minx[1]+G[s0], minx[2:end])) -end -``` - -## Recursive Formulation of the Ramsey problem - -$x_t(s^t) = u_c(s^t) b_t(s_t | s^{t-1})$ in equation {eq}`LSA_budget` -appears to be a purely “forward-looking” variable. - -But $x_t(s^t)$ is a also a natural candidate for a state variable in -a recursive formulation of the Ramsey problem. - -### Intertemporal Delegation - -To express a Ramsey plan recursively, we imagine that a time $0$ -Ramsey planner is followed by a sequence of continuation Ramsey planners -at times $t = 1, 2, \ldots$. - -A “continuation Ramsey planner” has a -different objective function and faces different constraints than a -Ramsey planner. - -A key step in representing a Ramsey plan recursively is -to regard the marginal utility scaled government debts -$x_t(s^t) = u_c(s^t) b_t(s_t|s^{t-1})$ as predetermined quantities -that continuation Ramsey planners at times $t \geq 1$ are -obligated to attain. - -Continuation Ramsey planners do this by choosing continuation policies that induce the representative -household to make choices that imply that $u_c(s^t) b_t(s_t|s^{t-1})= x_t(s^t)$. - -A time $t\geq 1$ continuation Ramsey planner -delivers $x_t$ by choosing a suitable $n_t, c_t$ pair and a list of -$s_{t+1}$-contingent continuation quantities $x_{t+1}$ to -bequeath to a time $t+1$ continuation Ramsey planner. - -A time $t \geq 1$ continuation Ramsey planner faces $x_t, s_t$ as -state variables. - -But the time $0$ Ramsey planner faces $b_0$, not $x_0$, -as a state variable. - -Furthermore, the Ramsey planner cares about $(c_0(s_0), \ell_0(s_0))$, while -continuation Ramsey planners do not. 
- -The time $0$ Ramsey planner -hands $x_1$ as a function of $s_1$ to a time $1$ -continuation Ramsey planner. - -These lines of delegated authorities and -responsibilities across time express the continuation Ramsey planners’ -obligations to implement their parts of the original Ramsey plan, -designed once-and-for-all at time $0$. - -### Two Bellman Equations - -After $s_t$ has been realized at time $t \geq 1$, the state -variables confronting the time $t$ **continuation Ramsey planner** are -$(x_t, s_t)$. - -* Let $V(x, s)$ be the value of a **continuation Ramsey plan** at $x_t = x, s_t =s$ for $t \geq 1$. -* Let $W(b, s)$ be the value of a **Ramsey plan** at time $0$ at $b_0=b$ and $s_0 = s$. - -We work backwards by presenting a Bellman equation for -$V(x,s)$ first, then a Bellman equation for $W(b,s)$. - -### The Continuation Ramsey Problem - -The Bellman equation for a time $t \geq 1$ continuation Ramsey -planner is - -```{math} -:label: LSA_Bellman1 - -V(x, s) = \max_{n, \{x'(s')\}} u(n-g(s), 1-n) + \beta \sum_{s'\in S} \Pi(s'| s) V(x', s') -``` - -where maximization over $n$ and the $S$ elements of -$x'(s')$ is subject to the single implementability constraint for -$t \geq 1$ - -```{math} -:label: LSA_Bellman1cons - -x = u_c(n-g(s)) - u_l n + \beta \sum_{s' \in {\cal S}} \Pi(s' | s) x'(s') -``` - -Here $u_c$ and $u_l$ are today’s values of the marginal utilities. - -For each given value of $x, s$, the continuation Ramsey planner chooses $n$ and an $x'(s')$ -for each $s' \in {\cal S}$. 
- -Associated with a value function $V(x,s)$ that solves Bellman equation {eq}`LSA_Bellman1` -are $S+1$ time-invariant policy functions - -```{math} -:label: RRpolicyt - -\begin{aligned} - n_t & = f(x_t, s_t), \quad t \geq 1 - \\ - x_{t+1}(s_{t+1}) & = h(s_{t+1}; x_t, s_t), \, s_{t+1} \in {\cal S}, \, t \geq 1 -\end{aligned} -``` - -### The Ramsey Problem - -The Bellman equation for the time $0$ Ramsey planner is - -```{math} -:label: LSA_Bellman2 - -W(b_0, s_0) = \max_{n_0, \{x'(s_1)\}} u(n_0 - g_0, 1 - n_0) + \beta \sum_{s_1 \in {\cal S}} \Pi(s_1| s_0) V( x'(s_1), s_1) -``` - -where maximization over $n_0$ and the $S$ elements of -$x'(s_1)$ is subject to the time $0$ implementability -constraint - -```{math} -:label: Bellman2cons2 - -u_{c,0} b_0 = u_{c,0} (n_0 - g_0) - u_{l,0} n_0 + \beta \sum_{s_1\in {\cal S}} \Pi(s_1 | s_0) x'(s_1) -``` - -coming from restriction {eq}`Bellman2cons`. - -Associated with a value function $W(b_0, n_0)$ that solves Bellman equation {eq}`LSA_Bellman2` are -$S +1$ time $0$ policy functions - -```{math} -:label: RRpolicy0 - -\begin{aligned} - n_0 - & = f_0(b_0, s_0) \cr - x_1(s_1) - & = h_0(s_1; b_0, s_0) -\end{aligned} -``` - -Notice the appearance of state variables $(b_0, s_0)$ in the time -$0$ policy functions for the Ramsey planner as compared to -$(x_t, s_t)$ in the policy functions {eq}`RRpolicyt` for the time $t \geq 1$ -continuation Ramsey planners. - -The value function $V(x_t, s_t)$ of the time $t$ -continuation Ramsey planner equals -$E_t \sum_{\tau = t}^\infty \beta^{\tau - t} u(c_t, l_t)$, where -the consumption and leisure processes are evaluated along the original -time $0$ Ramsey plan. - -### First-Order Conditions - -Attach a Lagrange multiplier $\Phi_1(x,s)$ to constraint {eq}`LSA_Bellman1cons` and a -Lagrange multiplier $\Phi_0$ to constraint {eq}`Bellman2cons`. 
- -Time $t \geq 1$: the first-order conditions for the time $t \geq 1$ constrained -maximization problem on the right side of the continuation Ramsey -planner’s Bellman equation {eq}`LSA_Bellman1` are - -```{math} -:label: LSARxt - -\beta \Pi(s' | s) V_x (x', s') - \beta \Pi(s' | s) \Phi_1 = 0 -``` - -for $x'(s')$ and - -```{math} -:label: LSARnt - -(1 + \Phi_1) (u_c - u_l ) + \Phi_1 \left[ n (u_{ll} - u_{lc}) + (n-g(s)) (u_{cc} - u_{lc}) \right] = 0 -``` - -for $n$. - -Given $\Phi_1$, equation {eq}`LSARnt` is one equation to be -solved for $n$ as a function of $s$ (or of $g(s)$). - -Equation {eq}`LSARxt` implies $V_x(x', s')= \Phi_1$, while an envelope -condition is $V_x(x,s) = \Phi_1$, so it follows that - -```{math} -:label: LSAenv - -V_x(x', s') = V_x(x,s) = \Phi_1(x,s) -``` - -Time $t=0$: For the time $0$ problem on the right side of the Ramsey planner’s -Bellman equation {eq}`LSA_Bellman2`, first-order conditions are - -```{math} -:label: LSAx0 - -V_x(x(s_1), s_1) = \Phi_0 -``` - -for $x(s_1), s_1 \in {\cal S}$, and - -```{math} -:label: LSAn0 - -\begin{aligned} - (1 + \Phi_0) (u_{c,0} - u_{n,0}) - & + \Phi_0 \bigl[ n_0 (u_{ll,0} - u_{lc,0} ) + (n_0 - g(s_0)) (u_{cc,0} - u_{cl,0}) \Bigr] - \\ - & \quad \quad \quad - \Phi_0 (u_{cc,0} - u_{cl,0}) b_0 = 0 -\end{aligned} -``` - -Notice similarities and differences between the first-order -conditions for $t \geq 1$ and for $t=0$. - -An additional term is present in {eq}`LSAn0` except in three special cases - -* $b_0 = 0$, or -* $u_c$ is constant (i.e., preferences are quasi-linear in consumption), or -* initial government assets are sufficiently large to finance all government - purchases with interest earnings from those assets, so that $\Phi_0= 0$ - -Except in these special cases, the allocation and the labor tax rate as -functions of $s_t$ differ between dates $t=0$ and subsequent -dates $t \geq 1$. 
- -Naturally, the first-order conditions in this recursive formulation of the -Ramsey problem agree with the first-order conditions derived when we first -formulated the Ramsey plan in the space of sequences. - -### State Variable Degeneracy - -Equations {eq}`LSAx0` and {eq}`LSAn0` imply that $\Phi_0 = \Phi_1$ -and that - -```{math} -:label: FONCx - -V_x(x_t, s_t) = \Phi_0 -``` - -for all $t \geq 1$. - -When $V$ is concave in $x$, this implies *state-variable degeneracy* -along a Ramsey plan in the sense that for $t \geq 1$, $x_t$ will be -a time-invariant function of $s_t$. - -Given $\Phi_0$, this function mapping $s_t$ into $x_t$ -can be expressed as a vector $\vec x$ that solves equation {eq}`Bellman2cons2` -for $n$ and $c$ as functions of $g$ that are associated -with $\Phi = \Phi_0$. - -### Manifestations of Time Inconsistency - -While the marginal utility adjusted level of government debt $x_t$ -is a key state variable for the continuation Ramsey planners at -$t \geq 1$, it is not a state variable at time $0$. - -The time $0$ Ramsey planner faces $b_0$, not $x_0 = u_{c,0} b_0$, as a state variable. - -The discrepancy in state variables faced by the time $0$ Ramsey planner and the time -$t \geq 1$ continuation Ramsey planners captures the differing -obligations and incentives faced by the time $0$ Ramsey planner -and the time $t \geq 1$ continuation Ramsey planners. - -* The time $0$ Ramsey planner is obligated to honor government - debt $b_0$ measured in time $0$ consumption goods. -* The time $0$ Ramsey planner can manipulate the *value* of government - debt as measured by $u_{c,0} b_0$. -* In contrast, time $t \geq 1$ continuation Ramsey planners are - obligated *not* to alter values of debt, as measured by - $u_{c,t} b_t$, that they inherit from a preceding Ramsey planner or - continuation Ramsey planner. 
- -When government expenditures $g_t$ are a time invariant function -of a Markov state $s_t$, a Ramsey plan and associated Ramsey -allocation feature marginal utilities of consumption $u_c(s_t)$ -that, given $\Phi$, for $t \geq 1$ depend only on -$s_t$, but that for $t=0$ depend on $b_0$ as well. - -This means that $u_c(s_t)$ will be a time invariant function of -$s_t$ for $t \geq 1$, but except when $b_0 = 0$, a -different function for $t=0$. - -This in turn means that prices of -one period Arrow securities $p_{t+1}(s_{t+1} | s_t) = p(s_{t+1}|s_t)$ -will be the *same* time invariant functions of $(s_{t+1}, s_t)$ -for $t \geq 1$, but a different function $p_0(s_1|s_0)$ for -$t=0$, except when $b_0=0$. - -The differences between these -time $0$ and time $t \geq 1$ objects reflect -the Ramsey planner’s incentive to manipulate Arrow security prices and, -through them, the value of initial government debt $b_0$. - -### Recursive Implementation - -The above steps are implemented in a type called RecursiveAllocation - -```{code-cell} julia ---- -tags: [output_scroll] ---- -struct RecursiveAllocation{TP <: Model, TI <: Integer, - TVg <: AbstractVector, TVv <: AbstractVector, - TVp <: AbstractArray} - model::TP - mc::MarkovChain - S::TI - T::BellmanEquation - mugrid::TVg - xgrid::TVg - Vf::TVv - policies::TVp - end - - -function RecursiveAllocation(model, mugrid) - mc = MarkovChain(model.Pi) - G = model.G - S = size(model.Pi, 1) # Number of states - # Now find the first best allocation - Vf, policies, T, xgrid = solve_time1_bellman(model, mugrid) - T.time_0 = true # Bellman equation now solves time 0 problem - return RecursiveAllocation(model, mc, S, T, mugrid, xgrid, Vf, policies) -end - -function solve_time1_bellman(model, mugrid) - mugrid0 = mugrid - S = size(model.Pi, 1) - # First get initial fit - PP = SequentialAllocation(model) - c = zeros(length(mugrid), 2) - n = similar(c) - x = similar(c) - V = similar(c) - for (i, mu) in enumerate(mugrid0) - c[i, :], n[i, :], 
x[i, :], V[i, :] = time1_value(PP, mu) - end - Vf = Vector{AbstractInterpolation}(undef, 2) - cf = similar(Vf) - nf = similar(Vf) - xprimef = similar(Vf, 2, S) - for s in 1:2 - cf[s] = LinearInterpolation(x[:, s][end:-1:1], c[:, s][end:-1:1]) - nf[s] = LinearInterpolation(x[:, s][end:-1:1], n[:, s][end:-1:1]) - Vf[s] = LinearInterpolation(x[:, s][end:-1:1], V[:, s][end:-1:1]) - for sprime in 1:S - xprimef[s, sprime] = LinearInterpolation(x[:, s][end:-1:1], x[:, s][end:-1:1]) - end - end - policies = [cf, nf, xprimef] - # Create xgrid - xbar = [maximum(minimum(x, dims = 1)), minimum(maximum(x, dims = 1))] - xgrid = range(xbar[1], xbar[2], length = length(mugrid0)) - # Now iterate on bellman equation - T = BellmanEquation(model, xgrid, policies) - diff = 1.0 - while diff > 1e-6 - if T.time_0 == false - Vfnew, policies = - fit_policy_function(PP, - (i_x, x, s) -> get_policies_time1(T, i_x, x, s, Vf), xgrid) - elseif T.time_0 == true - Vfnew, policies = - fit_policy_function(PP, - (i_x, B_, s0) -> get_policies_time0(T, i_x, B_, s0, Vf), xgrid) - else - error("T.time_0 is $(T.time_0), which is invalid") - end - diff = 0.0 - for s in 1:S - diff = max(diff, maximum(abs, (Vf[s].(xgrid)-Vfnew[s].(xgrid))./Vf[s].(xgrid))) - end - print("diff = $diff \n") - Vf = Vfnew - end - # Store value function policies and Bellman Equations - return Vf, policies, T, xgrid -end - -function fit_policy_function(PP, PF, xgrid) - S = PP.S - Vf = Vector{AbstractInterpolation}(undef, S) - cf = similar(Vf) - nf = similar(Vf) - xprimef = similar(Vf, S, S) - for s in 1:S - PFvec = zeros(length(xgrid), 3+S) - for (i_x, x) in enumerate(xgrid) - PFvec[i_x, :] = PF(i_x, x, s) - end - Vf[s] = LinearInterpolation(xgrid, PFvec[:, 1]) - cf[s] = LinearInterpolation(xgrid, PFvec[:, 2]) - nf[s] = LinearInterpolation(xgrid, PFvec[:, 3]) - for sprime in 1:S - xprimef[s, sprime] = LinearInterpolation(xgrid, PFvec[:, 3+sprime]) - end - end - return Vf, [cf, nf, xprimef] -end - -function 
time0_allocation(pab::RecursiveAllocation, B_, s0) - xgrid = pab.xgrid - if pab.T.time_0 == false - z0 = get_policies_time1(pab.T, i_x, x, s, pab.Vf) - elseif pab.T.time_0 == true - z0 = get_policies_time0(pab.T, B_, s0, pab.Vf) - else - error("T.time_0 is $(T.time_0), which is invalid") - end - c0, n0, xprime0 = z0[2], z0[3], z0[4:end] - return c0, n0, xprime0 -end - -function simulate(pab::RecursiveAllocation, B_, s_0, T, - sHist = QuantEcon.simulate(mc, s_0, T)) - model, S, policies = pab.model, pab.S, pab.policies - beta, Pi, Uc = model.beta, model.Pi, model.Uc - cf, nf, xprimef = policies[1], policies[2], policies[3] - cHist = zeros(T) - nHist = similar(cHist) - Bhist = similar(cHist) - ΤHist = similar(cHist) - muHist = similar(cHist) - RHist = zeros(T - 1) - # time 0 - cHist[1], nHist[1], xprime = time0_allocation(pab, B_, s_0) - ΤHist[1] = Τ(pab.model, cHist[1], nHist[1])[s_0] - Bhist[1] = B_ - muHist[1] = 0.0 - # time 1 onward - for t in 2:T - s, x = sHist[t], xprime[sHist[t]] - n = nf[s](x) - c = [cf[shat](x) for shat in 1:S] - xprime = [xprimef[s, sprime](x) for sprime in 1:S] - ΤHist[t] = Τ(pab.model, c, n)[s] - u_c = Uc(c, n) - Eu_c = dot(Pi[sHist[t-1], :], u_c) - muHist[t] = pab.Vf[s](x) - RHist[t-1] = Uc(cHist[t-1], nHist[t-1]) / (beta * Eu_c) - cHist[t], nHist[t], Bhist[t] = c[s], n, x / u_c[s] - end - return cHist, nHist, Bhist, ΤHist, sHist, muHist, RHist -end -``` - -## Examples - -### Anticipated One Period War - -This example illustrates in a simple setting how a Ramsey planner manages risk. - -Government expenditures are known for sure in all periods except one. - -* For $t<3$ and $t > 3$ we assume that $g_t = g_l = 0.1$. -* At $t = 3$ a war occcurs with probability 0.5. - * If there is war, $g_3 = g_h = 0.2$. - * If there is no war $g_3 = g_l = 0.1$. - -We define the components of the state vector as the following six $(t,g)$ -pairs: $(0,g_l),(1,g_l),(2,g_l),(3,g_l),(3,g_h), (t\geq 4,g_l)$. 
- -We think of these 6 states as corresponding to $s=1,2,3,4,5,6$. - -The transition matrix is - -$$ -\Pi = \left(\begin{matrix}0 & 1 & 0 & 0 & 0 & 0\\ - 0 & 0 & 1 & 0 & 0 & 0\\ - 0 & 0 & 0 & 0.5 & 0.5 & 0\\ - 0 & 0 & 0 & 0 & 0 & 1\\ - 0 & 0 & 0 & 0 & 0 & 1\\ - 0 & 0 & 0 & 0 & 0 & 1\end{matrix}\right) -$$ - -Government expenditures at each state are - -$$ -g = \left(\begin{matrix} 0.1\\0.1\\0.1\\0.1\\0.2\\0.1 \end{matrix}\right). -$$ - -We assume that the representative agent has utility function - -$$ -u(c,n) = {\frac{c^{1-\sigma}}{1-\sigma}} - {\frac{n^{1+\gamma}}{1+\gamma}} -$$ - -and set $\sigma = 2$, $\gamma = 2$, and the discount factor $\beta = 0.9$. - -Note: For convenience in terms of matching our code, we have expressed -utility as a function of $n$ rather than leisure $l$. - -This utility function is implemented in the type CRRAutility - -```{code-cell} julia -function crra_utility(; - beta = 0.9, - sigma = 2.0, - gamma = 2.0, - Pi = 0.5 * ones(2, 2), - G = [0.1, 0.2], - Theta = ones(2), - transfers = false) - function U(c, n) - if sigma == 1.0 - U = log(c) - else - U = (c .^ (1.0 .- sigma) .- 1.0) / (1.0 - sigma) - end - return U .- n .^ (1 + gamma) / (1 + gamma) - end - # Derivatives of utility function - Uc(c, n) = c .^ (-sigma) - Ucc(c, n) = -sigma * c .^ (-sigma - 1.0) - Un(c, n) = -n .^ gamma - Unn(c, n) = -gamma * n .^ (gamma - 1.0) - n_less_than_one = false - return Model(beta, Pi, G, Theta, transfers, - U, Uc, Ucc, Un, Unn, n_less_than_one) -end -``` - -We set initial government debt $b_0 = 1$. - -We can now plot the Ramsey tax under both realizations of time $t = 3$ government expenditures - -* black when $g_3 = .1$, and -* red when $g_3 = .2$ - -```{code-cell} julia -using Random -Random.seed!(42) # For reproducible results. 
- -M_time_example = crra_utility(G = [0.1, 0.1, 0.1, 0.2, 0.1, 0.1], - Theta = ones(6)) # Theta can in principle be random - -M_time_example.Pi = [0.0 1.0 0.0 0.0 0.0 0.0; - 0.0 0.0 1.0 0.0 0.0 0.0; - 0.0 0.0 0.0 0.5 0.5 0.0; - 0.0 0.0 0.0 0.0 0.0 1.0; - 0.0 0.0 0.0 0.0 0.0 1.0; - 0.0 0.0 0.0 0.0 0.0 1.0] - -PP_seq_time = SequentialAllocation(M_time_example) # Solve sequential problem - -sHist_h = [1, 2, 3, 4, 6, 6, 6] -sHist_l = [1, 2, 3, 5, 6, 6, 6] - -sim_seq_h = simulate(PP_seq_time, 1.0, 1, 7, sHist_h) -sim_seq_l = simulate(PP_seq_time, 1.0, 1, 7, sHist_l) - -using LaTeXStrings, Plots - -titles = hcat("Consumption", - "Labor Supply", - "Government Debt", - "Tax Rate", - "Government Spending", - "Output") - -sim_seq_l_plot = [sim_seq_l[1:4]..., M_time_example.G[sHist_l], - M_time_example.Theta[sHist_l] .* sim_seq_l[2]] -sim_seq_h_plot = [sim_seq_h[1:4]..., M_time_example.G[sHist_h], - M_time_example.Theta[sHist_h] .* sim_seq_h[2]] - -#plots = plot(layout=(3,2), size=(800,600)) -plots = [plot(), plot(), plot(), plot(), plot(), plot()] -for i in 1:6 - plot!(plots[i], sim_seq_l_plot[i], color = :black, lw = 2, - marker = :circle, markersize = 2, label = "") - plot!(plots[i], sim_seq_h_plot[i], color = :red, lw = 2, - marker = :circle, markersize = 2, label = "") - plot!(plots[i], title = titles[i], grid = true) -end -plot(plots[1], plots[2], plots[3], plots[4], plots[5], plots[6], - layout = (3, 2), size = (800, 600)) -``` - - -**Tax smoothing** - -* the tax rate is constant for all $t\geq 1$ - * For $t \geq 1, t \neq 3$, this is a consequence of $g_t$ - being the same at all those dates - * For $t = 3$, it is a consequence of the special one-period utility - function that we have assumed - * Under other one-period utility functions, the time $t=3$ tax rate - could be either higher or lower than for dates $t \geq 1, t \neq 3$ -* the tax rate is the same at $t=3$ for both the high $g_t$ outcome and the low $g_t$ outcome - -We have assumed that at $t=0$, the 
government owes positive debt $b_0$. - -It sets the time $t=0$ tax rate partly with an eye to reducing the value $u_{c,0} b_0$ of $b_0$. - -It does this by increasing consumption at time $t=0$ relative to -consumption in later periods. - -This has the consequence of *raising* the time $t=0$ value of the gross -interest rate for risk-free loans between periods $t$ and $t+1$, which equals - -$$ -R_t = \frac{u_{c,t}}{\beta\mathbb E_{t}[u_{c,t+1}]} -$$ - -A tax policy that makes time $t=0$ consumption be higher than time $t=1$ consumption evidently increases the -risk-free rate one-period interest rate, $R_t$, at $t=0$. - -Raising the time $t=0$ risk-free interest rate makes time $t=0$ -consumption goods cheaper relative to consumption goods at later dates, thereby -lowering the value $u_{c,0} b_0$ of initial government debt $b_0$. - -We see this in a figure below that plots the time path for the risk free interest -rate under both realizations of the time $t=3$ government expenditure shock. - -The following plot illustrates how the government lowers the interest rate at -time 0 by raising consumption - -```{code-cell} julia -plot(sim_seq_l[end], color = :black, lw = 2, - marker = :circle, markersize = 2, label = "") -plot!(sim_seq_h[end], color = :red, lw = 2, - marker = :circle, markersize = 2, label = "") -plot!(title = "Gross Interest Rate", grid = true) -``` - -### Government Saving - -At time $t=0$ the government evidently *dissaves* since $b_1> b_0$. - -* This is a consequence of it setting a *lower* tax rate at $t=0$, - implying more consumption at $t=0$. - -> - -At time $t=1$, the government evidently *saves* since it has set the tax -rate sufficiently high to allow it to set $b_2 < b_1$. - -* Its motive for doing this is that it anticipates a likely war at $t=3$. - -> - -At time $t=2$ the government trades state-contingent Arrow securities -to hedge against war at $t=3$. - -* It purchases a security that pays off when $g_3 = g_h$. 
-* It sells a security that pays off when $g_3 = g_l$. -* These purchases are designed in such a way that regardless of whether or - not there is a war at $t=3$, the government will begin period - $t=4$ with the *same* government debt. -* The time $t=4$ debt level can be serviced with revenues from the - constant tax rate set at times $t\geq 1$. - -> - -At times $t \geq 4$ the government rolls over its debt, knowing that the -tax rate is set at level required to service the interest payments -on the debt and government expenditures. - -### Time 0 Manipulation of Interest Rate - -We have seen that when $b_0>0$, the Ramsey plan sets the time $t=0$ -tax rate partly with an eye toward raising a risk-free interest -rate for one-period loans between times $t=0$ and $t=1$. - -By raising this interest rate, the plan makes time $t=0$ goods cheap -relative to consumption goods at later times. - -By doing this, it lowers the value of time $t=0$ debt that it has inherited -and must finance. - -### Time 0 and Time-Inconsistency - -In the preceding example, the Ramsey tax rate at time 0 differs from its value at time 1. - -To explore what is going on here, let's simplify things by removing the possibility of war at time $t=3$. - -The Ramsey problem then includes no randomness because $g_t = g_l$ for all $t$. - -The figure below plots the Ramsey tax rates and gross interest rates at time -$t=0$ and time $t\geq1$ as functions of the initial government debt -(using the sequential allocation solution and a CRRA utility function defined -above) - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42); # For reproducible results. 
-``` - -```{code-cell} julia -M2 = crra_utility(G = [0.15], Pi = ones(1, 1), Theta = [1.0]) - -PP_seq_time0 = SequentialAllocation(M2) # solve sequential problem - -B_vec = range(-1.5, 1.0, length = 100) -taxpolicy = Matrix(hcat([simulate(PP_seq_time0, B_, 1, 2)[4] for B_ in B_vec]...)') -interest_rate = Matrix(hcat([simulate(PP_seq_time0, B_, 1, 3)[end] - for B_ in B_vec]...)') - -titles = ["Tax Rate" "Gross Interest Rate"] -labels = [[L"Time , $t = 0$" L"Time , $t \geq 0$"], ""] -plots = plot(layout = (2, 1), size = (700, 600)) -for (i, series) in enumerate((taxpolicy, interest_rate)) - plot!(plots[i], B_vec, series, linewidth = 2, label = labels[i]) - plot!(plots[i], title = titles[i], grid = true, legend = :topleft) -end -plot(plots) -``` - -The figure indicates that if the government enters with positive debt, it sets -a tax rate at $t=0$ that is less than all later tax rates. - -By setting a lower tax rate at $t = 0$, the government raises consumption, -which reduces the *value* $u_{c,0} b_0$ of its initial debt. - -It does this by increasing $c_0$ and thereby lowering $u_{c,0}$. - -Conversely, if $b_{0} < 0$, the Ramsey planner sets the tax rate at $t=0$ -higher than in subsequent periods. - -A side effect of lowering time $t=0$ consumption is that it raises the -one-period interest rate at time 0 above that of subsequent periods. - -There are only two values of initial government debt at which the tax rate is -constant for all $t \geq 0$. - -The first is $b_{0} = 0$ - -* Here the government can't use the $t=0$ tax rate to alter the - value of the initial debt. - -> - -The second occurs when the government enters with sufficiently large assets -that the Ramsey planner can achieve first best and sets $\tau_t = 0$ -for all $t$. - -It is only for these two values of initial government debt that the Ramsey -plan is time-consistent. 
- -Another way of saying this is that, except for these two values of initial -government debt, a continuation of a Ramsey plan is not a Ramsey plan. - -To illustrate this, consider a Ramsey planner who starts with an initial -government debt $b_1$ associated with one of the Ramsey plans computed above. - -Call $\tau_1^R$ the time $t=0$ tax rate chosen by the Ramsey planner -confronting this value for initial government debt government. - -The figure below shows both the tax rate at time 1 chosen by our original -Ramsey planner and what a new Ramsey planner would choose for its -time $t=0$ tax rate - -```{code-cell} julia -# Compute the debt entered with at time 1 -B1_vec = hcat([simulate(PP_seq_time0, B_, 1, 2)[3][2] for B_ in B_vec]...)' - -# Compute the optimal policy if the government could reset -tau1_reset = Matrix(hcat([simulate(PP_seq_time0, B1, 1, 1)[4] for B1 in B1_vec]...)') - -plot(B_vec, taxpolicy[:, 2], linewidth = 2, label = L"\tau_1") -plot!(B_vec, tau1_reset, linewidth = 2, label = L"\tau_1^R") -plot!(title = "Tax Rate", xlabel = "Initial Government Debt", legend = :topleft, - grid = true) -``` - -The tax rates in the figure are equal for only two values of initial government debt. - -### Tax Smoothing and non-CRRA Preferences - -The complete tax smoothing for $t \geq 1$ in the preceding example is a -consequence of our having assumed CRRA preferences. - -To see what is driving this outcome, we begin by noting that the Ramsey tax rate for $t\geq 1$ is -a time invariant function $\tau(\Phi,g)$ of the Lagrange multiplier on -the implementability constraint and government expenditures. - -For CRRA preferences, we can exploit the relations $U_{cc}c = -\sigma U_c$ -and $U_{nn} n = \gamma U_n$ to derive - -$$ -\frac{(1+(1-\sigma)\Phi)U_c}{(1+(1-\gamma)\Phi)U_n} = 1 -$$ - -from the first-order conditions. - -This equation immediately implies that the tax rate is constant. - -For other preferences, the tax rate may not be constant. 
- -For example, let the period utility function be - -$$ -u(c,n) = \log(c) + 0.69 \log(1-n) -$$ - -We will write a new constructor LogUtility to represent this utility function - -```{code-cell} julia -function log_utility(; beta = 0.9, - psi = 0.69, - Pi = 0.5 * ones(2, 2), - G = [0.1, 0.2], - Theta = ones(2), - transfers = false) - # Derivatives of utility function - U(c, n) = log(c) + psi * log(1 - n) - Uc(c, n) = 1 ./ c - Ucc(c, n) = -c .^ (-2.0) - Un(c, n) = -psi ./ (1.0 .- n) - Unn(c, n) = -psi ./ (1.0 .- n) .^ 2.0 - n_less_than_one = true - return Model(beta, Pi, G, Theta, transfers, - U, Uc, Ucc, Un, Unn, n_less_than_one) -end -``` - -Also suppose that $g_t$ follows a two state i.i.d. process with equal -probabilities attached to $g_l$ and $g_h$. - -To compute the tax rate, we will use both the sequential and recursive approaches described above. - -The figure below plots a sample path of the Ramsey tax rate - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42); # For reproducible results. 
-``` - -```{code-cell} julia -M1 = log_utility() -mu_grid = range(-0.6, 0.0, length = 200) -PP_seq = SequentialAllocation(M1) # Solve sequential problem -PP_bel = RecursiveAllocation(M1, mu_grid) # Solve recursive problem - -T = 20 -sHist = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 1] - -# Simulate -sim_seq = simulate(PP_seq, 0.5, 1, T, sHist) -sim_bel = simulate(PP_bel, 0.5, 1, T, sHist) - -# Plot policies -sim_seq_plot = [sim_seq[1:4]..., M1.G[sHist], M1.Theta[sHist] .* sim_seq[2]] -sim_bel_plot = [sim_bel[1:4]..., M1.G[sHist], M1.Theta[sHist] .* sim_bel[2]] - -titles = hcat("Consumption", - "Labor Supply", - "Government Debt", - "Tax Rate", - "Government Spending", - "Output") -labels = [ - ["Sequential", "Recursive"], - ["", ""], - ["", ""], - ["", ""], - ["", ""], - ["", ""], -] -plots = plot(layout = (3, 2), size = (850, 780)) - -for i in 1:6 - plot!(plots[i], sim_seq_plot[i], color = :black, lw = 2, marker = :circle, - markersize = 2, label = labels[i][1]) - plot!(plots[i], sim_bel_plot[i], color = :blue, lw = 2, marker = :xcross, - markersize = 2, label = labels[i][2]) - plot!(plots[i], title = titles[i], grid = true, legend = :topright) -end -plot(plots) -``` - -As should be expected, the recursive and sequential solutions produce almost -identical allocations. - -Unlike outcomes with CRRA preferences, the tax rate is not perfectly smoothed. - -Instead the government raises the tax rate when $g_t$ is high. - -## Further Comments - -A {doc}`related lecture <../dynamic_programming_squared/amss>` describes an extension of the Lucas-Stokey model -by Aiyagari, Marcet, Sargent, and Seppälä (2002) {cite}`amss2002`. - -In th AMSS economy, only a risk-free bond is traded. - -That lecture compares the recursive representation of the Lucas-Stokey model -presented in this lecture with one for an AMSS economy. - -By comparing these recursive formulations, we shall glean a sense in which the -dimension of the state is lower in the Lucas Stokey model. 
- -Accompanying that difference in dimension will be different dynamics of government debt. - diff --git a/lectures/getting_started_julia/fundamental_types.md b/lectures/getting_started_julia/fundamental_types.md index 9ba0435f..3c3df9a1 100644 --- a/lectures/getting_started_julia/fundamental_types.md +++ b/lectures/getting_started_julia/fundamental_types.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (fundamental_types)= diff --git a/lectures/getting_started_julia/getting_started.md b/lectures/getting_started_julia/getting_started.md index 486674a9..46947410 100644 --- a/lectures/getting_started_julia/getting_started.md +++ b/lectures/getting_started_julia/getting_started.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (getting_started)= @@ -250,7 +250,7 @@ Recall that you can start this directly from the [command palette](command_palet This process will take several minutes to download and compile all of the files used by the lectures. ```{attention} -If the cursor is instead `(@v1.11) pkg>` then you may not have started the integrated terminal in the correct location, or you used an external REPL. Assuming that you are in the correct location, if you type `activate .` in the package mode, the cursor should change to `(quantecon-notebooks-julia) pkg>` as it activates this project file. +If the cursor is instead `(@v1.12) pkg>` then you may not have started the integrated terminal in the correct location, or you used an external REPL. Assuming that you are in the correct location, if you type `activate .` in the package mode, the cursor should change to `(quantecon-notebooks-julia) pkg>` as it activates this project file. One benefit of using the integrated REPL is that it will set important options for launching Julia (e.g. the number of threads) and activate the local project files (i.e. 
the `Project.toml` file in the notebooks directory) automatically. If you use an external REPL, you will need to set these manually. Here you would want to run the REPL with `julia --project --threads auto` to tell Julia to set the number of threads equal to your local machine's number of cores, and to activate the existing project. See [here](repl_main) for more details. ``` @@ -534,7 +534,7 @@ To do so, click on the `Choose Kernel` or `Select Another Kernel...` which may d :width: 80% ``` -Choose the `Julia` kernel, rather than the `Jupyter Kernel...` to bypass the Python Jupyter setup. If successful, you will see the kernel name as `Julia 1.11 channel` or something similar. +Choose the `Julia` kernel, rather than the `Jupyter Kernel...` to bypass the Python Jupyter setup. If successful, you will see the kernel name as `Julia 1.12 channel` or something similar. With the kernel selected, you will be able to run cells in the VS Code UI with similar features to Jupyter Lab. For example, below shows the results of play icon next to a code cell, which will `Execute Cell` and display the results inline. 
diff --git a/lectures/getting_started_julia/introduction_to_types.md b/lectures/getting_started_julia/introduction_to_types.md index 207d764a..94b89e49 100644 --- a/lectures/getting_started_julia/introduction_to_types.md +++ b/lectures/getting_started_julia/introduction_to_types.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (introduction_to_types)= diff --git a/lectures/getting_started_julia/julia_by_example.md b/lectures/getting_started_julia/julia_by_example.md index 05a107fe..b2693380 100644 --- a/lectures/getting_started_julia/julia_by_example.md +++ b/lectures/getting_started_julia/julia_by_example.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (julia_by_example)= diff --git a/lectures/getting_started_julia/julia_essentials.md b/lectures/getting_started_julia/julia_essentials.md index 5770b046..13e5d1a2 100644 --- a/lectures/getting_started_julia/julia_essentials.md +++ b/lectures/getting_started_julia/julia_essentials.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (julia_essentials)= diff --git a/lectures/introduction_dynamics/ar1_processes.md b/lectures/introduction_dynamics/ar1_processes.md index c0f70e66..9daa1a9f 100644 --- a/lectures/introduction_dynamics/ar1_processes.md +++ b/lectures/introduction_dynamics/ar1_processes.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (ar1)= diff --git a/lectures/introduction_dynamics/finite_markov.md b/lectures/introduction_dynamics/finite_markov.md index c1a7ebcb..cd055f40 100644 --- a/lectures/introduction_dynamics/finite_markov.md +++ b/lectures/introduction_dynamics/finite_markov.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (mc)= diff --git 
a/lectures/introduction_dynamics/kalman.md b/lectures/introduction_dynamics/kalman.md index bd4f802d..151b1499 100644 --- a/lectures/introduction_dynamics/kalman.md +++ b/lectures/introduction_dynamics/kalman.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (kalman)= diff --git a/lectures/introduction_dynamics/linear_models.md b/lectures/introduction_dynamics/linear_models.md index f2b01e53..5191b312 100644 --- a/lectures/introduction_dynamics/linear_models.md +++ b/lectures/introduction_dynamics/linear_models.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (lssm)= diff --git a/lectures/introduction_dynamics/scalar_dynam.md b/lectures/introduction_dynamics/scalar_dynam.md index 5b528dd6..907da93b 100644 --- a/lectures/introduction_dynamics/scalar_dynam.md +++ b/lectures/introduction_dynamics/scalar_dynam.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (scalar_dynam)= diff --git a/lectures/introduction_dynamics/short_path.md b/lectures/introduction_dynamics/short_path.md index 9e26e778..8c5637b1 100644 --- a/lectures/introduction_dynamics/short_path.md +++ b/lectures/introduction_dynamics/short_path.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (short_path)= diff --git a/lectures/introduction_dynamics/wealth_dynamics.md b/lectures/introduction_dynamics/wealth_dynamics.md index 2ad8ad16..f27adc65 100644 --- a/lectures/introduction_dynamics/wealth_dynamics.md +++ b/lectures/introduction_dynamics/wealth_dynamics.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (wd)= diff --git a/lectures/more_julia/data_statistical_packages.md b/lectures/more_julia/data_statistical_packages.md index 04069f62..2633d5ac 100644 --- 
a/lectures/more_julia/data_statistical_packages.md +++ b/lectures/more_julia/data_statistical_packages.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (data_statistical_packages)= @@ -39,9 +39,7 @@ This list is not exhaustive, and others can be found in organizations such as [J --- tags: [hide-output] --- -using LinearAlgebra, Statistics -using DataFrames, RDatasets, DataFramesMeta, CategoricalArrays, Query -using GLM +using LinearAlgebra, Statistics, DataFrames ``` ## DataFrames @@ -62,7 +60,7 @@ There are a few different ways to create a DataFrame. The first is to set up columns and construct a dataframe by assigning names ```{code-cell} julia -using DataFrames, RDatasets # RDatasets provides good standard data examples from R +using DataFrames # note use of missing commodities = ["crude", "gas", "gold", "silver"] @@ -145,9 +143,11 @@ df2.col1 .= coalesce.(df2.col1, 0.0) # replace all missing with 0.0 ### Manipulating and Transforming DataFrames -One way to do an additional calculation with a `DataFrame` is to tuse the `@transform` macro from `DataFramesMeta.jl`. +One way to do an additional calculation with a `DataFrame` is to use the `@transform` macro from `DataFramesMeta.jl`. -```{code-cell} julia +The following are code only blocks, which would require installation of the packages in a separate environment. 
+ +```{code-block} julia using DataFramesMeta f(x) = x^2 df2 = @transform(df2, :col2=f.(:col1)) @@ -157,7 +157,7 @@ df2 = @transform(df2, :col2=f.(:col1)) For data that is [categorical](https://juliadata.github.io/DataFrames.jl/stable/man/categorical/) -```{code-cell} julia +```{code-block} julia using CategoricalArrays id = [1, 2, 3, 4] y = ["old", "young", "young", "old"] @@ -165,7 +165,7 @@ y = CategoricalArray(y) df = DataFrame(id = id, y = y) ``` -```{code-cell} julia +```{code-block} julia levels(df.y) ``` @@ -177,43 +177,6 @@ One set of them is the [QueryVerse](https://github.com/queryverse). **Note:** The QueryVerse, in the same spirit as R's tidyverse, makes heavy use of the pipeline syntax `|>`. -```{code-cell} julia -x = 3.0 -f(x) = x^2 -g(x) = log(x) - -@show g(f(x)) -@show x |> f |> g; # pipes nest function calls -``` - -To give an example directly from the source of the LINQ inspired [Query.jl](http://www.queryverse.org/Query.jl/stable/) - -```{code-cell} julia -using Query - -df = DataFrame(name = ["John", "Sally", "Kirk"], - age = [23.0, 42.0, 59.0], - children = [3, 5, 2]) - -x = @from i in df begin - @where i.age > 50 - @select {i.name, i.children} - @collect DataFrame -end -``` - -While it is possible to just use the `Plots.jl` library, there are other options for displaying tabular data -- such as [VegaLite.jl](https://github.com/queryverse/VegaLite.jl). - ## Statistics and Econometrics While Julia is not intended as a replacement for R, Stata, and similar specialty languages, it has a growing number of packages aimed at statistics and econometrics. @@ -229,7 +192,7 @@ A few to point out To run linear regressions and similar statistics, use the [GLM](http://juliastats.github.io/GLM.jl/latest/) package. 
-```{code-cell} julia +```{code-block} julia using GLM x = randn(100) @@ -242,28 +205,8 @@ To display the results in a useful tables for LaTeX and the REPL, use [RegressionTables](https://github.com/jmboehm/RegressionTables.jl/) for output similar to the Stata package esttab and the R package stargazer. -```{code-cell} julia +```{code-block} julia using RegressionTables regtable(ols) # regtable(ols, renderSettings = latexOutput()) # for LaTex output -``` - -### Fixed Effects - -While Julia may be overkill for estimating a simple linear regression, -fixed-effects estimation with dummies for multiple variables are much more computationally intensive. - -For a 2-way fixed-effect, taking the example directly from the documentation using [cigarette consumption data](https://github.com/johnmyleswhite/RDatasets.jl/blob/master/doc/plm/rst/Cigar.rst) - -```{code-cell} julia -using FixedEffectModels -cigar = dataset("plm", "Cigar") -cigar.StateCategorical = categorical(cigar.State) -cigar.YearCategorical = categorical(cigar.Year) -fixedeffectresults = reg(cigar, - @formula(Sales~NDI + fe(StateCategorical) + - fe(YearCategorical)), - weights = :Pop, Vcov.cluster(:State)) -regtable(fixedeffectresults) -``` - +``` \ No newline at end of file diff --git a/lectures/more_julia/general_packages.md b/lectures/more_julia/general_packages.md index 50062391..7aa8d8ba 100644 --- a/lectures/more_julia/general_packages.md +++ b/lectures/more_julia/general_packages.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (general_packages)= diff --git a/lectures/more_julia/generic_programming.md b/lectures/more_julia/generic_programming.md index 092e5198..098b3ec8 100644 --- a/lectures/more_julia/generic_programming.md +++ b/lectures/more_julia/generic_programming.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (generic_programming)= diff --git 
a/lectures/more_julia/optimization_solver_packages.md b/lectures/more_julia/optimization_solver_packages.md index 745f5951..e78e8e8a 100644 --- a/lectures/more_julia/optimization_solver_packages.md +++ b/lectures/more_julia/optimization_solver_packages.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (optimization_solver_packages)= @@ -35,7 +35,7 @@ In this lecture we introduce a few of the Julia libraries that we've found parti tags: [hide-output] --- using LinearAlgebra, Statistics -using ForwardDiff, Optim, JuMP, Ipopt, Roots, NLsolve +using ForwardDiff, Optim, Roots, NLsolve using Optim: converged, maximum, maximizer, minimizer, iterations #some extra functions ``` @@ -191,7 +191,6 @@ If you then follow all of the same scalar operations above with a seeded dual nu Dual-numbers are at the heart of one of the AD packages we have already seen. ```{code-cell} julia -using ForwardDiff h(x) = sin(x[1]) + x[1] * x[2] + sinh(x[1] * x[2]) # multivariate. x = [1.4 2.2] @show ForwardDiff.gradient(h, x) # use AD, seeds from x @@ -217,7 +216,6 @@ squareroot(2.0) ``` ```{code-cell} julia -using ForwardDiff dsqrt(x) = ForwardDiff.derivative(squareroot, x) dsqrt(2.0) ``` @@ -232,7 +230,7 @@ At this point, Julia does not have a single consistently usable reverse-mode AD - [ReverseDiff.jl](https://github.com/JuliaDiff/ReverseDiff.jl), a relatively dependable but limited package. Not really intended for standard ML-pipline usage - [Zygote.jl](https://github.com/FluxML/Zygote.jl), which is flexible but buggy and less reliable. In a slow process of deprecation, but often the primary alternative. -- [Enzyme.jl](https://enzyme.mit.edu/julia/stable/), which is the most promising (and supports both forward and reverse mode). 
However, the usage is more tailored for scientific machine learning and scalar functions rather than fast GPU kernels, and it relies on a innovative (but not fully stable) approach to compilation. +- [Enzyme.jl](https://enzyme.mit.edu/julia/stable/), which is the most promising (and supports both forward and reverse mode). However, as it works at a lower level of the compiler, it cannot support all Julia code. In particular, it prefers in-place rather than "pure" functions. ## Optimization @@ -256,7 +254,6 @@ maximization, so if a function is called `optimize` it will mean minimization. defaults to a robust hybrid optimization routine called [Brent's method](https://en.wikipedia.org/wiki/Brent%27s_method). ```{code-cell} julia -using Optim using Optim: converged, maximum, maximizer, minimizer, iterations #some extra functions result = optimize(x -> x^2, -2.0, 1.0) @@ -274,18 +271,6 @@ The first line is a logical OR between `converged(result)` and `error("...")`. If the convergence check passes, the logical sentence is true, and it will proceed to the next line; if not, it will throw the error. -Or to maximize - -```{code-cell} julia -f(x) = -x^2 -result = maximize(f, -2.0, 1.0) -converged(result) || error("Failed to converge in $(iterations(result)) iterations") -xmin = maximizer(result) -fmax = maximum(result) -``` - -**Note:** Notice that we call `optimize` results using `result.minimizer`, and `maximize` results using `maximizer(result)`. - #### Unconstrained Multivariate Optimization There are a variety of [algorithms and options](http://julianlsolvers.github.io/Optim.jl/stable/user/minimization/#_top) for multivariate optimization. 
@@ -369,7 +354,7 @@ The following is an example of calling a linear objective with a nonlinear const Here `Ipopt` stands for `Interior Point OPTimizer`, a [nonlinear solver](https://github.com/JuliaOpt/Ipopt.jl) in Julia -```{code-cell} julia +```{code-block} julia using JuMP, Ipopt # solve # max( x[1] + x[2] ) @@ -394,7 +379,7 @@ JuMP.register(m, :squareroot, 1, squareroot, autodiff = true) And this is an example of a quadratic objective -```{code-cell} julia +```{code-block} julia # solve # min (1-x)^2 + (100(y-x^2)^2) # st x + y >= 10 @@ -548,7 +533,7 @@ f(x, y) = 3.0 + x + y x = DualNumber(2.0, 1.0) # x -> 2.0 + 1.0\epsilon y = DualNumber(3.0, 0.0) # i.e. y = 3.0, no derivative -# seeded calculates both teh function and the d/dx gradient! +# seeded calculates both the function and the d/dx gradient! f(x, y) ``` diff --git a/lectures/multi_agent_models/aiyagari.md b/lectures/multi_agent_models/aiyagari.md index 893cba19..824b8b12 100644 --- a/lectures/multi_agent_models/aiyagari.md +++ b/lectures/multi_agent_models/aiyagari.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (aiyagari)= diff --git a/lectures/multi_agent_models/arellano.md b/lectures/multi_agent_models/arellano.md index 80d33ab4..a1028556 100644 --- a/lectures/multi_agent_models/arellano.md +++ b/lectures/multi_agent_models/arellano.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (arellano)= diff --git a/lectures/multi_agent_models/harrison_kreps.md b/lectures/multi_agent_models/harrison_kreps.md index 540fdcc1..9588dbde 100644 --- a/lectures/multi_agent_models/harrison_kreps.md +++ b/lectures/multi_agent_models/harrison_kreps.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (harrison_kreps)= diff --git a/lectures/multi_agent_models/lake_model.md b/lectures/multi_agent_models/lake_model.md 
index 4a59f0f5..651f0a52 100644 --- a/lectures/multi_agent_models/lake_model.md +++ b/lectures/multi_agent_models/lake_model.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (lake_model)= diff --git a/lectures/multi_agent_models/lucas_model.md b/lectures/multi_agent_models/lucas_model.md index b3dff9f2..5ff2020f 100644 --- a/lectures/multi_agent_models/lucas_model.md +++ b/lectures/multi_agent_models/lucas_model.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (lucas_asset)= diff --git a/lectures/multi_agent_models/markov_asset.md b/lectures/multi_agent_models/markov_asset.md index 01a44c1c..486b9709 100644 --- a/lectures/multi_agent_models/markov_asset.md +++ b/lectures/multi_agent_models/markov_asset.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (mass)= diff --git a/lectures/multi_agent_models/markov_perf.md b/lectures/multi_agent_models/markov_perf.md index 41ce44ef..f9a8ca3b 100644 --- a/lectures/multi_agent_models/markov_perf.md +++ b/lectures/multi_agent_models/markov_perf.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (markov_perf)= diff --git a/lectures/multi_agent_models/matsuyama.md b/lectures/multi_agent_models/matsuyama.md index a5413286..aae63bcd 100644 --- a/lectures/multi_agent_models/matsuyama.md +++ b/lectures/multi_agent_models/matsuyama.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (matsuyama)= diff --git a/lectures/multi_agent_models/rational_expectations.md b/lectures/multi_agent_models/rational_expectations.md index 20deecd5..fb0c91f5 100644 --- a/lectures/multi_agent_models/rational_expectations.md +++ b/lectures/multi_agent_models/rational_expectations.md @@ -6,7 +6,7 @@ jupytext: 
kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (ree)= diff --git a/lectures/multi_agent_models/schelling.md b/lectures/multi_agent_models/schelling.md index 95d8fd6a..7bb0d471 100644 --- a/lectures/multi_agent_models/schelling.md +++ b/lectures/multi_agent_models/schelling.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (schelling)= ```{raw} html diff --git a/lectures/multi_agent_models/uncertainty_traps.md b/lectures/multi_agent_models/uncertainty_traps.md index 3320cba1..0569480c 100644 --- a/lectures/multi_agent_models/uncertainty_traps.md +++ b/lectures/multi_agent_models/uncertainty_traps.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (uncertainty_traps)= diff --git a/lectures/software_engineering/need_for_speed.md b/lectures/software_engineering/need_for_speed.md index 62a5b371..e3572689 100644 --- a/lectures/software_engineering/need_for_speed.md +++ b/lectures/software_engineering/need_for_speed.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (types_methods)= diff --git a/lectures/software_engineering/testing.md b/lectures/software_engineering/testing.md index ce21f0d7..47f22710 100644 --- a/lectures/software_engineering/testing.md +++ b/lectures/software_engineering/testing.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (testing)= diff --git a/lectures/software_engineering/tools_editors.md b/lectures/software_engineering/tools_editors.md index bd6ba8c8..e9981a8e 100644 --- a/lectures/software_engineering/tools_editors.md +++ b/lectures/software_engineering/tools_editors.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (tools_editors)= @@ -240,8 +240,8 @@ The most 
important choice is the `--project` toggle which determines whether you To emphasize this point, this is an example of the `]st ` showing the global environment has only the bare minimum of packages installed. With this workflow, all other packages are installed only when a given project is activated. ```{code-block} none -(@v1.11) pkg> st -Status `~/.julia/environments/v1.11/Project.toml` +(@v1.12) pkg> st +Status `~/.julia/environments/v1.12/Project.toml` [7073ff75] IJulia v1.30.6 [14b8a8f1] PkgTemplates v0.7.56 [295af30f] Revise v3.10.0 diff --git a/lectures/software_engineering/version_control.md b/lectures/software_engineering/version_control.md index 790c7d45..9639def1 100644 --- a/lectures/software_engineering/version_control.md +++ b/lectures/software_engineering/version_control.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (version_control)= diff --git a/lectures/status.md b/lectures/status.md index 0f31ac7f..b6a1f63a 100644 --- a/lectures/status.md +++ b/lectures/status.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- # Execution Statistics diff --git a/lectures/time_series_models/additive_functionals.md b/lectures/time_series_models/additive_functionals.md deleted file mode 100644 index 4c729e3d..00000000 --- a/lectures/time_series_models/additive_functionals.md +++ /dev/null @@ -1,947 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(additive_functionals)= -```{raw} html - -``` - -# Additive Functionals - -```{index} single: Models; Additive functionals -``` - -```{contents} Contents -:depth: 2 -``` - -Co-authored with Chase Coleman and Balint Szoke - -## Overview - -Some time series are nonstationary. 
- -For example, output, prices, and dividends are typically nonstationary, due to irregular but persistent growth. - -Which kinds of models are useful for studying such time series? - -Hansen and Scheinkman {cite}`hans-scheink2009` analyze two classes of time series models that accommodate growth. - -They are: - -1. **additive functionals** that display random "arithmetic growth" -1. **multiplicative functionals** that display random "geometric growth" - -These two classes of processes are closely connected. - -For example, if a process $\{y_t\}$ is an additive functional and $\phi_t = \exp(y_t)$, then $\{\phi_t\}$ is a multiplicative functional. - -Hansen and Sargent {cite}`hans-sarg-book2016` (chs. 5 and 6) describe discrete time versions of additive and multiplicative functionals. - -In this lecture we discuss the former (i.e., additive functionals). - -In the {doc}`next lecture <../time_series_models/multiplicative_functionals>` we discuss multiplicative functionals. - -We also consider fruitful decompositions of additive and multiplicative processes, a more in depth discussion of which can be found in Hansen and Sargent {cite}`hans-sarg-book2016`. - -## A Particular Additive Functional - -This lecture focuses on a particular type of additive functional: a scalar process $\{y_t\}_{t=0}^\infty$ whose increments are driven by a Gaussian vector autoregression. - -It is simple to construct, simulate, and analyze. - -This additive functional consists of two components, the first of which is a **first-order vector autoregression** (VAR) - -```{math} -:label: old1_additive_functionals - -x_{t+1} = A x_t + B z_{t+1} -``` - -Here - -* $x_t$ is an $n \times 1$ vector, -* $A$ is an $n \times n$ stable matrix (all eigenvalues lie within the open unit circle), -* $z_{t+1} \sim {\cal N}(0,I)$ is an $m \times 1$ i.i.d. 
shock, -* $B$ is an $n \times m$ matrix, and -* $x_0 \sim {\cal N}(\mu_0, \Sigma_0)$ is a random initial condition for $x$ - -The second component is an equation that expresses increments -of $\{y_t\}_{t=0}^\infty$ as linear functions of - -* a scalar constant $\nu$, -* the vector $x_t$, and -* the same Gaussian vector $z_{t+1}$ that appears in the VAR {eq}`old1_additive_functionals` - -In particular, - -```{math} -:label: old2_additive_functionals - -y_{t+1} - y_{t} = \nu + D x_{t} + F z_{t+1} -``` - -Here $y_0 \sim {\cal N}(\mu_{y0}, \Sigma_{y0})$ is a random -initial condition. - -The nonstationary random process $\{y_t\}_{t=0}^\infty$ displays -systematic but random *arithmetic growth*. - -### A linear state space representation - -One way to represent the overall dynamics is to use a {doc}`linear state space system <../introduction_dynamics/linear_models>`. - -To do this, we set up state and observation vectors - -$$ -\hat{x}_t = \begin{bmatrix} 1 \\ x_t \\ y_t \end{bmatrix} -\quad \text{and} \quad -\hat{y}_t = \begin{bmatrix} x_t \\ y_t \end{bmatrix} -$$ - -Now we construct the state space system - -$$ -\begin{bmatrix} - 1 \\ - x_{t+1} \\ - y_{t+1} - \end{bmatrix} - = - \begin{bmatrix} - 1 & 0 & 0 \\ - 0 & A & 0 \\ - \nu & D' & 1 \\ -\end{bmatrix} -\begin{bmatrix} - 1 \\ - x_t \\ - y_t -\end{bmatrix} + -\begin{bmatrix} - 0 \\ B \\ F' -\end{bmatrix} -z_{t+1} -$$ - -$$ -\begin{bmatrix} - x_t \\ - y_t -\end{bmatrix} -= \begin{bmatrix} - 0 & I & 0 \\ - 0 & 0 & 1 -\end{bmatrix} -\begin{bmatrix} - 1 \\ x_t \\ y_t -\end{bmatrix} -$$ - -This can be written as - -$$ -\begin{aligned} - \hat{x}_{t+1} &= \hat{A} \hat{x}_t + \hat{B} z_{t+1} \\ - \hat{y}_{t} &= \hat{D} \hat{x}_t -\end{aligned} -$$ - -which is a standard linear state space system. - -To study it, we could map it into an instance of [LSS](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/lss.jl) from [QuantEcon.jl](http://quantecon.org/quantecon-jl). 
- -We will in fact use a different set of code for simulation, for reasons described below. - -## Dynamics - -Let's run some simulations to build intuition. - -(addfunc_eg1)= -In doing so we'll assume that $z_{t+1}$ is scalar and that $\tilde x_t$ follows a 4th-order scalar autoregession - -```{math} -:label: ftaf - -\tilde x_{t+1} = \phi_1 \tilde x_{t} + \phi_2 \tilde x_{t-1} + -\phi_3 \tilde x_{t-2} + -\phi_4 \tilde x_{t-3} + \sigma z_{t+1} -``` - -Let the increment in $\{y_t\}$ obey - -$$ -y_{t+1} - y_t = \nu + \tilde x_t + \sigma z_{t+1} -$$ - -with an initial condition for $y_0$. - -While {eq}`ftaf` is not a first order system like {eq}`old1_additive_functionals`, we know that it can be mapped into a first order system - -* for an example of such a mapping, see {ref}`this example ` - -In fact this whole model can be mapped into the additive functional system definition in {eq}`old1_additive_functionals` -- {eq}`old2_additive_functionals` by appropriate selection of the matrices $A, B, D, F$. - -You can try writing these matrices down now as an exercise --- the correct expressions will appear in the code below. - -### Simulation - -When simulating we embed our variables into a bigger system. - -This system also constructs the components of the decompositions of $y_t$ and of $\exp(y_t)$ proposed by Hansen and Scheinkman {cite}`hans-scheink2009`. - -All of these objects are computed using the code below. 
- - - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test, Random -``` - -```{code-cell} julia -using Distributions, LaTeXStrings, Plots, QuantEcon -using LinearAlgebra, Statistics -``` - -```{code-cell} julia -function AMF_LSS_VAR(A, B, D, F = nothing; upsilon = nothing) - if B isa AbstractVector - B = reshape(B, length(B), 1) - end - # unpack required elements - nx, nk = size(B) - - # checking the dimension of D (extended from the scalar case) - if ndims(D) > 1 - nm = size(D, 1) - if D isa Union{Adjoint, Transpose} - D = convert(Matrix, D) - end - else - nm = 1 - D = reshape(D, 1, length(D)) - end - - # set F - if isnothing(F) - F = zeros(nk, 1) - elseif ndims(F) == 1 - F = reshape(F, length(F), 1) - end - - # set upsilon - if isnothing(upsilon) - upsilon = zeros(nm, 1) - elseif ndims(upsilon) == 1 - upsilon = reshape(upsilon, length(upsilon), 1) - else - throw(ArgumentError("upsilon must be column vector!")) - end - - if size(upsilon, 1) != size(D, 1) - error("The size of upsilon is inconsistent with D!") - end - - # construct BIG state space representation - lss = construct_ss(A, B, D, F, upsilon, nx, nk, nm) - - return (; A, B, D, F, upsilon, nx, nk, nm, lss) -end - -AMF_LSS_VAR(A, B, D) = AMF_LSS_VAR(A, B, D, nothing, upsilon = nothing) -function AMF_LSS_VAR(A, B, D, F, upsilon) - AMF_LSS_VAR(A, B, D, [F], upsilon = [upsilon]) -end - -function construct_ss(A, B, D, F, upsilon, nx, nk, nm) - H, g = additive_decomp(A, B, D, F, nx) - - # auxiliary blocks with 0's and 1's to fill out the lss matrices - nx0c = zeros(nx, 1) - nx0r = zeros(1, nx) - nx1 = ones(1, nx) - nk0 = zeros(1, nk) - ny0c = zeros(nm, 1) - ny0r = zeros(1, nm) - ny1m = I + zeros(nm, nm) - ny0m = zeros(nm, nm) - nyx0m = similar(D) - - # build A matrix for LSS - # order of states is: [1, t, xt, yt, mt] - A1 = hcat(1, 0, nx0r, ny0r, ny0r) # transition for 1 - A2 = hcat(1, 1, nx0r, ny0r, ny0r) # transition for t - A3 = hcat(nx0c, nx0c, A, nyx0m', nyx0m') # transition for x_{t+1} - A4 = 
hcat(upsilon, ny0c, D, ny1m, ny0m) # transition for y_{t+1} - A5 = hcat(ny0c, ny0c, nyx0m, ny0m, ny1m) # transition for m_{t+1} - Abar = vcat(A1, A2, A3, A4, A5) - - # build B matrix for LSS - Bbar = vcat(nk0, nk0, B, F, H) - - # build G matrix for LSS - # order of observation is: [xt, yt, mt, st, tt] - G1 = hcat(nx0c, nx0c, I, nyx0m', nyx0m') # selector for x_{t} - G2 = hcat(ny0c, ny0c, nyx0m, ny1m, ny0m) # selector for y_{t} - G3 = hcat(ny0c, ny0c, nyx0m, ny0m, ny1m) # selector for martingale - G4 = hcat(ny0c, ny0c, -g, ny0m, ny0m) # selector for stationary - G5 = hcat(ny0c, upsilon, nyx0m, ny0m, ny0m) # selector for trend - Gbar = vcat(G1, G2, G3, G4, G5) - - # build LSS type - x0 = hcat(1, 0, nx0r, ny0r, ny0r) - S0 = zeros(length(x0), length(x0)) - lss = LSS(Abar, Bbar, Gbar, zeros(nx + 4nm, 1), x0, S0) - - return lss -end - -function additive_decomp(A, B, D, F, nx) - A_res = \(I - A, I) - g = D * A_res - H = F .+ D * A_res * B - - return H, g -end - -function multiplicative_decomp(A, B, D, F, upsilon, nx) - H, g = additive_decomp(A, B, D, F, nx) - upsilon_tilde = upsilon .+ 0.5 * diag(H * H') - - return H, g, upsilon_tilde -end - -function loglikelihood_path(amf, x, y) - (; A, B, D, F) = amf - k, T = size(y) - FF = F * F' - FFinv = inv(FF) - temp = y[:, 2:end] - y[:, 1:(end - 1)] - D * x[:, 1:(end - 1)] - obs = temp .* FFinv .* temp - obssum = cumsum(obs) - scalar = (logdet(FF) + k * log(2pi)) * (1:T) - - return -(obssum + scalar) / 2 -end - -function loglikelihood(amf, x, y) - llh = loglikelihood_path(amf, x, y) - - return llh[end] -end - -function plot_additive(amf, T; npaths = 25, show_trend = true) - # pull out right sizes so we know how to increment - (; nx, nk, nm) = amf - - # allocate space (nm is the number of additive functionals - we want npaths for each) - mpath = zeros(nm * npaths, T) - mbounds = zeros(2nm, T) - spath = zeros(nm * npaths, T) - sbounds = zeros(2nm, T) - tpath = zeros(nm * npaths, T) - ypath = zeros(nm * npaths, T) - - # simulate for 
as long as we wanted - moment_generator = moment_sequence(amf.lss) - - # pull out population moments - for (t, x) in enumerate(moment_generator) - ymeans = x[2] - yvar = x[4] - - # lower and upper bounds - for each additive functional - for ii in 1:nm - li, ui = 2(ii - 1) + 1, 2ii - if sqrt(yvar[nx + nm + ii, nx + nm + ii]) != 0.0 - madd_dist = Normal(ymeans[nx + nm + ii], - sqrt(yvar[nx + nm + ii, nx + nm + ii])) - mbounds[li, t] = quantile(madd_dist, 0.01) - mbounds[ui, t] = quantile(madd_dist, 0.99) - elseif sqrt(yvar[nx + nm + ii, nx + nm + ii]) == 0.0 - mbounds[li, t] = ymeans[nx + nm + ii] - mbounds[ui, t] = ymeans[nx + nm + ii] - else - error("standard error is negative") - end - - if sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii]) != 0.0 - sadd_dist = Normal(ymeans[nx + 2nm + ii], - sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii])) - sbounds[li, t] = quantile(sadd_dist, 0.01) - sbounds[ui, t] = quantile(sadd_dist, 0.99) - elseif sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii]) == 0.0 - sbounds[li, t] = ymeans[nx + 2nm + ii] - sbounds[ui, t] = ymeans[nx + 2nm + ii] - else - error("standard error is negative") - end - end - t == T && break - end - - # pull out paths - for n in 1:npaths - x, y = simulate(amf.lss, T) - for ii in 0:(nm - 1) - ypath[npaths * ii + n, :] = y[nx + ii + 1, :] - mpath[npaths * ii + n, :] = y[nx + nm + ii + 1, :] - spath[npaths * ii + n, :] = y[nx + 2nm + ii + 1, :] - tpath[npaths * ii + n, :] = y[nx + 3nm + ii + 1, :] - end - end - - add_figs = [] - - for ii in 0:(nm - 1) - li, ui = npaths * (ii), npaths * (ii + 1) - LI, UI = 2ii, 2(ii + 1) - push!(add_figs, - plot_given_paths(T, ypath[(li + 1):ui, :], mpath[(li + 1):ui, :], - spath[(li + 1):ui, :], - tpath[(li + 1):ui, :], mbounds[(LI + 1):UI, :], - sbounds[(LI + 1):UI, :], - show_trend = show_trend)) - end - return add_figs -end - -function plot_multiplicative(amf, T, npaths = 25, show_trend = true) - # pull out right sizes so we know how to increment - (; nx, nk, nm) = amf - # matrices for the 
multiplicative decomposition - H, g, upsilon_tilde = multiplicative_decomp(A, B, D, F, upsilon, nx) - - # allocate space (nm is the number of functionals - we want npaths for each) - mpath_mult = zeros(nm * npaths, T) - mbounds_mult = zeros(2nm, T) - spath_mult = zeros(nm * npaths, T) - sbounds_mult = zeros(2nm, T) - tpath_mult = zeros(nm * npaths, T) - ypath_mult = zeros(nm * npaths, T) - - # simulate for as long as we wanted - moment_generator = moment_sequence(amf.lss) - - # pull out population moments - for (t, x) in enumerate(moment_generator) - ymeans = x[2] - yvar = x[4] - - # lower and upper bounds - for each multiplicative functional - for ii in 1:nm - li, ui = 2(ii - 1) + 1, 2ii - if yvar[nx + nm + ii, nx + nm + ii] != 0.0 - Mdist = LogNormal(ymeans[nx + nm + ii] - - 0.5t * diag(H * H')[ii], - sqrt(yvar[nx + nm + ii, nx + nm + ii])) - mbounds_mult[li, t] = quantile(Mdist, 0.01) - mbounds_mult[ui, t] = quantile(Mdist, 0.99) - elseif yvar[nx + nm + ii, nx + nm + ii] == 0.0 - mbounds_mult[li, t] = exp.(ymeans[nx + nm + ii] - - 0.5t * diag(H * H')[ii]) - mbounds_mult[ui, t] = exp.(ymeans[nx + nm + ii] - - 0.5t * diag(H * H')[ii]) - else - error("standard error is negative") - end - if yvar[nx + 2nm + ii, nx + 2nm + ii] != 0.0 - Sdist = LogNormal(-ymeans[nx + 2nm + ii], - sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii])) - sbounds_mult[li, t] = quantile(Sdist, 0.01) - sbounds_mult[ui, t] = quantile(Sdist, 0.99) - elseif yvar[nx + 2nm + ii, nx + 2nm + ii] == 0.0 - sbounds_mult[li, t] = exp.(-ymeans[nx + 2nm + ii]) - sbounds_mult[ui, t] = exp.(-ymeans[nx + 2nm + ii]) - else - error("standard error is negative") - end - end - t == T && break - end - - # pull out paths - for n in 1:npaths - x, y = simulate(amf.lss, T) - for ii in 0:(nm - 1) - ypath_mult[npaths * ii + n, :] = exp.(y[nx + ii + 1, :]) - mpath_mult[npaths * ii + n, :] = exp.(y[nx + nm + ii + 1, :] - - collect(1:T) * 0.5 * - diag(H * H')[ii + 1]) - spath_mult[npaths * ii + n, :] = 1 ./ - exp.(-y[nx + 2 * nm + 
ii + 1, :]) - tpath_mult[npaths * ii + n, :] = exp.(y[nx + 3nm + ii + 1, :] + - (1:T) * 0.5 * - diag(H * H')[ii + 1]) - end - end - - mult_figs = [] - - for ii in 0:(nm - 1) - li, ui = npaths * ii, npaths * (ii + 1) - LI, UI = 2ii, 2(ii + 1) - push!(mult_figs, - plot_given_paths(T, ypath_mult[(li + 1):ui, :], - mpath_mult[(li + 1):ui, :], - spath_mult[(li + 1):ui, :], - tpath_mult[(li + 1):ui, :], - mbounds_mult[(LI + 1):UI, :], - sbounds_mult[(LI + 1):UI, :], - horline = 1.0, show_trend = show_trend)) - end - - return mult_figs -end - -function plot_martingales(amf, T, npaths = 25) - # pull out right sizes so we know how to increment - (; A, B, D, F, upsilon, nx, nk, nm) = amf - # matrices for the multiplicative decomposition - H, g, upsilon_tilde = multiplicative_decomp(A, B, D, F, upsilon, nx) - - # allocate space (nm is the number of functionals - we want npaths for each) - mpath_mult = zeros(nm * npaths, T) - mbounds_mult = zeros(2nm, T) - - # simulate for as long as we wanted - moment_generator = moment_sequence(amf.lss) - # pull out population moments - for (t, x) in enumerate(moment_generator) - ymeans = x[2] - yvar = x[4] - - # lower and upper bounds - for each functional - for ii in 1:nm - li, ui = 2(ii - 1) + 1, 2ii - if yvar[nx + nm + ii, nx + nm + ii] != 0.0 - Mdist = LogNormal(ymeans[nx + nm + ii] - - 0.5^2 * t * diag(H * H')[ii], - sqrt(yvar[nx + nm + ii, nx + nm + ii])) - mbounds_mult[li, t] = quantile(Mdist, 0.01) - mbounds_mult[ui, t] = quantile(Mdist, 0.99) - elseif yvar[nx + nm + ii, nx + nm + ii] == 0.0 - mbounds_mult[li, t] = ymeans[nx + nm + ii] - - 0.5^2 * t * diag(H * H')[ii] - mbounds_mult[ui, t] = ymeans[nx + nm + ii] - - 0.5t * diag(H * H')[ii] - else - error("standard error is negative") - end - end - t == T && break - end - - # pull out paths - for n in 1:npaths - x, y = simulate(amf.lss, T) - for ii in 0:(nm - 1) - mpath_mult[npaths * ii + n, :] = exp.(y[nx + nm + ii + 1, :] - - (1:T) * 0.5 * - diag(H * H')[ii + 1]) - end - end - - 
mart_figs = [] - - for ii in 0:(nm - 1) - li, ui = npaths * (ii), npaths * (ii + 1) - LI, UI = 2ii, 2(ii + 1) - push!(mart_figs, - plot_martingale_paths(T, mpath_mult[(li + 1):ui, :], - mbounds_mult[(LI + 1):UI, :], horline = 1)) - plot!(mart_figs[ii + 1], - title = L"Martingale components for many paths of $y_{ii + 1}$", - titlefontsize = 12) - end - - return mart_figs -end - -function plot_given_paths(T, ypath, mpath, spath, tpath, mbounds, sbounds; - horline = 0.0, show_trend = true) - - # allocate space - trange = 1:T - - # allocate transpose - mpath_T = Matrix(mpath') - - # create figure - plots = plot(layout = (2, 2), size = (800, 800)) - - # plot all paths together - - plot!(plots[1], trange, ypath[1, :], label = L"y_t", color = :black) - plot!(plots[1], trange, mpath[1, :], label = L"m_t", color = :magenta) - plot!(plots[1], trange, spath[1, :], label = L"s_t", color = :green) - if show_trend - plot!(plots[1], trange, tpath[1, :], label = L"t_t", color = :red) - end - plot!(plots[1], seriestype = :hline, [horline], color = :black, - linestyle = :dash, label = "") - plot!(plots[1], title = "One Path of All Variables", legend = :topleft, - titlefontsize = 12) - - # plot martingale component - plot!(plots[2], trange, mpath[1, :], color = :magenta, label = "") - plot!(plots[2], trange, mpath_T, alpha = 0.45, color = :magenta, label = "") - ub = mbounds[2, :] - lb = mbounds[1, :] - plot!(plots[2], ub, fillrange = lb, alpha = 0.25, color = :magenta, - label = "") - plot!(plots[2], seriestype = :hline, [horline], color = :black, - linestyle = :dash, label = "") - plot!(plots[2], title = "Martingale Components for Many Paths", - titlefontsize = 12) - - # plot stationary component - plot!(plots[3], spath[1, :], color = :green, label = "") - plot!(plots[3], Matrix(spath'), alpha = 0.25, color = :green, label = "") - ub = sbounds[2, :] - lb = sbounds[1, :] - plot!(plots[3], ub, fillrange = lb, alpha = 0.25, color = :green, - label = "") - plot!(plots[3], seriestype = 
:hline, [horline], color = :black, - linestyle = :dash, label = "") - plot!(plots[3], title = "Stationary Components for Many Paths", - titlefontsize = 12) - - # plot trend component - if show_trend == true - plot!(plots[4], Matrix(tpath'), color = :red, label = "") - end - plot!(plots[4], seriestype = :hline, [horline], color = :black, - linestyle = :dash, label = "") - plot!(plots[4], title = "Trend Components for Many Paths", - titlefontsize = 12) - - return plots -end - -function plot_martingale_paths(T, mpath, mbounds; - horline = 1, show_trend = false) - # allocate space - trange = 1:T - - # create the plot - plt = plot() - - # plot martingale component - ub = mbounds[2, :] - lb = mbounds[1, :] - - plot!(plt, ub, fillrange = lb, alpha = 0.25, color = :magenta, label = "") - plot!(plt, seriestype = :hline, [horline], color = :black, - linestyle = :dash, label = "") - plot!(plt, trange, Matrix(mpath'), linewidth = 0.25, color = :black, - label = "") - - return plt -end -``` - -For now, we just plot $y_t$ and $x_t$, postponing until later a description of exactly how we compute them. 
- -(addfunc_egcode)= -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(43); -``` - -```{code-cell} julia -phi_1, phi_2, phi_3, phi_4 = 0.5, -0.2, 0, 0.5 -sigma = 0.01 -upsilon = 0.01 # growth rate - -## A matrix should be n x n -A = [phi_1 phi_2 phi_3 phi_4; - 1 0 0 0; - 0 1 0 0; - 0 0 1 0] - -# B matrix should be n x k -B = [sigma, 0, 0, 0] - -D = [1 0 0 0] * A -F = [1, 0, 0, 0] ⋅ vec(B) - -amf = AMF_LSS_VAR(A, B, D, F, upsilon) - -T = 150 -x, y = simulate(amf.lss, T) - -plt_1 = plot() -plt_2 = plot() -plots = [plt_1, plt_2] - -plot!(plots[1], 1:T, y[amf.nx + 1, :], color = :black, lw = 2, label = "") -plot!(plots[1], title = L"A particular path of $y_t$", - titlefontsize = 12) -plot!(plots[2], 1:T, y[1, :], color = :green, lw = 2, label = "") -plot!(plots[2], seriestype = :hline, [0], color = :black, lw = 2, - linestyle = :dashdot, label = "") -plot!(plots[2], title = L"Associated path of $x_t$", - titlefontsize = 12) -plot(plots[1], plots[2], layout = (2, 1), size = (700, 600)) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - #test y[79] ≈ -0.07268127992877046 - #test y[amf.nx + 1, :][19] ≈ 0.0951768424187828 - @test F ≈ 0.01 && T == 150 # a few constants. -end -``` - -Notice the irregular but persistent growth in $y_t$. 
- -### Decomposition - -Hansen and Sargent {cite}`hans-sarg-book2016` describe how to construct a decomposition of -an additive functional into four parts: - -- a constant inherited from initial values $x_0$ and $y_0$ -- a linear trend -- a martingale -- an (asymptotically) stationary component - -To attain this decomposition for the particular class of additive -functionals defined by {eq}`old1_additive_functionals` and {eq}`old2_additive_functionals`, we first construct the matrices - -$$ -\begin{aligned} - H & := F + B'(I - A')^{-1} D - \\ - g & := D' (I - A)^{-1} -\end{aligned} -$$ - -Then the Hansen-Scheinkman {cite}`hans-scheink2009` decomposition is - -$$ -\begin{aligned} - y_t - &= \underbrace{t \nu}_{\text{trend component}} + - \overbrace{\sum_{j=1}^t H z_j}^{\text{Martingale component}} - - \underbrace{g x_t}_{\text{stationary component}} + - \overbrace{g x_0 + y_0}^{\text{initial conditions}} -\end{aligned} -$$ - -At this stage you should pause and verify that $y_{t+1} - y_t$ satisfies {eq}`old2_additive_functionals`. - -It is convenient for us to introduce the following notation: - -- $\tau_t = \nu t$ , a linear, deterministic trend -- $m_t = \sum_{j=1}^t H z_j$, a martingale with time $t+1$ increment $H z_{t+1}$ -- $s_t = g x_t$, an (asymptotically) stationary component - -We want to characterize and simulate components $\tau_t, m_t, s_t$ of the decomposition. - -A convenient way to do this is to construct an appropriate instance of a {doc}`linear state space system <../introduction_dynamics/linear_models>` by using [LSS](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/lss.jl) from [QuantEcon.jl](http://quantecon.org/quantecon-jl). - -This will allow us to use the routines in [LSS](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/lss.jl) to study dynamics. 
- -To start, observe that, under the dynamics in {eq}`old1_additive_functionals` and {eq}`old2_additive_functionals` and with the -definitions just given, - -$$ -\begin{bmatrix} - 1 \\ - t+1 \\ - x_{t+1} \\ - y_{t+1} \\ - m_{t+1} -\end{bmatrix} -= -\begin{bmatrix} - 1 & 0 & 0 & 0 & 0 \\ - 1 & 1 & 0 & 0 & 0 \\ - 0 & 0 & A & 0 & 0 \\ - \nu & 0 & D' & 1 & 0 \\ - 0 & 0 & 0 & 0 & 1 -\end{bmatrix} -\begin{bmatrix} - 1 \\ - t \\ - x_t \\ - y_t \\ - m_t -\end{bmatrix} -+ -\begin{bmatrix} - 0 \\ - 0 \\ - B \\ - F' \\ - H' -\end{bmatrix} -z_{t+1} -$$ - -and - -$$ -\begin{bmatrix} - x_t \\ - y_t \\ - \tau_t \\ - m_t \\ - s_t -\end{bmatrix} -= -\begin{bmatrix} - 0 & 0 & I & 0 & 0 \\ - 0 & 0 & 0 & 1 & 0 \\ - 0 & \nu & 0 & 0 & 0 \\ - 0 & 0 & 0 & 0 & 1 \\ - 0 & 0 & -g & 0 & 0 -\end{bmatrix} -\begin{bmatrix} - 1 \\ - t \\ - x_t \\ - y_t \\ - m_t -\end{bmatrix} -$$ - -With - -$$ -\tilde{x} := \begin{bmatrix} 1 \\ t \\ x_t \\ y_t \\ m_t \end{bmatrix} -\quad \text{and} \quad -\tilde{y} := \begin{bmatrix} x_t \\ y_t \\ \tau_t \\ m_t \\ s_t \end{bmatrix} -$$ - -we can write this as the linear state space system - -$$ -\begin{aligned} - \tilde{x}_{t+1} &= \tilde{A} \tilde{x}_t + \tilde{B} z_{t+1} \\ - \tilde{y}_{t} &= \tilde{D} \tilde{x}_t -\end{aligned} -$$ - -By picking out components of $\tilde y_t$, we can track all variables of -interest. - -## Code - -The type [AMF_LSS_VAR](https://github.com/QuantEcon/QuantEcon.lectures.code/blob/master/additive_functionals/amflss.jl) mentioned above does all that we want to study our additive functional. - -In fact [AMF_LSS_VAR](https://github.com/QuantEcon/QuantEcon.lectures.code/blob/master/additive_functionals/amflss.jl) does more, as we shall explain below. - -(A hint that it does more is the name of the type -- here AMF stands for -"additive and multiplicative functional" -- the code will do things for -multiplicative functionals too) - -Let's use this code (embedded above) to explore the {ref}`example process described above `. 
- -If you run {ref}`the code that first simulated that example ` again and then the method call -you will generate (modulo randomness) the plot - -```{code-cell} julia -plt = plot_additive(amf, T) -plt[1] -``` - -When we plot multiple realizations of a component in the 2nd, 3rd, and 4th panels, we also plot population 95% probability coverage sets computed using the LSS type. - -We have chosen to simulate many paths, all starting from the *same* nonrandom initial conditions $x_0, y_0$ (you can tell this from the shape of the 95% probability coverage shaded areas). - -Notice tell-tale signs of these probability coverage shaded areas - -* the purple one for the martingale component $m_t$ grows with - $\sqrt{t}$ -* the green one for the stationary component $s_t$ converges to a - constant band - -### An associated multiplicative functional - -Where $\{y_t\}$ is our additive functional, let $M_t = \exp(y_t)$. - -As mentioned above, the process $\{M_t\}$ is called a **multiplicative functional**. - -Corresponding to the additive decomposition described above we have the multiplicative decomposition of the $M_t$ - -$$ -\frac{M_t}{M_0} -= \exp (t \nu) \exp \Bigl(\sum_{j=1}^t H \cdot Z_j \Bigr) \exp \biggl( D'(I-A)^{-1} x_0 - D'(I-A)^{-1} x_t \biggr) -$$ - -or - -$$ -\frac{M_t}{M_0} = \exp\left( \tilde \nu t \right) \Biggl( \frac{\widetilde M_t}{\widetilde M_0}\Biggr) \left( \frac{\tilde e (X_0)} {\tilde e(x_t)} \right) -$$ - -where - -$$ -\tilde \nu = \nu + \frac{H \cdot H}{2} , -\quad -\widetilde M_t = \exp \biggl( \sum_{j=1}^t \biggl(H \cdot z_j -\frac{ H \cdot H }{2} \biggr) \biggr), \quad \widetilde M_0 =1 -$$ - -and - -$$ -\tilde e(x) = \exp[g(x)] = \exp \bigl[ D' (I - A)^{-1} x \bigr] -$$ - -An instance of type [AMF_LSS_VAR](https://github.com/QuantEcon/QuantEcon.lectures.code/blob/master/additive_functionals/amflss.jl) includes this associated multiplicative functional as an attribute. - -Let's plot this multiplicative functional for our example. 
- -If you run {ref}`the code that first simulated that example ` again and then the method call - -```{code-cell} julia -plt = plot_multiplicative(amf, T) -plt[1] -``` - -As before, when we plotted multiple realizations of a component in the 2nd, 3rd, and 4th panels, we also plotted population 95% confidence bands computed using the LSS type. - -Comparing this figure and the last also helps show how geometric growth differs from -arithmetic growth. - -### A peculiar large sample property - -Hansen and Sargent {cite}`hans-sarg-book2016` (ch. 6) note that the martingale component -$\widetilde M_t$ of the multiplicative decomposition has a peculiar property. - -* While $E_0 \widetilde M_t = 1$ for all $t \geq 0$, - nevertheless $\ldots$. -* As $t \rightarrow +\infty$, $\widetilde M_t$ converges to - zero almost surely. - -The following simulation of many paths of $\widetilde M_t$ illustrates this property - - -```{code-cell} julia -plt = plot_martingales(amf, 12000) -plt[1] -``` - diff --git a/lectures/time_series_models/arma.md b/lectures/time_series_models/arma.md deleted file mode 100644 index b4c1e096..00000000 --- a/lectures/time_series_models/arma.md +++ /dev/null @@ -1,931 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(arma)= -```{raw} html - -``` - -# {index}`Covariance Stationary Processes ` - -```{contents} Contents -:depth: 2 -``` - -## Overview - -In this lecture we study covariance stationary linear stochastic processes, a -class of models routinely used to study economic and financial time series. - -This class has the advantage of being - -1. simple enough to be described by an elegant and comprehensive theory -1. relatively broad in terms of the kinds of dynamics it can represent - -We consider these models in both the time and frequency domain. 
- -### {index}`ARMA Processes ` - -We will focus much of our attention on linear covariance stationary models with a finite number of parameters. - -In particular, we will study stationary ARMA processes, which form a cornerstone of the standard theory of time series analysis. - -Every ARMA processes can be represented in {doc}`linear state space <../introduction_dynamics/linear_models>` form. - -However, ARMA have some important structure that makes it valuable to study them separately. - -### {index}`Spectral Analysis ` - -Analysis in the frequency domain is also called spectral analysis. - -In essence, spectral analysis provides an alternative representation of the -autocovariance function of a covariance stationary process. - -Having a second representation of this important object - -* shines light on the dynamics of the process in question -* allows for a simpler, more tractable representation in some important cases - -The famous *Fourier transform* and its inverse are used to map between the two representations. - -### Other Reading - -For supplementary reading, see. - -```{only} html -* {cite}`Ljungqvist2012`, chapter 2 -* {cite}`Sargent1987`, chapter 11 -* John Cochrane's notes on time series analysis, chapter 8 -* {cite}`Shiryaev1995`, chapter 6 -* {cite}`CryerChan2008`, all -``` - -```{only} latex -* {cite}`Ljungqvist2012`, chapter 2 -* {cite}`Sargent1987`, chapter 11 -* John Cochrane's [notes on time series analysis](https://lectures.quantecon.org/_downloads/time_series_book.pdf), chapter 8 -* {cite}`Shiryaev1995`, chapter 6 -* {cite}`CryerChan2008`, all -``` - -## Introduction - -Consider a sequence of random variables $\{ X_t \}$ indexed by $t \in \mathbb Z$ and taking values in $\mathbb R$. - -Thus, $\{ X_t \}$ begins in the infinite past and extends to the infinite future --- a convenient and standard assumption. - -As in other fields, successful economic modeling typically assumes the existence of features that are constant over time. 
- -If these assumptions are correct, then each new observation $X_t, X_{t+1},\ldots$ can provide additional information about the time-invariant features, allowing us to learn from as data arrive. - -For this reason, we will focus in what follows on processes that are *stationary* --- or become so after a transformation -(see for example {doc}`this lecture <../time_series_models/additive_functionals>` and {doc}`this lecture <../time_series_models/multiplicative_functionals>`). - -(arma_defs)= -### Definitions - -```{index} single: Covariance Stationary -``` - -A real-valued stochastic process $\{ X_t \}$ is called *covariance stationary* if - -1. Its mean $\mu := \mathbb E X_t$ does not depend on $t$. -1. For all $k$ in $\mathbb Z$, the $k$-th autocovariance $\gamma(k) := \mathbb E (X_t - \mu)(X_{t + k} - \mu)$ is finite and depends only on $k$. - -The function $\gamma \colon \mathbb Z \to \mathbb R$ is called the *autocovariance function* of the process. - -Throughout this lecture, we will work exclusively with zero-mean (i.e., $\mu = 0$) covariance stationary processes. - -The zero-mean assumption costs nothing in terms of generality, since working with non-zero-mean processes involves no more than adding a constant. - -### Example 1: {index}`White Noise ` - -Perhaps the simplest class of covariance stationary processes is the white noise processes. - -A process $\{ \epsilon_t \}$ is called a *white noise process* if - -1. $\mathbb E \epsilon_t = 0$ -1. $\gamma(k) = \sigma^2 \mathbf 1\{k = 0\}$ for some $\sigma > 0$ - -(Here $\mathbf 1\{k = 0\}$ is defined to be 1 if $k = 0$ and zero otherwise) - -White noise processes play the role of **building blocks** for processes with more complicated dynamics. 
- -(generalized_lps)= -### Example 2: {index}`General Linear Processes ` - -From the simple building block provided by white noise, we can construct a very flexible family of covariance stationary processes --- the *general linear processes* - -```{math} -:label: ma_inf - -X_t = \sum_{j=0}^{\infty} \psi_j \epsilon_{t-j}, -\qquad t \in \mathbb Z -``` - -where - -* $\{\epsilon_t\}$ is white noise -* $\{\psi_t\}$ is a square summable sequence in $\mathbb R$ (that is, $\sum_{t=0}^{\infty} \psi_t^2 < \infty$) - -The sequence $\{\psi_t\}$ is often called a *linear filter*. - -Equation {eq}`ma_inf` is said to present a **moving average** process or a moving average representation. - -With some manipulations it is possible to confirm that the autocovariance function for {eq}`ma_inf` is - -```{math} -:label: ma_inf_ac - -\gamma(k) = \sigma^2 \sum_{j=0}^{\infty} \psi_j \psi_{j+k} -``` - -By the [Cauchy-Schwartz inequality](https://en.wikipedia.org/wiki/Cauchy%E2%80%93Schwarz_inequality) one can show that $\gamma(k)$ satisfies equation {eq}`ma_inf_ac`. - -Evidently, $\gamma(k)$ does not depend on $t$. - -### {index}`Wold's Decomposition ` - -Remarkably, the class of general linear processes goes a long way towards -describing the entire class of zero-mean covariance stationary processes. - -In particular, [Wold's decomposition theorem](https://en.wikipedia.org/wiki/Wold%27s_theorem) states that every -zero-mean covariance stationary process $\{X_t\}$ can be written as - -$$ -X_t = \sum_{j=0}^{\infty} \psi_j \epsilon_{t-j} + \eta_t -$$ - -where - -* $\{\epsilon_t\}$ is white noise -* $\{\psi_t\}$ is square summable -* $\eta_t$ can be expressed as a linear function of $X_{t-1}, X_{t-2},\ldots$ and is perfectly predictable over arbitrarily long horizons - -For intuition and further discussion, see {cite}`Sargent1987`, p. 286. 
- -### AR and MA - -```{index} single: Covariance Stationary Processes; AR -``` - -```{index} single: Covariance Stationary Processes; MA -``` - -General linear processes are a very broad class of processes. - -It often pays to specialize to those for which there exists a representation having only finitely many parameters. - -(Experience and theory combine to indicate that models with a relatively small number of parameters typically perform better than larger models, especially for forecasting) - -One very simple example of such a model is the first-order autoregressive or AR(1) process - -```{math} -:label: ar1_rep - -X_t = \phi X_{t-1} + \epsilon_t -\quad \text{where} \quad -| \phi | < 1 -\quad \text{and } \{ \epsilon_t \} \text{ is white noise} -``` - -By direct substitution, it is easy to verify that $X_t = \sum_{j=0}^{\infty} \phi^j \epsilon_{t-j}$. - -Hence $\{X_t\}$ is a general linear process. - -Applying {eq}`ma_inf_ac` to the previous expression for $X_t$, we get the AR(1) autocovariance function - -```{math} -:label: ar_acov - -\gamma(k) = \phi^k \frac{\sigma^2}{1 - \phi^2}, -\qquad k = 0, 1, \ldots -``` - -The next figure plots an example of this function for $\phi = 0.8$ and $\phi = -0.8$ with $\sigma = 1$ - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test -``` - -```{code-cell} julia -using LaTeXStrings, Plots -using LinearAlgebra, Statistics - -plt_1 = plot() -plt_2 = plot() -plots = [plt_1, plt_2] - -for (i, phi) in enumerate((0.8, -0.8)) - times = 0:16 - acov = [phi .^ k ./ (1 - phi .^ 2) for k in times] - label = L"autocovariance, $\phi = %$phi$" - plot!(plots[i], times, acov, color = :blue, lw = 2, marker = :circle, - markersize = 3, - alpha = 0.6, label = label) - plot!(plots[i], legend = :topright, xlabel = "time", xlim = (0, 15)) - plot!(plots[i], seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, - lw = 2, label = "") -end -plot(plots[1], plots[2], layout = (2, 1), size = (700, 500)) -``` - -```{code-cell} julia ---- 
-tags: [remove-cell] ---- -@testset begin - phi = 0.8 - times = 0:16 - acov = [phi.^k ./ (1 - phi.^2) for k in times] - @test acov[4] ≈ 1.422222222222223 # Can't access acov directly because of scoping. - @test acov[1] ≈ 2.7777777777777786 -end -``` - -Another very simple process is the MA(1) process (here MA means "moving average") - -$$ -X_t = \epsilon_t + \theta \epsilon_{t-1} -$$ - -You will be able to verify that - -$$ -\gamma(0) = \sigma^2 (1 + \theta^2), -\quad -\gamma(1) = \sigma^2 \theta, -\quad \text{and} \quad -\gamma(k) = 0 \quad \forall \, k > 1 -$$ - -The AR(1) can be generalized to an AR($p$) and likewise for the MA(1). - -Putting all of this together, we get the - -### {index}`ARMA ` Processes - -A stochastic process $\{X_t\}$ is called an *autoregressive moving -average process*, or ARMA($p,q$), if it can be written as - -```{math} -:label: arma - -X_t = \phi_1 X_{t-1} + \cdots + \phi_p X_{t-p} + - \epsilon_t + \theta_1 \epsilon_{t-1} + \cdots + \theta_q \epsilon_{t-q} -``` - -where $\{ \epsilon_t \}$ is white noise. - -An alternative notation for ARMA processes uses the *lag operator* $L$. - -**Def.** Given arbitrary variable $Y_t$, let $L^k Y_t := Y_{t-k}$. 
- -It turns out that - -* lag operators facilitate succinct representations for linear stochastic processes -* algebraic manipulations that treat the lag operator as an ordinary scalar are legitimate - -Using $L$, we can rewrite {eq}`arma` as - -```{math} -:label: arma_lag - -L^0 X_t - \phi_1 L^1 X_t - \cdots - \phi_p L^p X_t = L^0 \epsilon_t + \theta_1 L^1 \epsilon_t + \cdots + \theta_q L^q \epsilon_t -``` - -If we let $\phi(z)$ and $\theta(z)$ be the polynomials - -```{math} -:label: arma_poly - -\phi(z) := 1 - \phi_1 z - \cdots - \phi_p z^p -\quad \text{and} \quad -\theta(z) := 1 + \theta_1 z + \cdots + \theta_q z^q -``` - -then {eq}`arma_lag` becomes - -```{math} -:label: arma_lag1 - -\phi(L) X_t = \theta(L) \epsilon_t -``` - -In what follows we **always assume** that the roots of the polynomial $\phi(z)$ lie outside the unit circle in the complex plane. - -This condition is sufficient to guarantee that the ARMA($p,q$) process is convariance stationary. - -In fact it implies that the process falls within the class of general linear processes {ref}`described above `. - -That is, given an ARMA($p,q$) process $\{ X_t \}$ satisfying the unit circle condition, there exists a square summable sequence $\{\psi_t\}$ with $X_t = \sum_{j=0}^{\infty} \psi_j \epsilon_{t-j}$ for all $t$. - -The sequence $\{\psi_t\}$ can be obtained by a recursive procedure outlined on page 79 of {cite}`CryerChan2008`. - -The function $t \mapsto \psi_t$ is often called the *impulse response function*. - -## {index}`Spectral Analysis ` - -Autocovariance functions provide a great deal of information about covariance stationary processes. - -In fact, for zero-mean Gaussian processes, the autocovariance function characterizes the entire joint distribution. - -Even for non-Gaussian processes, it provides a significant amount of information. - -It turns out that there is an alternative representation of the autocovariance function of a covariance stationary process, called the *spectral density*. 
- -At times, the spectral density is easier to derive, easier to manipulate, and provides additional intuition. - -### {index}`Complex Numbers ` - -Before discussing the spectral density, we invite you to recall the main properties of complex numbers (or {ref}`skip to the next section `). - -It can be helpful to remember that, in a formal sense, complex numbers are just points $(x, y) \in \mathbb R^2$ endowed with a specific notion of multiplication. - -When $(x, y)$ is regarded as a complex number, $x$ is called the *real part* and $y$ is called the *imaginary part*. - -The *modulus* or *absolute value* of a complex number $z = (x, y)$ is just its Euclidean norm in $\mathbb R^2$, but is usually written as $|z|$ instead of $\|z\|$. - -The product of two complex numbers $(x, y)$ and $(u, v)$ is defined to be $(xu - vy, xv + yu)$, while addition is standard pointwise vector addition. - -When endowed with these notions of multiplication and addition, the set of complex numbers forms a [field](https://en.wikipedia.org/wiki/Field_%28mathematics%29) --- addition and multiplication play well together, just as they do in $\mathbb R$. - -The complex number $(x, y)$ is often written as $x + i y$, where $i$ is called the *imaginary unit*, and is understood to obey $i^2 = -1$. - -The $x + i y$ notation provides an easy way to remember the definition of multiplication given above, because, proceeding naively, - -$$ -(x + i y) (u + i v) = xu - yv + i (xv + yu) -$$ - -Converted back to our first notation, this becomes $(xu - vy, xv + yu)$ as promised. - -Complex numbers can be represented in the polar form $r e^{i \omega}$ where - -$$ -r e^{i \omega} := r (\cos(\omega) + i \sin(\omega)) = x + i y -$$ - -where $x = r \cos(\omega), y = r \sin(\omega)$, and $\omega = \arctan(y/z)$ or $\tan(\omega) = y/x$. 
- -(arma_specd)= -### {index}`Spectral Densities ` - -Let $\{ X_t \}$ be a covariance stationary process with autocovariance function $\gamma$ satisfying $\sum_{k} \gamma(k)^2 < \infty$. - -The *spectral density* $f$ of $\{ X_t \}$ is defined as the [discrete time Fourier transform](https://en.wikipedia.org/wiki/Discrete-time_Fourier_transform) of its autocovariance function $\gamma$ - -$$ -f(\omega) := \sum_{k \in \mathbb Z} \gamma(k) e^{-i \omega k}, -\qquad \omega \in \mathbb R -$$ - -(Some authors normalize the expression on the right by constants such as $1/\pi$ --- the convention chosen makes little difference provided you are consistent) - -Using the fact that $\gamma$ is *even*, in the sense that $\gamma(t) = \gamma(-t)$ for all $t$, we can show that - -```{math} -:label: arma_sd_cos - -f(\omega) = \gamma(0) + 2 \sum_{k \geq 1} \gamma(k) \cos(\omega k) -``` - -It is not difficult to confirm that $f$ is - -* real-valued -* even ($f(\omega) = f(-\omega)$ ), and -* $2\pi$-periodic, in the sense that $f(2\pi + \omega) = f(\omega)$ for all $\omega$ - -It follows that the values of $f$ on $[0, \pi]$ determine the values of $f$ on -all of $\mathbb R$ --- the proof is an exercise. - -For this reason it is standard to plot the spectral density only on the interval $[0, \pi]$. - -(arma_wnsd)= -### Example 1: {index}`White Noise ` - -Consider a white noise process $\{\epsilon_t\}$ with standard deviation $\sigma$. - -It is easy to check that in this case $f(\omega) = \sigma^2$. So $f$ is a constant function. - -As we will see, this can be interpreted as meaning that "all frequencies are equally present". 
- -(White light has this property when frequency refers to the visible spectrum, a connection that provides the origins of the term "white noise") - -### Example 2: {index}`AR ` and {index}`MA ` and {index}`ARMA ` - -(ar1_acov)= -It is an exercise to show that the MA(1) process $X_t = \theta \epsilon_{t-1} + \epsilon_t$ has spectral density - -```{math} -:label: ma1_sd_ed - -f(\omega) -= \sigma^2 ( 1 + 2 \theta \cos(\omega) + \theta^2 ) -``` - -With a bit more effort, it is possible to show (see, e.g., p. 261 of {cite}`Sargent1987`) that the spectral density of the AR(1) process $X_t = \phi X_{t-1} + \epsilon_t$ is - -```{math} -:label: ar1_sd_ed - -f(\omega) -= \frac{\sigma^2}{ 1 - 2 \phi \cos(\omega) + \phi^2 } -``` - -More generally, it can be shown that the spectral density of the ARMA process {eq}`arma` is - -(arma_spec_den)= -```{math} -:label: arma_sd - -f(\omega) = \left| \frac{\theta(e^{i\omega})}{\phi(e^{i\omega})} \right|^2 \sigma^2 -``` - -where - -* $\sigma$ is the standard deviation of the white noise process $\{\epsilon_t\}$ -* the polynomials $\phi(\cdot)$ and $\theta(\cdot)$ are as defined in {eq}`arma_poly` - -The derivation of {eq}`arma_sd` uses the fact that convolutions become products under Fourier transformations. - -The proof is elegant and can be found in many places --- see, for example, {cite}`Sargent1987`, chapter 11, section 4. - -It is a nice exercise to verify that {eq}`ma1_sd_ed` and {eq}`ar1_sd_ed` are indeed special cases of {eq}`arma_sd`. 
- -### Interpreting the {index}`Spectral Density ` - -```{index} single: Spectral Density; interpretation -``` - -Plotting {eq}`ar1_sd_ed` reveals the shape of the spectral density for the AR(1) model when $\phi$ takes the values 0.8 and -0.8 respectively - -```{code-cell} julia -ar1_sd(phi, omega) = 1 ./ (1 .- 2 * phi * cos.(omega) .+ phi .^ 2) - -omega_s = range(0, pi, length = 180) - -plt_1 = plot() -plt_2 = plot() -plots = [plt_1, plt_2] - -for (i, phi) in enumerate((0.8, -0.8)) - sd = ar1_sd(phi, omega_s) - label = L"spectral density, $\phi = %$phi$" - plot!(plots[i], omega_s, sd, color = :blue, alpha = 0.6, lw = 2, - label = label) - plot!(plots[i], legend = :top, xlabel = "frequency", xlim = (0, pi)) -end -plot(plots[1], plots[2], layout = (2, 1), size = (700, 500)) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - @test ar1_sd(0.8, omega_s)[18] ≈ 9.034248169239635 - @test ar1_sd(-0.8, omega_s)[18] ≈ 0.3155260821833043 - @test omega_s[1] == 0.0 && omega_s[end] ≈ pi && length(omega_s) == 180 # Grid invariant. -end -``` - -These spectral densities correspond to the autocovariance functions for the -AR(1) process {ref}`shown above `. - -Informally, we think of the spectral density as being large at those $\omega \in [0, \pi]$ at which -the autocovariance function seems approximately to exhibit big damped cycles. - -To see the idea, let's consider why, in the lower panel of the preceding figure, the spectral density for the case $\phi = -0.8$ is large at $\omega = \pi$. - -Recall that the spectral density can be expressed as - -```{math} -:label: sumpr - -f(\omega) -= \gamma(0) + 2 \sum_{k \geq 1} \gamma(k) \cos(\omega k) -= \gamma(0) + 2 \sum_{k \geq 1} (-0.8)^k \cos(\omega k) -``` - -When we evaluate this at $\omega = \pi$, we get a large number because -$\cos(\pi k)$ is large and positive when $(-0.8)^k$ is -positive, and large in absolute value and negative when $(-0.8)^k$ is negative. 
- -Hence the product is always large and positive, and hence the sum of the -products on the right-hand side of {eq}`sumpr` is large. - -These ideas are illustrated in the next figure, which has $k$ on the horizontal axis - -```{code-cell} julia -phi = -0.8 -times = 0:16 -y1 = [phi .^ k ./ (1 - phi .^ 2) for k in times] -y2 = [cos.(pi * k) for k in times] -y3 = [a * b for (a, b) in zip(y1, y2)] - -# Autocovariance when phi = -0.8 -plt_1 = plot(times, y1, color = :blue, lw = 2, marker = :circle, markersize = 3, - alpha = 0.6, label = L"\gamma(k)") -plot!(plt_1, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, - lw = 2, label = "") -plot!(plt_1, legend = :topright, xlim = (0, 15), yticks = [-2, 0, 2]) - -# Cycles at frequence pi -plt_2 = plot(times, y2, color = :blue, lw = 2, marker = :circle, markersize = 3, - alpha = 0.6, label = L"cos(\pi k)") -plot!(plt_2, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, - lw = 2, label = "") -plot!(plt_2, legend = :topright, xlim = (0, 15), yticks = [-1, 0, 1]) - -# Product -plt_3 = plot(times, y3, seriestype = :sticks, marker = :circle, markersize = 3, - lw = 2, label = L"\gamma(k) cos(\pi k)") -plot!(plt_3, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, - lw = 2, label = "") -plot!(plt_3, legend = :topright, xlim = (0, 15), ylim = (-3, 3), - yticks = [-1, 0, 1, 2, 3]) - -plot(plt_1, plt_2, plt_3, layout = (3, 1), size = (800, 600)) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - @test y1[4] ≈ -1.422222222222223 - @test y2 == [1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - 1.0, -1.0, 1.0] - @test y3[15] ≈ 0.12216795864177792 -end -``` - -On the other hand, if we evaluate $f(\omega)$ at $\omega = \pi / 3$, then the cycles are -not matched, the sequence $\gamma(k) \cos(\omega k)$ contains -both positive and negative terms, and hence the sum of these terms is much smaller - -```{code-cell} julia -phi = -0.8 -times = 0:16 -y1 = [phi .^ k ./ 
(1 - phi .^ 2) for k in times] -y2 = [cos.(pi * k / 3) for k in times] -y3 = [a * b for (a, b) in zip(y1, y2)] - -# Autocovariance when phi = -0.8 -plt_1 = plot(times, y1, color = :blue, lw = 2, marker = :circle, markersize = 3, - alpha = 0.6, label = L"\gamma(k)") -plot!(plt_1, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, - lw = 2, label = "") -plot!(plt_1, legend = :topright, xlim = (0, 15), yticks = [-2, 0, 2]) - -# Cycles at frequence pi -plt_2 = plot(times, y2, color = :blue, lw = 2, marker = :circle, markersize = 3, - alpha = 0.6, label = L"cos(\pi k/3)") -plot!(plt_2, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, - lw = 2, label = "") -plot!(plt_2, legend = :topright, xlim = (0, 15), yticks = [-1, 0, 1]) - -# Product -plt_3 = plot(times, y3, seriestype = :sticks, marker = :circle, markersize = 3, - lw = 2, label = L"\gamma(k) cos(\pi k/3)") -plot!(plt_3, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, - lw = 2, label = "") -plot!(plt_3, legend = :topright, xlim = (0, 15), ylim = (-3, 3), - yticks = [-1, 0, 1, 2, 3]) - -plot(plt_1, plt_2, plt_3, layout = (3, 1), size = (600, 600)) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - @test y1[4] ≈ -1.422222222222223 - @test y2 ≈ [1.0, 0.5, -0.5, -1.0, -0.5, 0.5, 1.0, 0.5, -0.5, -1.0, -0.5, 0.5, 1.0, 0.5, - -0.5, -1.0, -0.5] - @test y3[15] ≈ -0.06108397932088883 -end -``` - -In summary, the spectral density is large at frequencies $\omega$ where the autocovariance function exhibits damped cycles. - -### Inverting the Transformation - -```{index} single: Spectral Density; Inverting the Transformation -``` - -We have just seen that the spectral density is useful in the sense that it provides a frequency-based perspective on the autocovariance structure of a covariance stationary process. - -Another reason that the spectral density is useful is that it can be "inverted" to recover the autocovariance function via the *inverse Fourier transform*. 
- -In particular, for all $k \in \mathbb Z$, we have - -```{math} -:label: ift - -\gamma(k) = \frac{1}{2 \pi} \int_{-\pi}^{\pi} f(\omega) e^{i \omega k} d\omega -``` - -This is convenient in situations where the spectral density is easier to calculate and manipulate than the autocovariance function. - -(For example, the expression {eq}`arma_sd` for the ARMA spectral density is much easier to work with than the expression for the ARMA autocovariance) - -### Mathematical Theory - -```{index} single: Spectral Density; Mathematical Theory -``` - -This section is loosely based on {cite}`Sargent1987`, p. 249-253, and included for those who - -* would like a bit more insight into spectral densities -* and have at least some background in [Hilbert space](https://en.wikipedia.org/wiki/Hilbert_space) theory - -Others should feel free to skip to the {ref}`next section ` --- none of this material is necessary to progress to computation. - -Recall that every [separable](https://en.wikipedia.org/wiki/Separable_space) Hilbert space $H$ has a countable orthonormal basis $\{ h_k \}$. - -The nice thing about such a basis is that every $f \in H$ satisfies - -```{math} -:label: arma_fc - -f = \sum_k \alpha_k h_k -\quad \text{where} \quad -\alpha_k := \langle f, h_k \rangle -``` - -where $\langle \cdot, \cdot \rangle$ denotes the inner product in $H$. - -Thus, $f$ can be represented to any degree of precision by linearly combining basis vectors. - -The scalar sequence $\alpha = \{\alpha_k\}$ is called the *Fourier coefficients* of $f$, and satisfies $\sum_k |\alpha_k|^2 < \infty$. - -In other words, $\alpha$ is in $\ell_2$, the set of square summable sequences. - -Consider an operator $T$ that maps $\alpha \in \ell_2$ into its expansion $\sum_k \alpha_k h_k \in H$. - -The Fourier coefficients of $T\alpha$ are just $\alpha = \{ \alpha_k \}$, as you can verify by confirming that $\langle T \alpha, h_k \rangle = \alpha_k$. 
-
-Using elementary results from Hilbert space theory, it can be shown that
-
-* $T$ is one-to-one --- if $\alpha$ and $\beta$ are distinct in $\ell_2$, then so are their expansions in $H$
-* $T$ is onto --- if $f \in H$ then its preimage in $\ell_2$ is the sequence $\alpha$ given by $\alpha_k = \langle f, h_k \rangle$
-* $T$ is a linear isometry --- in particular $\langle \alpha, \beta \rangle = \langle T\alpha, T\beta \rangle$
-
-Summarizing these results, we say that any separable Hilbert space is isometrically isomorphic to $\ell_2$.
-
-In essence, this says that each separable Hilbert space we consider is just a different way of looking at the fundamental space $\ell_2$.
-
-With this in mind, let's specialize to a setting where
-
-* $\gamma \in \ell_2$ is the autocovariance function of a covariance stationary process, and $f$ is the spectral density
-* $H = L_2$, where $L_2$ is the set of square summable functions on the interval $[-\pi, \pi]$, with inner product $\langle g, h \rangle = \int_{-\pi}^{\pi} g(\omega) h(\omega) d \omega$
-* $\{h_k\} =$ the orthonormal basis for $L_2$ given by the set of trigonometric functions
-
-$$
-h_k(\omega) = \frac{e^{i \omega k}}{\sqrt{2 \pi}},
-\quad k \in \mathbb Z,
-\quad \omega \in [-\pi, \pi]
-$$
-
-Using the definition of $T$ from above and the fact that $f$ is even, we now have
-
-```{math}
-:label: arma_it
-
-T \gamma
-= \sum_{k \in \mathbb Z}
-\gamma(k) \frac{e^{i \omega k}}{\sqrt{2 \pi}} = \frac{1}{\sqrt{2 \pi}} f(\omega)
-```
-
-In other words, apart from a scalar multiple, the spectral density is just a transformation of $\gamma \in \ell_2$ under a certain linear isometry --- a different way to view $\gamma$.
-
-In particular, it is an expansion of the autocovariance function with respect to the trigonometric basis functions in $L_2$.
-
-As discussed above, the Fourier coefficients of $T \gamma$ are given by the sequence $\gamma$, and,
-in particular, $\gamma(k) = \langle T \gamma, h_k \rangle$.
- -Transforming this inner product into its integral expression and using {eq}`arma_it` gives -{eq}`ift`, justifying our earlier expression for the inverse transform. - -(arma_imp)= -## Implementation - -Most code for working with covariance stationary models deals with ARMA models. - -Julia code for studying ARMA models can be found in the `DSP.jl` package. - -Since this code doesn't quite cover our needs --- particularly vis-a-vis spectral analysis --- we've put together the module [arma.jl](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/arma.jl), which is part of [QuantEcon.jl](http://quantecon.org/quantecon-jl) package. - -The module provides functions for mapping ARMA($p,q$) models into their - -1. impulse response function -1. simulated time series -1. autocovariance function -1. spectral density - -### Application - -Let's use this code to replicate the plots on pages 68--69 of {cite}`Ljungqvist2012`. - -Here are some functions to generate the plots - -```{code-cell} julia -using QuantEcon, Random - -# plot functions -function plot_spectral_density(arma, plt) - (w, spect) = spectral_density(arma, two_pi = false) - plot!(plt, w, spect, lw = 2, alpha = 0.7, label = "") - plot!(plt, title = "Spectral density", xlim = (0, pi), - xlabel = "frequency", ylabel = "spectrum", yscale = :log) - return plt -end - -function plot_spectral_density(arma) - plt = plot() - plot_spectral_density(arma, plt = plt) - return plt -end - -function plot_autocovariance(arma, plt) - acov = autocovariance(arma) - n = length(acov) - plot!(plt, 0:(n - 1), acov, seriestype = :sticks, marker = :circle, - markersize = 2, label = "") - plot!(plt, seriestype = :hline, [0], color = :red, label = "") - plot!(plt, title = "Autocovariance", xlim = (-0.5, n - 0.5), - xlabel = "time", ylabel = "autocovariance") - return plt -end - -function plot_autocovariance(arma) - plt = plot() - plot_spectral_density(arma, plt = plt) - return plt -end - -function plot_impulse_response(arma, plt) - psi 
= impulse_response(arma) - n = length(psi) - plot!(plt, 0:(n - 1), psi, seriestype = :sticks, marker = :circle, - markersize = 2, label = "") - plot!(plt, seriestype = :hline, [0], color = :red, label = "") - plot!(plt, title = "Impluse response", xlim = (-0.5, n - 0.5), - xlabel = "time", ylabel = "response") - return plt -end - -function plot_impulse_response(arma) - plt = plot() - plot_spectral_density(arma, plt = plt) - return plt -end - -function plot_simulation(arma, plt) - X = simulation(arma) - n = length(X) - plot!(plt, 0:(n - 1), X, lw = 2, alpha = 0.7, label = "") - plot!(plt, title = "Sample path", xlim = (0, 0, n), xlabel = "time", - ylabel = "state space") - return plt -end - -function plot_simulation(arma) - plt = plot() - plot_spectral_density(arma, plt = plt) - return plt -end - -function quad_plot(arma) - plt_1 = plot() - plt_2 = plot() - plt_3 = plot() - plt_4 = plot() - plots = [plt_1, plt_2, plt_3, plt_4] - - plot_functions = [plot_spectral_density, - plot_impulse_response, - plot_autocovariance, - plot_simulation] - for (i, plt, plot_func) in zip(1:1:4, plots, plot_functions) - plots[i] = plot_func(arma, plt) - end - return plot(plots[1], plots[2], plots[3], plots[4], layout = (2, 2), - size = (800, 800)) -end -``` - -Now let's call these functions to generate the plots. - -We'll use the model $X_t = 0.5 X_{t-1} + \epsilon_t - 0.8 \epsilon_{t-2}$ - -```{code-cell} julia -Random.seed!(42) # For reproducible results. -phi = 0.5; -theta = [0, -0.8]; -arma = ARMA(phi, theta, 1.0) -quad_plot(arma) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - @test spectral_density(arma, two_pi=false)[2][4] ≈ 0.16077100233347555 - # As before, we need to repeat the calculations, since we don't have access to the results. 
- @test (autocovariance(arma))[3] ≈ -0.5886222919837174 atol = 1e-3 - #test (impulse_response(arma))[10] == -0.004296875 - Random.seed!(42) -# #test (simulation(arma))[20] ≈ 1.5309398262415883 atol = 1e-3 -end -``` - -### Explanation - -The call - -```{code-block} julia -arma = ARMA(phi, theta, sigma) -``` - -creates an instance `arma` that represents the ARMA($p, q$) model - -$$ -X_t = \phi_1 X_{t-1} + ... + \phi_p X_{t-p} + - \epsilon_t + \theta_1 \epsilon_{t-1} + ... + \theta_q \epsilon_{t-q} -$$ - -If `phi` and `theta` are arrays or sequences, then the interpretation will -be - -* `phi` holds the vector of parameters $(\phi_1, \phi_2,..., \phi_p)$ -* `theta` holds the vector of parameters $(\theta_1, \theta_2,..., \theta_q)$ - -The parameter `sigma` is always a scalar, the standard deviation of the white noise. - -We also permit `phi` and `theta` to be scalars, in which case the model will be interpreted as - -$$ -X_t = \phi X_{t-1} + \epsilon_t + \theta \epsilon_{t-1} -$$ - -The two numerical packages most useful for working with ARMA models are `DSP.jl` and the `fft` routine in Julia. - -### Computing the Autocovariance Function - -As discussed above, for ARMA processes the spectral density has a {ref}`simple representation ` that is relatively easy to calculate. - -Given this fact, the easiest way to obtain the autocovariance function is to recover it from the spectral -density via the inverse Fourier transform. - -Here we use Julia's Fourier transform routine fft, which wraps a standard C-based package called FFTW. 
- -A look at [the fft documentation](https://docs.julialang.org/en/stable/stdlib/math/#Base.DFT.fft) shows that the inverse transform ifft takes a given sequence $A_0, A_1, \ldots, A_{n-1}$ and -returns the sequence $a_0, a_1, \ldots, a_{n-1}$ defined by - -$$ -a_k = \frac{1}{n} \sum_{t=0}^{n-1} A_t e^{ik 2\pi t / n} -$$ - -Thus, if we set $A_t = f(\omega_t)$, where $f$ is the spectral density and -$\omega_t := 2 \pi t / n$, then - -$$ -a_k -= \frac{1}{n} \sum_{t=0}^{n-1} f(\omega_t) e^{i \omega_t k} -= \frac{1}{2\pi} \frac{2 \pi}{n} \sum_{t=0}^{n-1} f(\omega_t) e^{i \omega_t k}, -\qquad -\omega_t := 2 \pi t / n -$$ - -For $n$ sufficiently large, we then have - -$$ -a_k -\approx \frac{1}{2\pi} \int_0^{2 \pi} f(\omega) e^{i \omega k} d \omega -= \frac{1}{2\pi} \int_{-\pi}^{\pi} f(\omega) e^{i \omega k} d \omega -$$ - -(You can check the last equality) - -In view of {eq}`ift` we have now shown that, for $n$ sufficiently large, $a_k \approx \gamma(k)$ --- which is exactly what we want to compute. - diff --git a/lectures/time_series_models/classical_filtering.md b/lectures/time_series_models/classical_filtering.md deleted file mode 100644 index 724fa6c4..00000000 --- a/lectures/time_series_models/classical_filtering.md +++ /dev/null @@ -1,1224 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(classical_filtering)= -```{raw} html - -``` - -# Classical Filtering With Linear Algebra - -```{contents} Contents -:depth: 2 -``` - -## Overview - -This is a sequel to the earlier lecture {doc}`Classical Control with Linear Algebra <../time_series_models/classical_filtering>`. - -That lecture used linear algebra -- in particular, the [LU decomposition](https://en.wikipedia.org/wiki/LU_decomposition) -- to formulate and solve a class of linear-quadratic optimal control problems. 
- -In this lecture, we'll be using a closely related decomposition, the [Cholesky decomposition](https://en.wikipedia.org/wiki/Cholesky_decomposition) , to solve linear prediction and filtering problems. - -We exploit the useful fact that there is an intimate connection between two superficially different classes of problems: - -* deterministic linear-quadratic (LQ) optimal control problems -* linear least squares prediction and filtering problems - -The first class of problems involves no randomness, while the second is all about randomness. - -Nevertheless, essentially the same mathematics solves both type of problem. - -This connection, which is often termed "duality," is present whether one uses "classical" or "recursive" solution procedures. - -In fact we saw duality at work earlier when we formulated control and prediction problems recursively in lectures {doc}`LQ dynamic programming problems <../dynamic_programming/lqcontrol>`, {doc}`A first look at the Kalman filter <../introduction_dynamics/kalman>`, and {doc}`The permanent income model <../dynamic_programming/perm_income>`. - -A useful consequence of duality is that - -* With every LQ control problem there is implicitly affiliated a linear least squares prediction or filtering problem. -* With every linear least squares prediction or filtering problem there is implicitly affiliated a LQ control problem. - -An understanding of these connections has repeatedly proved useful in cracking interesting applied problems. - -For example, Sargent {cite}`Sargent1987` [chs. IX, XIV] and Hansen and Sargent {cite}`HanSar1980` formulated and solved control and filtering problems using $z$-transform methods. - -In this lecture we investigate these ideas using mostly elementary linear algebra. - -### References - -Useful references include {cite}`Whittle1963`, {cite}`HanSar1980`, {cite}`Orfanidisoptimum1988`, {cite}`Athanasios1991`, and {cite}`Muth1960`. 
- - -## Infinite Horizon Prediction and Filtering Problems - -We pose two related prediction and filtering problems. - -We let $Y_t$ be a univariate $m^{\rm th}$ order moving average, covariance stationary stochastic process, - -```{math} -:label: eq_24 - -Y_t = d(L) u_t -``` - -where $d(L) = \sum^m_{j=0} d_j L^j$, and $u_t$ is a serially uncorrelated stationary random process satisfying - -```{math} -:label: eq_25 - -\begin{aligned} - \mathbb{E} u_t &= 0\\ - \mathbb{E} u_t u_s &= - \begin{cases} - 1 & \text{ if } t = s \\ - 0 & \text{ otherwise} - \end{cases} -\end{aligned} -``` - -We impose no conditions on the zeros of $d(z)$. - -A second covariance stationary process is $X_t$ given by - -```{math} -:label: eq_26 - -X_t = Y_t + \varepsilon_t -``` - -where $\varepsilon_t$ is a serially uncorrelated stationary -random process with $\mathbb{E} \varepsilon_t = 0$ and $\mathbb{E} \varepsilon_t \varepsilon_s$ = $0$ for all distinct $t$ and $s$. - -We also assume that $\mathbb{E} \varepsilon_t u_s = 0$ for all $t$ and $s$. - -The **linear least squares prediction problem** is to find the $L_2$ -random variable $\hat X_{t+j}$ among linear combinations of -$\{ X_t,\ X_{t-1}, -\ldots \}$ that minimizes $\mathbb{E}(\hat X_{t+j} - X_{t+j})^2$. - -That is, the problem is to find a $\gamma_j (L) = \sum^\infty_{k=0} \gamma_{jk}\, L^k$ such that $\sum^\infty_{k=0} \vert \gamma_{jk} \vert^2 < \infty$ and $\mathbb{E} [\gamma_j \, (L) X_t -X_{t+j}]^2$ is minimized. - -The **linear least squares filtering problem** is to find a $b\,(L) = \sum^\infty_{j=0} b_j\, L^j$ such that $\sum^\infty_{j=0}\vert b_j \vert^2 < \infty$ and $\mathbb{E} [b\, (L) X_t -Y_t ]^2$ is minimized. - -Interesting versions of these problems related to the permanent income theory were studied by {cite}`Muth1960`. - -### Problem formulation - -These problems are solved as follows. 
- -The covariograms of $Y$ and $X$ and their cross covariogram are, respectively, - -```{math} -:label: eq_27 - -\begin{aligned} - C_X (\tau) &= \mathbb{E}X_t X_{t-\tau} \\ - C_Y (\tau) &= \mathbb{E}Y_t Y_{t-\tau} \qquad \tau = 0, \pm 1, \pm 2, \ldots \\ - C_{Y,X} (\tau) &= \mathbb{E}Y_t X_{t-\tau} -\end{aligned} -``` - -The covariance and cross covariance generating functions are defined as - -```{math} -:label: eq_28 - -\begin{aligned} - g_X(z) &= \sum^\infty_{\tau = - \infty} C_X (\tau) z^\tau \\ - g_Y(z) &= \sum^\infty_{\tau = - \infty} C_Y (\tau) z^\tau \\ - g_{YX} (z) &= \sum^\infty_{\tau = - \infty} C_{YX} (\tau) z^\tau -\end{aligned} -``` - -The generating functions can be computed by using the following facts. - -Let $v_{1t}$ and $v_{2t}$ be two mutually and serially uncorrelated white noises with unit variances. - -That is, $\mathbb{E}v^2_{1t} = \mathbb{E}v^2_{2t} = 1, \mathbb{E}v_{1t} = \mathbb{E}v_{2t} = 0, \mathbb{E}v_{1t} v_{2s} = 0$ for all $t$ and $s$, $\mathbb{E}v_{1t} v_{1t-j} = \mathbb{E}v_{2t} v_{2t-j} = 0$ for all $j \not = 0$. - -Let $x_t$ and $y_t$ be two random process given by - -$$ -\begin{aligned} - y_t &= A(L) v_{1t} + B(L) v_{2t} \\ - x_t &= C(L) v_{1t} + D(L) v_{2t} -\end{aligned} -$$ - -Then, as shown for example in {cite}`Sargent1987` [ch. XI], it is true that - -```{math} -:label: eq_29 - -\begin{aligned} - g_y(z) &= A(z) A(z^{-1}) + B (z) B(z^{-1}) \\ - g_x (z) &= C(z) C(z^{-1}) + D(z) D(z^{-1}) \\ - g_{yx} (z) &= A(z) C(z^{-1}) + B(z) D(z^{-1}) -\end{aligned} -``` - -Applying these formulas to {eq}`eq_24` -- {eq}`eq_27`, we have - -```{math} -:label: eq_30 - -\begin{aligned} - g_Y(z) &= d(z)d(z^{-1}) \\ - g_X(z) &= d(z)d(z^{-1}) + h\\ - g_{YX} (z) &= d(z) d(z^{-1}) -\end{aligned} -``` - -The key step in obtaining solutions to our problems is to factor the covariance generating function $g_X(z)$ of $X$. - -The solutions of our problems are given by formulas due to Wiener and Kolmogorov. 
- -These formulas utilize the Wold moving average representation of the $X_t$ process, - -```{math} -:label: eq_31 - -X_t = c\,(L)\,\eta_t -``` - -where $c(L) = \sum^m_{j=0} c_j\, L^j$, with - -```{math} -:label: eq_32 - -c_0 \eta_t -= X_t - \mathbb{\hat E} [X_t \vert X_{t-1}, X_{t-2}, \ldots] -``` - -Here $\mathbb{\hat E}$ is the linear least squares projection operator. - -Equation {eq}`eq_32` is the condition that $c_0 \eta_t$ can be the one-step ahead error in predicting $X_t$ from its own past values. - -Condition {eq}`eq_32` requires that $\eta_t$ lie in the closed -linear space spanned by $[X_t,\ X_{t-1}, \ldots]$. - -This will be true if and only if the zeros of $c(z)$ do not lie inside the unit circle. - -It is an implication of {eq}`eq_32` that $\eta_t$ is a serially -uncorrelated random process, and that a normalization can be imposed so -that $\mathbb{E}\eta_t^2 = 1$. - -Consequently, an implication of {eq}`eq_31` is -that the covariance generating function of $X_t$ can be expressed -as - -```{math} -:label: eq_33 - -g_X(z) = c\,(z)\,c\,(z^{-1}) -``` - -It remains to discuss how $c(L)$ is to be computed. - -Combining {eq}`eq_29` and {eq}`eq_33` gives - -```{math} -:label: eq_34 - -d(z) \,d(z^{-1}) + h = c \, (z) \,c\,(z^{-1}) -``` - -Therefore, we have already showed constructively how to factor the covariance generating function $g_X(z) = d(z)\,d\,(z^{-1}) + h$. - -We now introduce the **annihilation operator**: - -```{math} -:label: eq_35 - -\left[ - \sum^\infty_{j= - \infty} f_j\, L^j -\right]_+ -\equiv \sum^\infty_{j=0} f_j\,L^j -``` - -In words, $[\phantom{00}]_+$ means "ignore negative powers of $L$". - -We have defined the solution of the prediction problem as $\mathbb{\hat E} [X_{t+j} \vert X_t,\, X_{t-1}, \ldots] = \gamma_j\, (L) X_t$. 
-
-Assuming that the roots of $c(z) = 0$ all lie outside the unit circle, the Wiener-Kolmogorov formula for $\gamma_j (L)$ holds:
-
-```{math}
-:label: eq_36
-
-\gamma_j\, (L) =
-\left[
- {c (L) \over L^j}
-\right]_+ c\,(L)^{-1}
-```
-
-We have defined the solution of the filtering problem as $\mathbb{\hat E}[Y_t \mid X_t, X_{t-1}, \ldots] = b (L)X_t$.
-
-The Wiener-Kolmogorov formula for $b(L)$ is
-
-$$
-b(L) = \left({g_{YX} (L) \over c(L^{-1})}\right)_+ c(L)^{-1}
-$$
-
-or
-
-```{math}
-:label: eq_37
-
-b(L) = \left[ {d(L)d(L^{-1}) \over c(L^{-1})} \right]_+ c(L)^{-1}
-```
-
-Formulas {eq}`eq_36` and {eq}`eq_37` are discussed in detail in {cite}`Whittle1983` and {cite}`Sargent1987`.
-
-The interested reader can there find several examples of the use of these formulas in economics.
-Some classic examples using these formulas are due to {cite}`Muth1960`.
-
-As an example of the usefulness of formula {eq}`eq_37`, we let $X_t$ be a stochastic process with Wold moving average representation
-
-$$
-X_t = c(L) \eta_t
-$$
-
-where $\mathbb{E}\eta^2_t = 1, \hbox { and } c_0 \eta_t = X_t - \mathbb{\hat E} [X_t \vert X_{t-1}, \ldots], c (L) = \sum^m_{j=0} c_j L^j$.
-
-Suppose that at time $t$, we wish to predict a geometric sum of future $X$'s, namely
-
-$$
-y_t \equiv \sum^\infty_{j=0} \delta^j X_{t+j} = {1 \over 1 - \delta L^{-1}}
-X_t
-$$
-
-given knowledge of $X_t, X_{t-1}, \ldots$.
-
-We shall use {eq}`eq_37` to obtain the answer.
-
-Using the standard formulas {eq}`eq_29`, we have that
-
-$$
-\begin{aligned}
- g_{yx}(z) &= (1-\delta z^{-1})c(z) c (z^{-1}) \\
- g_x (z) &= c(z) c (z^{-1})
-\end{aligned}
-$$
-
-Then {eq}`eq_37` becomes
-
-```{math}
-:label: eq_38
-
-b(L)=\left[{c(L)\over 1-\delta L^{-1}}\right]_+ c(L)^{-1}
-```
-
-In order to evaluate the term in the annihilation operator, we use the following result from {cite}`HanSar1980`.
-
-**Proposition** Let
-
-* $g(z) = \sum^\infty_{j=0} g_j \, z^j$ where $\sum^\infty_{j=0} \vert g_j \vert^2 < + \infty$
-* $h\,(z^{-1}) =$ $(1- \delta_1 z^{-1}) \ldots (1-\delta_n z^{-1})$, where $\vert \delta_j \vert < 1$, for $j = 1, \ldots, n$
-
-Then
-
-```{math}
-:label: eq_39
-
-\left[{g(z)\over h(z^{-1})}\right]_+ = {g(z)\over h(z^{-1})} - \sum^n_{j=1}
-\ {\delta_j g (\delta_j) \over \prod^n_{k=1 \atop k \not = j} (\delta_j -
-\delta_k)} \ \left({1 \over z- \delta_j}\right)
-```
-
-and, alternatively,
-
-```{math}
-:label: eq_40
-
-\left[
- {g(z)\over h(z^{-1})}
-\right]_+
-=\sum^n_{j=1} B_j
-\left(
- {zg(z)-\delta_j g (\delta_j) \over z- \delta_j}
-\right)
-```
-
-where $B_j = 1 / \prod^n_{k=1\atop k \not = j} (1 - \delta_k / \delta_j)$.
-
-Applying formula {eq}`eq_40` of the proposition to evaluating {eq}`eq_38` with $g(z) = c(z)$ and $h(z^{-1}) = 1 - \delta z^{-1}$ gives
-
-$$
-b(L)=\left[{Lc(L)-\delta c(\delta)\over L-\delta}\right] c(L)^{-1}
-$$
-
-or
-
-$$
-b(L) =
-\left[
- {1-\delta c (\delta) L^{-1} c (L)^{-1}\over 1-\delta L^{-1}}
-\right]
-$$
-
-Thus, we have
-
-```{math}
-:label: eq_41
-
-\mathbb{\hat E}
-\left[
- \sum^\infty_{j=0} \delta^j X_{t+j}\vert X_t,\, X_{t-1},
- \ldots
-\right] =
-\left[
- {1-\delta c (\delta) L^{-1} c(L)^{-1} \over 1 - \delta L^{-1}}
-\right]
-\, X_t
-```
-
-This formula is useful in solving stochastic versions of problem 1 of lecture {doc}`Classical Control with Linear Algebra ` in which the randomness emerges because $\{a_t\}$ is a stochastic
-process.
-
-The problem is to maximize
-
-```{math}
-:label: eq_42
-
-\mathbb{E}_0
-\lim_{N \rightarrow \infty}\
-\sum^N_{t=0} \beta^t
-\left[
- a_t\, y_t - {1 \over 2}\ hy^2_t-{1 \over 2}\ [d(L)y_t]^2
-\right]
-```
-
-where $\mathbb{E}_t$ is mathematical expectation conditioned on information
-known at $t$, and where $\{ a_t\}$ is a covariance
-stationary stochastic process with Wold moving average representation
-
-$$
-a_t = c(L)\, \eta_t
-$$
-
-where
-
-$$
-c(L) = \sum^{\tilde n}_{j=0} c_j L^j
-$$
-
-and
-
-$$
-\eta_t =
-a_t - \mathbb{\hat E} [a_t \vert a_{t-1}, \ldots]
-$$
-
-The problem is to maximize {eq}`eq_42` with respect to a contingency plan
-expressing $y_t$ as a function of information known at $t$,
-which is assumed to be
-$(y_{t-1},\ y_{t-2}, \ldots, a_t, \ a_{t-1}, \ldots)$.
-
-The solution of this problem can be achieved in two steps.
-
-First, ignoring the uncertainty, we can solve the problem assuming that $\{a_t\}$ is a known sequence.
-
-The solution is, from above,
-
-$$
-c(L) y_t = c(\beta L^{-1})^{-1} a_t
-$$
-
-or
-
-```{math}
-:label: eq_43
-
-(1-\lambda_1 L) \ldots (1 - \lambda_m L) y_t
-= \sum^m_{j=1} A_j
-\sum^\infty_{k=0} (\lambda_j \beta)^k\, a_{t+k}
-```
-
-Second, the solution of the problem under uncertainty is obtained by
-replacing the terms on the right-hand side of the above expressions with
-their linear least squares predictors.
-
-Using {eq}`eq_41` and {eq}`eq_43`, we have
-the following solution
-
-$$
-(1-\lambda_1 L) \ldots (1-\lambda_m L) y_t
-=
-\sum^m_{j=1} A_j
- \left[
-  \frac{1-\beta \lambda_j \, c (\beta \lambda_j) L^{-1} c(L)^{-1} }
-  { 1-\beta \lambda_j L^{-1} }
- \right] a_t
-$$
-
-## Finite Dimensional Prediction
-
-Let $(x_1, x_2, \ldots, x_T)^\prime = x$ be a $T \times 1$ vector of random variables with mean $\mathbb{E} x = 0$ and covariance matrix $\mathbb{E} xx^\prime = V$.
-
-Here $V$ is a $T \times T$ positive definite matrix.
- -We shall regard the random variables as being -ordered in time, so that $x_t$ is thought of as the value of some -economic variable at time $t$. - -For example, $x_t$ could be generated by the random process described by the Wold representation presented in equation {eq}`eq_31`. - -In this case, $V_{ij}$ is given by the coefficient on $z^{\mid i-j \mid}$ in the expansion of $g_x (z) = d(z) \, d(z^{-1}) + h$, which equals -$h+\sum^\infty_{k=0} d_k d_{k+\mid i-j \mid}$. - -We shall be interested in constructing $j$ step ahead linear least squares predictors of the form - -$$ -\mathbb{\hat E} -\left[ - x_T\vert x_{T-j}, x_{T-j + 1}, \ldots, x_1 -\right] -$$ - -where $\mathbb{\hat E}$ is the linear least squares projection operator. - -The solution of this problem can be exhibited by first constructing an -orthonormal basis of random variables $\varepsilon$ for $x$. - -Since $V$ is a positive definite and symmetric, we know that there -exists a (Cholesky) decomposition of $V$ such that - -$$ -V = L^{-1} (L^{-1})^\prime -$$ - -or - -$$ -L \, V \, L^\prime = I -$$ - -where $L$ is lower-trangular, and therefore so is $L^{-1}$. - -Form the random variable $Lx = \varepsilon$. - -Then $\varepsilon$ is an orthonormal basis for $x$, since $L$ is nonsingular, and $\mathbb{E} \, \varepsilon \, \varepsilon^\prime = -L \mathbb{E} xx^\prime L^\prime = I$. - -It is convenient to write out the equations $Lx = \varepsilon$ and $L^{-1} \varepsilon = x$ - -```{math} -:label: eq_53 - -\begin{aligned} - L_{11} x_1 &= \varepsilon_1 \\ - L_{21}x_1 + L_{22} x_2 &= \varepsilon_2 \\ \, \vdots \\ - L_{T1} \, x_1 \, \ldots \, + L_{TTx_T} &= \varepsilon_T -\end{aligned} -``` - -or - -```{math} -:label: eq_54 - -\sum^{t-1}_{j=0} L_{t,t-j}\, x_{t-j} = \varepsilon_t, \quad t = 1, \, 2, \ldots T -``` - -We also have - -```{math} -:label: eq_55 - -x_t = \sum^{t-1}_{j=0} L^{-1}_{t,t-j}\, \varepsilon_{t-j}\ . 
-``` - -Notice from {eq}`eq_55` that $x_t$ is in the space spanned by -$\varepsilon_t, \, \varepsilon_{t-1}, \ldots, \varepsilon_1$, and from {eq}`eq_54` that -$\varepsilon_t$ is in the space spanned by $x_t,\, x_{t-1}, \ldots,\, x_1$. - -Therefore, we have that for $t-1\geq m \geq 1$ - -```{math} -:label: eq_56 - -\mathbb{\hat E} -[ x_t \mid x_{t-m},\, x_{t-m-1}, \ldots, x_1 ] = -\mathbb{\hat E} -[x_t \mid \varepsilon_{t-m}, \varepsilon_{t-m-1},\ldots, \varepsilon_1] -``` - -For $t-1 \geq m \geq 1$ rewrite {eq}`eq_55` as - -```{math} -:label: eq_57 - -x_t = \sum^{m-1}_{j=0} L_{t,t-j}^{-1}\, \varepsilon_{t-j} + \sum^{t-1}_{j=m} -L^{-1}_{t, t-j}\, \varepsilon_{t-j} -``` - -Representation {eq}`eq_57` is an orthogonal decomposition of $x_t$ into a part $\sum^{t-1}_{j=m} L_{t, t-j}^{-1}\, \varepsilon_{t-j}$ that lies in the space spanned by -$[x_{t-m},\, x_{t-m+1},\, \ldots, x_1]$, and an orthogonal -component not in this space. - -### Implementation - -Code that computes solutions to LQ control and filtering problems using the methods described here and in {doc}`Classical Control with Linear Algebra ` can be found in the file [control_and_filter.jl](https://github.com/QuantEcon/QuantEcon.lectures.code/blob/master/lu_tricks/control_and_filter.jl). 
- -Here's how it looks - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test -``` - -```{code-cell} julia -using LinearAlgebra, Statistics -using Polynomials.PolyCompat, LinearAlgebra -import Polynomials.PolyCompat: roots, coeffs - -function LQFilter(d, h, y_m; - r = nothing, - beta = nothing, - h_eps = nothing) - m = length(d) - 1 - m == length(y_m) || - throw(ArgumentError("y_m and d must be of same length = $m")) - - # define the coefficients of phi up front - phi = zeros(2m + 1) - for i in (-m):m - phi[m - i + 1] = sum(diag(d * d', -i)) - end - phi[m + 1] = phi[m + 1] + h - - # if r is given calculate the vector phi_r - if isnothing(r) - k = nothing - phi_r = nothing - else - k = size(r, 1) - 1 - phi_r = zeros(2k + 1) - - for i in (-k):k - phi_r[k - i + 1] = sum(diag(r * r', -i)) - end - - if isnothing(h_eps) == false - phi_r[k + 1] = phi_r[k + 1] + h_eps - end - end - - # if beta is given, define the transformed variables - if isnothing(beta) - beta = 1.0 - else - d = beta .^ (collect(0:m) / 2) * d - y_m = y_m * beta .^ (-collect(1:m) / 2) - end - - return (; d, h, y_m, m, phi, beta, phi_r, k) -end - -function construct_W_and_Wm(lqf, N) - (; d, m) = lqf - - W = zeros(N + 1, N + 1) - W_m = zeros(N + 1, m) - - # terminal conditions - D_m1 = zeros(m + 1, m + 1) - M = zeros(m + 1, m) - - # (1) constuct the D_{m+1} matrix using the formula - - for j in 1:(m + 1) - for k in j:(m + 1) - D_m1[j, k] = dot(d[1:j, 1], d[(k - j + 1):k, 1]) - end - end - - # Make the matrix symmetric - D_m1 = D_m1 + D_m1' - Diagonal(diag(D_m1)) - - # (2) Construct the M matrix using the entries of D_m1 - - for j in 1:m - for i in (j + 1):(m + 1) - M[i, j] = D_m1[i - j, m + 1] - end - end - M - - # Euler equations for t = 0, 1, ..., N-(m+1) - phi, h = lqf.phi, lqf.h - - W[1:(m + 1), 1:(m + 1)] = D_m1 + h * I - W[1:(m + 1), (m + 2):(2m + 1)] = M - - for (i, row) in enumerate((m + 2):(N + 1 - m)) - W[row, (i + 1):(2m + 1 + i)] = phi' - end - - for i in 1:m - W[N - m + i + 1, (end 
- (2m + 1 - i) + 1):end] = phi[1:(end - i)] - end - - for i in 1:m - W_m[N - i + 2, 1:((m - i) + 1)] = phi[(m + 1 + i):end] - end - - return W, W_m -end - -function roots_of_characteristic(lqf) - (; m, phi) = lqf - - # Calculate the roots of the 2m-polynomial - phi_poly = Poly(phi[end:-1:1]) - proots = roots(phi_poly) - # sort the roots according to their length (in descending order) - roots_sorted = sort(proots, by = abs)[end:-1:1] - z_0 = sum(phi) / polyval(poly(proots), 1.0) - z_1_to_m = roots_sorted[1:m] # we need only those outside the unit circle - lambda = 1 ./ z_1_to_m - return z_1_to_m, z_0, lambda -end - -function coeffs_of_c(lqf) - (; m) = lqf - z_1_to_m, z_0, lambda = roots_of_characteristic(lqf) - c_0 = (z_0 * prod(z_1_to_m) * (-1.0)^m)^(0.5) - c_coeffs = coeffs(poly(z_1_to_m)) * z_0 / c_0 - return c_coeffs -end - -function solution(lqf) - z_1_to_m, z_0, lambda = roots_of_characteristic(lqf) - c_0 = coeffs_of_c(lqf)[end] - A = zeros(m) - for j in 1:m - denom = 1 - lambda / lambda[j] - A[j] = c_0^(-2) / prod(denom[1:m .!= j]) - end - return lambda, A -end - -function construct_V(lqf; N = nothing) - if isnothing(N) - error("N must be provided!!") - end - if !(N isa Integer) - throw(ArgumentError("N must be Integer!")) - end - - (; phi_r, k) = lqf - V = zeros(N, N) - for i in 1:N - for j in 1:N - if abs(i - j) <= k - V[i, j] = phi_r[k + abs(i - j) + 1] - end - end - end - return V -end - -function simulate_a(lqf, N) - V = construct_V(N + 1) - d = MVNSampler(zeros(N + 1), V) - return rand(d) -end - -function predict(lqf, a_hist, t) - N = length(a_hist) - 1 - V = construct_V(N + 1) - - aux_matrix = zeros(N + 1, N + 1) - aux_matrix[1:(t + 1), 1:(t + 1)] = Matrix(I, t + 1, t + 1) - L = cholesky(V).U' - Ea_hist = inv(L) * aux_matrix * L * a_hist - - return Ea_hist -end - -function optimal_y(lqf, a_hist, t = nothing) - (; beta, y_m, m) = lqf - - N = length(a_hist) - 1 - W, W_m = construct_W_and_Wm(lqf, N) - - F = lu(W, Val(true)) - - L, U = F - D = diagm(0 => 
1.0 ./ diag(U)) - U = D * U - L = L * diagm(0 => 1.0 ./ diag(D)) - - J = reverse(Matrix(I, N + 1, N + 1), dims = 2) - - if isnothing(t) # if the problem is deterministic - a_hist = J * a_hist - - # transform the a sequence if beta is given - if beta != 1 - a_hist = reshape(a_hist * (beta^(collect(N:0) / 2)), N + 1, 1) - end - - a_bar = a_hist - W_m * y_m # a_bar from the lecutre - Uy = \(L, a_bar) # U @ y_bar = L^{-1}a_bar from the lecture - y_bar = \(U, Uy) # y_bar = U^{-1}L^{-1}a_bar - # Reverse the order of y_bar with the matrix J - J = reverse(Matrix(I, N + m + 1, N + m + 1), dims = 2) - y_hist = J * vcat(y_bar, y_m) # y_hist : concatenated y_m and y_bar - # transform the optimal sequence back if beta is given - if beta != 1 - y_hist = y_hist .* beta .^ (-collect((-m):N) / 2) - end - - else # if the problem is stochastic and we look at it - Ea_hist = reshape(predict(a_hist, t), N + 1, 1) - Ea_hist = J * Ea_hist - - a_bar = Ea_hist - W_m * y_m # a_bar from the lecutre - Uy = \(L, a_bar) # U @ y_bar = L^{-1}a_bar from the lecture - y_bar = \(U, Uy) # y_bar = U^{-1}L^{-1}a_bar - - # Reverse the order of y_bar with the matrix J - J = reverse(Matrix(I, N + m + 1, N + m + 1), dims = 2) - y_hist = J * vcat(y_bar, y_m) # y_hist : concatenated y_m and y_bar - end - return y_hist, L, U, y_bar -end -``` - -Let's use this code to tackle two interesting examples. - -### Example 1 - -Consider a stochastic process with moving average representation - -$$ -x_t = (1 - 2 L) \varepsilon_t -$$ - -where $\varepsilon_t$ is a serially uncorrelated random process with mean zero and variance unity. - -We want to use the Wiener-Kolmogorov formula {eq}`eq_36` to compute the linear least squares forecasts $\mathbb{E} [x_{t+j} \mid x_t, x_{t-1}, \ldots]$, for $j = 1,\, 2$. 
- -We can do everything we want by setting $d = r$, generating an instance of LQFilter, then invoking pertinent methods of LQFilter - -```{code-cell} julia -m = 1 -y_m = zeros(m) -d = [1.0, -2.0] -r = [1.0, -2.0] -h = 0.0 -example = LQFilter(d, h, y_m, r = d) -``` - -The Wold representation is computed by example.coefficients_of_c(). - -Let's check that it "flips roots" as required - -```{code-cell} julia -coeffs_of_c(example) -``` - -```{code-cell} julia -roots_of_characteristic(example) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - @test coeffs_of_c(example) ≈ [2.0, -1.0] - @test roots_of_characteristic(example) == ([2.0], -2.0, [0.5]) -end -``` - -Now let's form the covariance matrix of a time series vector of length $N$ -and put it in $V$. - -Then we'll take a Cholesky decomposition of $V = L^{-1} L^{-1} = Li Li'$ and use it to form the vector of "moving average representations" $x = Li \varepsilon$ and the vector of "autoregressive representations" $L x = \varepsilon$ - -```{code-cell} julia -V = construct_V(example, N = 5) -``` - -Notice how the lower rows of the "moving average representations" are converging to the appropriate infinite history Wold representation - -```{code-cell} julia -F = cholesky(V) -Li = F.L -``` - -Notice how the lower rows of the "autoregressive representations" are converging to the appropriate infinite history autoregressive representation - -```{code-cell} julia -L = inv(Li) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - @test L[2, 1] ≈ 0.1951800145897066 - @test L[3, 3] ≈ 0.4970501217477084 -end -``` - -**Remark** Let $\pi (z) = \sum^m_{j=0} \pi_j z^j$ and let $z_1, \ldots, -z_k$ be the zeros of $\pi (z)$ that are inside the unit circle, $k < m$. 
- -Then define - -$$ -\theta (z) = \pi (z) \Biggl( {(z_1 z-1) \over (z-z_1)} \Biggr) -\Biggl( { (z_2 z-1) \over (z-z_2) } \Biggr ) \ldots \Biggl({(z_kz-1) \over -(z-z_k) }\Biggr) -$$ - -The term multiplying $\pi (z)$ is termed a "Blaschke factor". - -Then it can be proved directly that - -$$ -\theta (z^{-1}) \theta (z) = \pi (z^{-1}) \pi (z) -$$ - -and that the zeros of $\theta (z)$ are not inside the unit circle. - -### Example 2 - -Consider a stochastic process $X_t$ with moving average -representation - -$$ -X_t = (1 - \sqrt 2 L^2) \varepsilon_t -$$ - -where $\varepsilon_t$ is a serially uncorrelated random process -with mean zero and variance unity. - -Let's find a Wold moving average representation for $x_t$. - -Let's use the Wiener-Kolomogorov formula {eq}`eq_36` to compute the linear least squares forecasts -$\mathbb{\hat E}\left[X_{t+j} \mid X_{t-1}, \ldots\right] \hbox { for } j = 1,\, 2,\, 3$. - -We proceed in the same way as example 1 - -```{code-cell} julia -m = 2 -y_m = [0.0, 0.0] -d = [1, 0, -sqrt(2)] -r = [1, 0, -sqrt(2)] -h = 0.0 -example = LQFilter(d, h, y_m, r = d) -``` - -```{code-cell} julia -coeffs_of_c(example) -``` - -```{code-cell} julia -roots_of_characteristic(example) -``` - -```{code-cell} julia -V = construct_V(example, N = 8) -``` - -```{code-cell} julia -F = cholesky(V) -Li = F.L -Li[(end - 2):end, :] -``` - -```{code-cell} julia -L = inv(Li) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - @test L[3, 1] ≈ 0.30860669992418377 - @test L[2, 2] ≈ 0.5773502691896257 -end -``` - -### Prediction - -It immediately follows from the "orthogonality principle" of least squares (see {cite}`Athanasios1991` or {cite}`Sargent1987` [ch. 
X]) that - -```{math} -:label: eq_58 - -\begin{aligned} - \mathbb{\hat E} & [x_t \mid x_{t-m},\, x_{t-m+1}, \ldots x_1] - = \sum^{t-1}_{j=m} L^{-1}_{t,t-j}\, \varepsilon_{t-j} \\ - & = [L_{t, 1}^{-1}\, L^{-1}_{t,2},\, \ldots, L^{-1}_{t,t-m}\ 0 \ 0 \ldots 0] L \, x -\end{aligned} -``` - -This can be interpreted as a finite-dimensional version of the Wiener-Kolmogorov $m$-step ahead prediction formula. - -We can use {eq}`eq_58` to represent the linear least squares projection of -the vector $x$ conditioned on the first $s$ observations -$[x_s, x_{s-1} \ldots, x_1]$. - -We have - -```{math} -:label: eq_59 - -\mathbb{\hat E}[x \mid x_s, x_{s-1}, \ldots, x_1] -= L^{-1} -\left[ - \begin{matrix} - I_s & 0 \\ - 0 & 0_{(t-s)} - \end{matrix} -\right] L x -``` - -This formula will be convenient in representing the solution of control problems under uncertainty. - -Equation {eq}`eq_55` can be recognized as a finite dimensional version of a moving average representation. - -Equation {eq}`eq_54` can be viewed as a finite dimension version of an autoregressive representation. - -Notice that even -if the $x_t$ process is covariance stationary, so that $V$ -is such that $V_{ij}$ depends only on $\vert i-j\vert$, the -coefficients in the moving average representation are time-dependent, -there being a different moving average for each $t$. - -If -$x_t$ is a covariance stationary process, the last row of -$L^{-1}$ converges to the coefficients in the Wold moving average -representation for $\{ x_t\}$ as $T \rightarrow \infty$. - -Further, if $x_t$ is covariance stationary, for fixed $k$ -and $j > 0, \, L^{-1}_{T,T-j}$ converges to -$L^{-1}_{T-k, T-k-j}$ as $T \rightarrow \infty$. - -That is, -the “bottom” rows of $L^{-1}$ converge to each other and to the -Wold moving average coefficients as $T \rightarrow \infty$. - -This last observation gives one simple and widely-used practical way of -forming a finite $T$ approximation to a Wold moving average -representation. 
- -First, form the covariance matrix -$\mathbb{E}xx^\prime = V$, then obtain the Cholesky decomposition -$L^{-1} L^{-1^\prime}$ of $V$, which can be accomplished -quickly on a computer. - -The last row of $L^{-1}$ gives the approximate Wold moving average coefficients. - -This method can readily be generalized to multivariate systems. - -(fdcp)= -## Combined Finite Dimensional Control and Prediction - -Consider the finite-dimensional control problem, maximize - -$$ -\mathbb{E} \, \sum^N_{t=0} \, -\left\{ - a_t y_t - {1 \over 2} h y^2_t - {1 \over 2} [d(L) y_t ]^2 -\right\},\ \quad h > 0 -$$ - -where $d(L) = d_0 + d_1 L+ \ldots + d_m L^m$, $L$ is the -lag operator, $\bar a = [ a_N, a_{N-1} \ldots, a_1, a_0]^\prime$ a -random vector with mean zero and $\mathbb{E}\,\bar a \bar a^\prime = V$. - -The variables $y_{-1}, \ldots, y_{-m}$ are given. - -Maximization is over choices of $y_0, -y_1 \ldots, y_N$, where $y_t$ is required to be a linear function -of $\{y_{t-s-1}, t+m-1\geq 0;\ a_{t-s}, t\geq s\geq 0\}$. - -We saw in the lecture {doc}`Classical Control with Linear Algebra <../time_series_models/lu_tricks>` that the solution of this problem under certainty could be represented in feedback-feedforward form - -$$ -U \bar y - = L^{-1}\bar a + K - \left[ - \begin{matrix} - y_{-1}\\ - \vdots\\ - y_{-m} - \end{matrix} - \right] -$$ - -for some $(N+1)\times m$ matrix $K$. - -Using a version of formula {eq}`eq_58`, we can express $\mathbb{\hat E}[\bar a \mid a_s,\, a_{s-1}, \ldots, a_0 ]$ as - -$$ -\mathbb{\hat E} -[ \bar a \mid a_s,\, a_{s-1}, \ldots, a_0] -= \tilde U^{-1} -\left[ - \begin{matrix} - 0 & 0 \\ - 0 & I_{(s+1)} - \end{matrix} -\right] -\tilde U \bar a -$$ - -where $I_{(s + 1)}$ is the $(s+1) \times (s+1)$ identity -matrix, and $V = \tilde U^{-1} \tilde U^{-1^{\prime}}$, where -$\tilde U$ is the *upper* trangular Cholesky factor of the -covariance matrix $V$. 
- -(We have reversed the time axis in dating the $a$'s relative to earlier) - -The time axis can be reversed in representation {eq}`eq_59` by replacing $L$ with $L^T$. - -The optimal decision rule to use at time $0 \leq t \leq N$ is then -given by the $(N-t +1)^{\rm th}$ row of - -$$ -U \bar y = L^{-1} \tilde U^{-1} - \left[ - \begin{matrix} - 0 & 0 \\ - 0 & I_{(t+1)} - \end{matrix} - \right] - \tilde U \bar a + K - \left[ - \begin{matrix} - y_{-1}\\ - \vdots\\ - y_{-m} - \end{matrix} - \right] -$$ - -## Exercises - -### Exercise 1 - -Let $Y_t = (1 - 2 L ) u_t$ where $u_t$ is a mean zero -white noise with $\mathbb{E} u^2_t = 1$. Let - -$$ -X_t = Y_t + \varepsilon_t -$$ - -where $\varepsilon_t$ is a serially uncorrelated white noise with -$\mathbb{E} \varepsilon^2_t = 9$, and $\mathbb{E} \varepsilon_t u_s = 0$ for all -$t$ and $s$. - -Find the Wold moving average representation for $X_t$. - -Find a formula for the $A_{1j}$'s in - -$$ -\mathbb{E} \widehat X_{t+1} \mid X_t, X_{t-1}, \ldots = \sum^\infty_{j=0} A_{1j} -X_{t-j} -$$ - -Find a formula for the $A_{2j}$'s in - -$$ -\mathbb{\hat E} X_{t+2} \mid X_t, X_{t-1}, \ldots = \sum^\infty_{j=0} A_{2j} -X_{t-j} -$$ - -### Exercise 2 - -(Multivariable Prediction) Let $Y_t$ be an $(n\times 1)$ -vector stochastic process with moving average representation - -$$ -Y_t = D(L) U_t -$$ - -where $D(L) = \sum^m_{j=0} D_j L^J, D_j$ an $n \times n$ -matrix, $U_t$ an $(n \times 1)$ vector white noise with -:math: mathbb{E} U_t =0 for all $t$, $\mathbb{E} U_t U_s' = 0$ for all $s \neq t$, -and $\mathbb{E} U_t U_t' = I$ for all $t$. - -Let $\varepsilon_t$ be an $n \times 1$ vector white noise with mean $0$ and contemporaneous covariance matrix $H$, where $H$ is a positive definite matrix. - -Let $X_t = Y_t +\varepsilon_t$. - -Define the covariograms as $C_X -(\tau) = \mathbb{E} X_t X^\prime_{t-\tau}, C_Y (\tau) = \mathbb{E} Y_t Y^\prime_{t-\tau}, -C_{YX} (\tau) = \mathbb{E} Y_t X^\prime_{t-\tau}$. 
- -Then define the matrix -covariance generating function, as in {eq}`onetwenty`, only interpret all the -objects in {eq}`onetwenty` as matrices. - -Show that the covariance generating functions are given by - -$$ -\begin{aligned} - g_y (z) &= D (z) D (z^{-1})^\prime \\ - g_X (z) &= D (z) D (z^{-1})^\prime + H \\ - g_{YX} (z) &= D (z) D (z^{-1})^\prime -\end{aligned} -$$ - -A factorization of $g_X (z)$ can be found (see {cite}`Rozanov1967` or {cite}`Whittle1983`) of the form - -$$ -D (z) D (z^{-1})^\prime + H = C (z) C (z^{-1})^\prime, \quad C (z) = -\sum^m_{j=0} C_j z^j -$$ - -where the zeros of $\vert C(z)\vert$ do not lie inside the unit -circle. - -A vector Wold moving average representation of $X_t$ is then - -$$ -X_t = C(L) \eta_t -$$ - -where $\eta_t$ is an $(n\times 1)$ vector white noise that -is "fundamental" for $X_t$. - -That is, $X_t - \mathbb{\hat E}\left[X_t \mid X_{t-1}, X_{t-2} -\ldots\right] = C_0 \, \eta_t$. - -The optimum predictor of $X_{t+j}$ is - -$$ -\mathbb{\hat E} \left[X_{t+j} \mid X_t, X_{t-1}, \ldots\right] - = \left[{C(L) \over L^j} \right]_+ \eta_t -$$ - -If $C(L)$ is invertible, i.e., if the zeros of $\det$ -$C(z)$ lie strictly outside the unit circle, then this formula can -be written - -$$ -\mathbb{\hat E} \left[X_{t+j} \mid X_t, X_{t-1}, \ldots\right] - = \left[{C(L) \over L^J} \right]_+ C(L)^{-1}\, X_t -$$ - diff --git a/lectures/time_series_models/estspec.md b/lectures/time_series_models/estspec.md deleted file mode 100644 index 383f2486..00000000 --- a/lectures/time_series_models/estspec.md +++ /dev/null @@ -1,624 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(estspec)= -```{raw} html - -``` - -# Estimation of {index}`Spectra ` - -```{index} single: Spectra; Estimation -``` - -```{contents} Contents -:depth: 2 -``` - -## Overview - -In a {ref}`previous lecture ` we covered some fundamental properties of 
covariance stationary linear stochastic processes. - -One objective for that lecture was to introduce spectral densities --- a standard and very useful technique for analyzing such processes. - -In this lecture we turn to the problem of estimating spectral densities and other related quantities from data. - -```{index} single: Spectra, Estimation; Fast Fourier Transform -``` - -Estimates of the spectral density are computed using what is known as a periodogram --- which in -turn is computed via the famous [fast Fourier transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform). - -Once the basic technique has been explained, we will apply it to the analysis of several key macroeconomic time series. - -For supplementary reading, see {cite}`Sargent1987` or {cite}`CryerChan2008`. - -(periodograms)= -## {index}`Periodograms ` - -{ref}`Recall that ` the spectral density $f$ of a covariance stationary process with -autocorrelation function $\gamma$ can be written - -$$ -f(\omega) = \gamma(0) + 2 \sum_{k \geq 1} \gamma(k) \cos(\omega k), -\qquad \omega \in \mathbb R -$$ - -Now consider the problem of estimating the spectral density of a given time series, when $\gamma$ is unknown. - -In particular, let $X_0, \ldots, X_{n-1}$ be $n$ consecutive observations of a single time series that is assumed to be covariance stationary. 
- -The most common estimator of the spectral density of this process is the *periodogram* of $X_0, \ldots, X_{n-1}$, which is defined as - -```{math} -:label: estspec_p - -I(\omega) -:= \frac{1}{n} \left| \sum_{t=0}^{n-1} X_t e^{i t \omega} \right|^2, -\qquad \omega \in \mathbb R -``` - -(Recall that $|z|$ denotes the modulus of complex number $z$) - -Alternatively, $I(\omega)$ can be expressed as - -$$ -I(\omega) -= \frac{1}{n} -\left\{ -\left[\sum_{t=0}^{n-1} X_t \cos(\omega t) \right]^2 -+ -\left[\sum_{t=0}^{n-1} X_t \sin(\omega t) \right]^2 -\right\} -$$ - -It is straightforward to show that the function $I$ is even and $2 -\pi$-periodic (i.e., $I(\omega) = I(-\omega)$ and $I(\omega + -2\pi) = I(\omega)$ for all $\omega \in \mathbb R$). - -From these two results, you will be able to verify that the values of -$I$ on $[0, \pi]$ determine the values of $I$ on all of -$\mathbb R$. - -The next section helps to explain the connection between the periodogram and the spectral density. - -### Interpretation - -```{index} single: Periodograms; Interpretation -``` - -To interpret the periodogram, it is convenient to focus on its values at the *Fourier frequencies* - -$$ -\omega_j := \frac{2 \pi j}{n}, -\quad j = 0, \ldots, n - 1 -$$ - -In what sense is $I(\omega_j)$ an estimate of $f(\omega_j)$? - -The answer is straightforward, although it does involve some algebra. 
- -With a bit of effort one can show that, for any integer $j > 0$, - -$$ -\sum_{t=0}^{n-1} e^{i t \omega_j } -= \sum_{t=0}^{n-1} \exp \left\{ i 2 \pi j \frac{t}{n} \right\} = 0 -$$ - -Letting $\bar X$ denote the sample mean $n^{-1} \sum_{t=0}^{n-1} X_t$, we then have - -$$ -n I(\omega_j) - = \left| \sum_{t=0}^{n-1} (X_t - \bar X) e^{i t \omega_j } \right|^2 - = \sum_{t=0}^{n-1} (X_t - \bar X) e^{i t \omega_j } -\sum_{r=0}^{n-1} (X_r - \bar X) e^{-i r \omega_j } -$$ - -By carefully working through the sums, one can transform this to - -$$ -n I(\omega_j) -= \sum_{t=0}^{n-1} (X_t - \bar X)^2 -+ 2 \sum_{k=1}^{n-1} \sum_{t=k}^{n-1} (X_t - \bar X)(X_{t-k} - \bar X) -\cos(\omega_j k) -$$ - -Now let - -$$ -\hat \gamma(k) -:= \frac{1}{n} \sum_{t=k}^{n-1} (X_t - \bar X)(X_{t-k} - \bar X), -\qquad k = 0,1,\ldots, n-1 -$$ - -This is the sample autocovariance function, the natural "plug-in estimator" of the {ref}`autocovariance function ` $\gamma$. - -("Plug-in estimator" is an informal term for an estimator found by replacing expectations with sample means) - -With this notation, we can now write - -$$ -I(\omega_j) -= \hat \gamma(0) -+ 2 \sum_{k=1}^{n-1} \hat \gamma(k) \cos(\omega_j k) -$$ - -Recalling our expression for $f$ given {ref}`above `, -we see that $I(\omega_j)$ is just a sample analog of $f(\omega_j)$. - -### Calculation - -```{index} single: Periodograms; Computation -``` - -Let's now consider how to compute the periodogram as defined in {eq}`estspec_p`. - -There are already functions available that will do this for us ---- an example is `periodogram` in the `DSP.jl` package. - -However, it is very simple to replicate their results, and this will give us a platform to make useful extensions. - -The most common way to calculate the periodogram is via the discrete Fourier transform, -which in turn is implemented through the [fast Fourier transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform) algorithm. 
- -In general, given a sequence $a_0, \ldots, a_{n-1}$, the discrete -Fourier transform computes the sequence - -$$ -A_j := \sum_{t=0}^{n-1} a_t \exp \left\{ i 2 \pi \frac{tj}{n} \right\}, -\qquad j = 0, \ldots, n-1 -$$ - -With $a_0, \ldots, a_{n-1}$ stored in Julia array `a`, the function call `fft(a)` returns the values $A_0, \ldots, A_{n-1}$ as a Julia array. - -It follows that, when the data $X_0, \ldots, X_{n-1}$ are stored in array `X`, the values $I(\omega_j)$ at the Fourier frequencies, which are given by - -$$ -\frac{1}{n} \left| \sum_{t=0}^{n-1} X_t \exp \left\{ i 2 \pi \frac{t j}{n} \right\} \right|^2, -\qquad j = 0, \ldots, n-1 -$$ - -can be computed by `abs(fft(X)).^2 / length(X)`. - -Note: The Julia function `abs` acts elementwise, and correctly handles complex numbers (by computing their modulus, which is exactly what we need). - -A function called `periodogram` that puts all this together can be found [here](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/estspec.jl). - -Let's generate some data for this function using the `ARMA` type from [QuantEcon.jl](https://github.com/QuantEcon/QuantEcon.jl) (see the {ref}`lecture on linear processes ` for more details). - -Here's a code snippet that, once the preceding code has been run, generates data from the process - -```{math} -:label: esp_arma - -X_t = 0.5 X_{t-1} + \epsilon_t - 0.8 \epsilon_{t-2} -``` - -where $\{ \epsilon_t \}$ is white noise with unit variance, and compares the periodogram to the actual spectral density - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test -``` - -```{code-cell} julia -using QuantEcon, Plots, Random -using LinearAlgebra, Statistics - -Random.seed!(42) # For reproducible results. 
- -n = 40 # Data size -phi = 0.5 # AR parameter -theta = [0, -0.8] # MA parameter -sigma = 1.0 -lp = ARMA(phi, theta, sigma) -X = simulation(lp, ts_length = n) - -x, y = periodogram(X) -x_sd, y_sd = spectral_density(lp, two_pi = false, res = 120) - -plot(x, y, linecolor = "blue", linewidth = 2, linealpha = 0.5, - lab = "periodogram") -plot!(x_sd, y_sd, linecolor = "red", linewidth = 2, linealpha = 0.8, - lab = "spectral density") -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - #test y[17] ≈ 0.016056828911473187 - #test x[17] ≈ 2.5132741228718345 - #test y_sd[76] ≈ 1.6587587789586284 -end -``` - -This estimate looks rather disappointing, but the data size is only 40, so -perhaps it's not surprising that the estimate is poor. - -However, if we try again with `n = 1200` the outcome is not much better - -```{figure} /_static/figures/periodogram1.png - -``` - -The periodogram is far too irregular relative to the underlying spectral density. - -This brings us to our next topic. - -## {index}`Smoothing ` - -```{index} single: Spectra, Estimation; Smoothing -``` - -There are two related issues here. - -One is that, given the way the fast Fourier transform is implemented, the -number of points $\omega$ at which $I(\omega)$ is estimated -increases in line with the amount of data. - -In other words, although we have more data, we are also using it to estimate more values. - -A second issue is that densities of all types are fundamentally hard to -estimate without parametric assumptions. - -```{index} single: Nonparametric Estimation -``` - -Typically, nonparametric estimation of densities requires some degree of smoothing. - -The standard way that smoothing is applied to periodograms is by taking local averages. 
- -In other words, the value $I(\omega_j)$ is replaced with a weighted -average of the adjacent values - -$$ -I(\omega_{j-p}), I(\omega_{j-p+1}), \ldots, I(\omega_j), \ldots, I(\omega_{j+p}) -$$ - -This weighted average can be written as - -```{math} -:label: estspec_ws - -I_S(\omega_j) := \sum_{\ell = -p}^{p} w(\ell) I(\omega_{j+\ell}) -``` - -where the weights $w(-p), \ldots, w(p)$ are a sequence of $2p + 1$ nonnegative -values summing to one. - -In generally, larger values of $p$ indicate more smoothing --- more on -this below. - -The next figure shows the kind of sequence typically used. - -Note the smaller weights towards the edges and larger weights in the center, so that more distant values from $I(\omega_j)$ have less weight than closer ones in the sum {eq}`estspec_ws` - -```{code-cell} julia -function hanning_window(M) - w = [0.5 - 0.5 * cos(2 * pi * n / (M - 1)) for n in 0:(M - 1)] - return w -end - -window = hanning_window(25) / sum(hanning_window(25)) -x = range(-12, 12, length = 25) -plot(x, window, color = "darkblue", title = "Hanning window", - ylabel = "Weights", - xlabel = "Position in sequence of weights", legend = false, grid = false) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - @test window[7] ≈ 0.04166666666666666 - @test window[12] ≈ 0.08191357609537783 -end -``` - -### Estimation with Smoothing - -```{index} single: Spectra, Estimation; Smoothing -``` - -Our next step is to provide code that will not only estimate the periodogram but also provide smoothing as required. - -Such functions have been written in [estspec.jl](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/estspec.jl) and are available once you've installed [QuantEcon.jl](http://quantecon.org/quantecon-jl). - -The [GitHub listing](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/estspec.jl) displays three functions, `smooth()`, `periodogram()`, `ar_periodogram()`. We will discuss the first two here and the third one {ref}`below `. 
- -The `periodogram()` function returns a periodogram, optionally smoothed via the `smooth()` function. - -Regarding the `smooth()` function, since smoothing adds a nontrivial amount of computation, we have applied a fairly terse array-centric method based around `conv`. - -Readers are left either to explore or simply to use this code according to their interests. - -The next three figures each show smoothed and unsmoothed periodograms, as well as the population or "true" spectral density. - -(The model is the same as before --- see equation {eq}`esp_arma` --- and there are 400 observations) - -From top figure to bottom, the window length is varied from small to large. - -(fig_window_smoothing)= -```{figure} /_static/figures/window_smoothing.png - -``` - -In looking at the figure, we can see that for this model and data size, the -window length chosen in the middle figure provides the best fit. - -Relative to this value, the first window length provides insufficient -smoothing, while the third gives too much smoothing. - -Of course in real estimation problems the true spectral density is not visible -and the choice of appropriate smoothing will have to be made based on -judgement/priors or some other theory. - -(estspec_pfas)= -### Pre-Filtering and Smoothing - -```{index} single: Spectra, Estimation; Pre-Filtering -``` - -```{index} single: Spectra, Estimation; Smoothing -``` - -In the [code listing](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/estspec.jl) we showed three functions from the file `estspec.jl`. - -The third function in the file (`ar_periodogram()`) adds a pre-processing step to periodogram smoothing. - -First we describe the basic idea, and after that we give the code. - -The essential idea is to - -1. Transform the data in order to make estimation of the spectral density more efficient. -1. Compute the periodogram associated with the transformed data. -1. 
Reverse the effect of the transformation on the periodogram, so that it now - estimates the spectral density of the original process. - -Step 1 is called *pre-filtering* or *pre-whitening*, while step 3 is called *recoloring*. - -The first step is called pre-whitening because the -transformation is usually designed to turn the data into something closer to white noise. - -Why would this be desirable in terms of spectral density estimation? - -The reason is that we are smoothing our estimated periodogram based on -estimated values at nearby points --- recall {eq}`estspec_ws`. - -The underlying assumption that makes this a good idea is that the true -spectral density is relatively regular --- the value of $I(\omega)$ is close -to that of $I(\omega')$ when $\omega$ is close to $\omega'$. - -This will not be true in all cases, but it is certainly true for white noise. - -For white noise, $I$ is as regular as possible --- {ref}`it is a constant function `. - -In this case, values of $I(\omega')$ at points $\omega'$ near to $\omega$ -provided the maximum possible amount of information about the value $I(\omega)$. - -Another way to put this is that if $I$ is relatively constant, then we can use a large amount of smoothing without introducing too much bias. - -(ar_periodograms)= -### The AR(1) Setting - -```{index} single: Spectra, Estimation; AR(1) Setting -``` - -Let's examine this idea more carefully in a particular setting --- where -the data are assumed to be generated by an AR(1) process. - -(More general ARMA settings can be handled using similar techniques to those described below) - -Suppose in particular that $\{X_t\}$ is covariance stationary and AR(1), -with - -```{math} -:label: estspec_ar_dgp - -X_{t+1} = \mu + \phi X_t + \epsilon_{t+1} -``` - -where $\mu$ and $\phi \in (-1, 1)$ are unknown parameters and $\{ \epsilon_t \}$ is white noise. - -It follows that if we regress $X_{t+1}$ on $X_t$ and an intercept, the residuals -will approximate white noise. 
- -Let - -* $g$ be the spectral density of $\{ \epsilon_t \}$ --- a constant function, as discussed above -* $I_0$ be the periodogram estimated from the residuals --- an estimate of $g$ -* $f$ be the spectral density of $\{ X_t \}$ --- the object we are trying to estimate - -In view of {ref}`an earlier result ` we obtained while discussing ARMA processes, $f$ and $g$ are related by - -```{math} -:label: ar_sdsc - -f(\omega) = \left| \frac{1}{1 - \phi e^{i\omega}} \right|^2 g(\omega) -``` - -This suggests that the recoloring step, which constructs an estimate $I$ of $f$ from $I_0$, should set - -$$ -I(\omega) = \left| \frac{1}{1 - \hat \phi e^{i\omega}} \right|^2 I_0(\omega) -$$ - -where $\hat \phi$ is the OLS estimate of $\phi$. - -The code for `ar_periodogram()` --- the third function in `estspec.jl` --- does exactly this. (See the code [here](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/estspec.jl)). - -The next figure shows realizations of the two kinds of smoothed periodograms - -1. "standard smoothed periodogram", the ordinary smoothed periodogram, and -1. "AR smoothed periodogram", the pre-whitened and recolored one generated by `ar_periodogram()` - -The periodograms are calculated from time series drawn from {eq}`estspec_ar_dgp` with $\mu = 0$ and $\phi = -0.9$. - -Each time series is of length 150. - -The difference between the three subfigures is just randomness --- each one uses a different draw of the time series. - -(fig_ar_smoothed_periodogram)= -```{figure} /_static/figures/ar_smoothed_periodogram.png - -``` - -In all cases, periodograms are fit with the "hamming" window and window length of 65. - -Overall, the fit of the AR smoothed periodogram is much better, in the sense -of being closer to the true spectral density. - -## Exercises - -(estspec_ex1)= -### Exercise 1 - -Replicate {ref}`this figure ` (modulo randomness). - -The model is as in equation {eq}`esp_arma` and there are 400 observations. 
- -For the smoothed periodogram, the window type is "hamming". - -(estspec_ex2)= -### Exercise 2 - -Replicate {ref}`this figure ` (modulo randomness). - -The model is as in equation {eq}`estspec_ar_dgp`, with $\mu = 0$, $\phi = -0.9$ -and 150 observations in each time series. - -All periodograms are fit with the "hamming" window and window length of 65. - -## Solutions - -### Exercise 1 - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Random -Random.seed!(42); # reproducible results -``` - -```{code-cell} julia -n = 400 -phi = 0.5 -theta = [0, -0.8] -sigma = 1.0 -lp = ARMA(phi, theta, 1.0) -X = simulation(lp, ts_length = n) - -xs = [] -x_sds = [] -x_sms = [] -ys = [] -y_sds = [] -y_sms = [] -titles = [] - -for (i, wl) in enumerate([15, 55, 175]) # window lengths - x, y = periodogram(X) - push!(xs, x) - push!(ys, y) - - x_sd, y_sd = spectral_density(lp, two_pi = false, res = 120) - push!(x_sds, x_sd) - push!(y_sds, y_sd) - - x, y_smoothed = periodogram(X, "hamming", wl) - push!(x_sms, x) - push!(y_sms, y_smoothed) - - t = "window length = $wl" - push!(titles, t) -end -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - #test y_sds[2][12] ≈ 1.0359408815913638 atol = 1e-3 -# #test y_sms[3][45] ≈ 2.7396611185705604 atol = 1e-3 -# #test ys[1][50] ≈ 4.245609056262289 atol = 1e-3 -end -``` - -```{code-cell} julia -plot(xs, ys, layout = (3, 1), color = :blue, alpha = 0.5, - linewidth = 2, label = ["periodogram" "" ""]) -plot!(x_sds, y_sds, layout = (3, 1), color = :red, alpha = 0.8, - linewidth = 2, label = ["spectral density" "" ""]) -plot!(x_sms, y_sms, layout = (3, 1), color = :black, - linewidth = 2, label = ["smoothed periodogram" "" ""]) -plot!(title = reshape(titles, 1, length(titles))) -``` - -### Exercise 2 - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42); # reproducible results -``` - -```{code-cell} julia -lp2 = ARMA(-0.9, 0.0, 1.0) -wl = 65 -p = plot(layout = (3, 1)) - -for i in 1:3 - X = 
simulation(lp2, ts_length = 150) - plot!(p[i], xlims = (0, pi)) - - x_sd, y_sd = spectral_density(lp2, two_pi = false, res = 180) - plot!(p[i], x_sd, y_sd, linecolor = :red, linestyle = :solid, - yscale = :log10, linewidth = 2, linealpha = 0.75, - label = "spectral density", legend = :topleft) - - x, y_smoothed = periodogram(X, "hamming", wl) - plot!(p[i], x, y_smoothed, linecolor = :black, linestyle = :solid, - yscale = :log10, linewidth = 2, linealpha = 0.75, - label = "standard smoothed periodogram", legend = :topleft) - - x, y_ar = ar_periodogram(X, "hamming", wl) - plot!(p[i], x, y_ar, linecolor = :blue, linestyle = :solid, - yscale = :log10, linewidth = 2, linealpha = 0.75, - label = "AR smoothed periodogram", legend = :topleft) -end -p -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42) -y_sd = spectral_density(lp2,two_pi=false, res=180) - -@testset begin - @test y_sd[2][100] ≈ 0.6616951403067453 - @test y_sd[1][100] ≈ 1.7375288977954721 -end -``` - diff --git a/lectures/time_series_models/lu_tricks.md b/lectures/time_series_models/lu_tricks.md deleted file mode 100644 index 707c60b9..00000000 --- a/lectures/time_series_models/lu_tricks.md +++ /dev/null @@ -1,1288 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(lu_tricks)= -```{raw} html - -``` - -# Classical Control with Linear Algebra - -```{contents} Contents -:depth: 2 -``` - -## Overview - -In an earlier lecture {doc}`Linear Quadratic Dynamic Programming Problems <../dynamic_programming/lqcontrol>` we have studied how to solve a special -class of dynamic optimization and prediction problems by applying the method of dynamic programming. In this class of problems - -* the objective function is **quadratic** in **states** and **controls** -* the one-step transition function is **linear** -* shocks are i.i.d. 
Gaussian or martingale differences - -> - -In this lecture and a companion lecture {doc}`Classical Filtering with Linear Algebra <../time_series_models/classical_filtering>`, we study the classical theory of linear-quadratic (LQ) optimal control problems. - -The classical approach does not use the two closely related methods -- dynamic programming and Kalman filtering -- that we describe in other lectures, namely, {doc}`Linear Quadratic Dynamic Programming Problems <../dynamic_programming/lqcontrol>` and {doc}`A First Look at the Kalman Filter <../introduction_dynamics/kalman>`. - -Instead they use either. - -* $z$-transform and lag operator methods, or. -* matrix decompositions applied to linear systems of first-order conditions for optimum problems. - -> - -In this lecture and the sequel {doc}`Classical Filtering with Linear Algebra <../time_series_models/classical_filtering>`, we mostly rely on elementary linear algebra. - -The main tool from linear algebra we'll put to work here is [LU decomposition](https://en.wikipedia.org/wiki/LU_decomposition). - -We'll begin with discrete horizon problems. - -Then we'll view infinite horizon problems as appropriate limits of these finite horizon problems. - -Later, we will examine the close connection between LQ control and least squares prediction and filtering problems. - -These classes of problems are connected in the sense that to solve each, essentially the same mathematics is used. - -### References - -Useful references include {cite}`Whittle1963`, {cite}`HanSar1980`, {cite}`Orfanidisoptimum1988`, {cite}`Athanasios1991`, and {cite}`Muth1960`. - - -```{code-cell} julia -using LaTeXStrings, Polynomials, Plots, Random -using LinearAlgebra, Statistics -``` - -## A Control Problem - -Let $L$ be the **lag operator**, so that, for sequence $\{x_t\}$ we have $L x_t = x_{t-1}$. 
- -More generally, let $L^k x_t = x_{t-k}$ with $L^0 x_t = x_t$ and - -$$ -d(L) = d_0 + d_1 L+ \ldots + d_m L^m -$$ - -where $d_0, d_1, \ldots, d_m$ is a given scalar sequence. - -Consider the discrete time control problem - -```{math} -:label: oneone - -\max_{\{y_t\}} -\lim_{N \to \infty} \sum^N_{t=0} \beta^t\, -\left\{ - a_t y_t - {1 \over 2}\, hy^2_t - {1 \over 2} \, - \left[ d(L)y_t \right]^2 -\right\}, -``` - -where - -* $h$ is a positive parameter and $\beta \in (0,1)$ is a discount factor -* $\{a_t\}_{t \geq 0}$ is a sequence of exponential order less than $\beta^{-1/2}$, by which we mean $\lim_{t \rightarrow \infty} \beta^{\frac{t}{2}} a_t = 0$ - -Maximization in {eq}`oneone` is subject to initial conditions for $y_{-1}, y_{-2} \ldots, y_{-m}$. - -Maximization is over infinite sequences $\{y_t\}_{t \geq 0}$. - -### Example - -The formulation of the LQ problem given above is broad enough to encompass -many useful models. - -As a simple illustration, recall that in {doc}`lqcontrol <../dynamic_programming/lqcontrol>` we consider a monopolist facing stochastic demand -shocks and adjustment costs. - -Let's consider a deterministic version of this problem, where the monopolist -maximizes the discounted sum - -$$ -\sum_{t=0}^{\infty} \beta^t \pi_t -$$ - -and - -$$ -\pi_t = p_t q_t - c q_t - \gamma (q_{t+1} - q_t)^2 -\quad \text{with} \quad -p_t = \alpha_0 - \alpha_1 q_t + d_t -$$ - -In this expression, $q_t$ is output, $c$ is average cost of production, and $d_t$ is a demand shock. - -The term $\gamma (q_{t+1} - q_t)^2$ represents adjustment costs. - -You will be able to confirm that the objective function can be rewritten as {eq}`oneone` when - -* $a_t := \alpha_0 + d_t - c$ -* $h := 2 \alpha_1$ -* $d(L) := \sqrt{2 \gamma}(I - L)$ - -Further examples of this problem for factor demand, economic growth, and government policy problems are given in ch. IX of {cite}`Sargent1987`. - -## Finite Horizon Theory - -We first study a finite $N$ version of the problem. 
- -Later we will study an infinite horizon problem solution as a limiting version of a finite horizon problem. - -(This will require being careful because the limits as $N \to \infty$ of the necessary and sufficient conditions for maximizing finite $N$ versions of {eq}`oneone` -are not sufficient for maximizing {eq}`oneone`) - -We begin by - -1. fixing $N > m$, -1. differentiating the finite version of {eq}`oneone` with respect to $y_0, y_1, \ldots, y_N$, and -1. setting these derivatives to zero - -For $t=0, \ldots, N-m$ these first-order necessary conditions are the -*Euler equations*. - -For $t = N-m + 1, \ldots, N$, the first-order conditions are a set of -*terminal conditions*. - -Consider the term - -$$ -\begin{aligned} -J -& = \sum^N_{t=0} \beta^t [d(L) y_t] [d(L) y_t] -\\ -& = \sum^N_{t=0} - \beta^t \, (d_0 \, y_t + d_1 \, y_{t-1} + \cdots + d_m \, y_{t-m}) \, - (d_0 \, y_t + d_1 \, y_{t-1} + \cdots + d_m\, y_{t-m}) -\end{aligned} -$$ - -Differentiating $J$ with respect to $y_t$ for -$t=0,\ 1,\ \ldots,\ N-m$ gives - -$$ -\begin{aligned} -{\partial {J} \over \partial y_t} - & = 2 \beta^t \, d_0 \, d(L)y_t + - 2 \beta^{t+1} \, d_1\, d(L)y_{t+1} + \cdots + - 2 \beta^{t+m}\, d_m\, d(L) y_{t+m} \\ - & = 2\beta^t\, \bigl(d_0 + d_1 \, \beta L^{-1} + d_2 \, \beta^2\, L^{-2} + - \cdots + d_m \, \beta^m \, L^{-m}\bigr)\, d (L) y_t\ -\end{aligned} -$$ - -We can write this more succinctly as - -```{math} -:label: onetwo - -{\partial {J} \over \partial y_t} - = 2 \beta^t \, d(\beta L^{-1}) \, d (L) y_t -``` - -Differentiating $J$ with respect to $y_t$ for $t = N-m + 1, \ldots, N$ gives - -```{math} -:label: onethree - -\begin{aligned} - {\partial J \over \partial y_N} - &= 2 \beta^N\, d_0 \, d(L) y_N \cr - {\partial J \over \partial y_{N-1}} - &= 2\beta^{N-1} \,\bigl[d_0 + \beta \, - d_1\, L^{-1}\bigr] \, d(L)y_{N-1} \cr - \vdots - & \quad \quad \vdots \cr - {\partial {J} \over \partial y_{N-m+1}} - &= 2 \beta^{N-m+1}\,\bigl[d_0 + \beta - L^{-1} \,d_1 + \cdots + 
\beta^{m-1}\, L^{-m+1}\, d_{m-1}\bigr] d(L)y_{N-m+1} -\end{aligned} -``` - -With these preliminaries under our belts, we are ready to differentiate {eq}`oneone`. - -Differentiating {eq}`oneone` with respect to $y_t$ for $t=0, \ldots, N-m$ gives the Euler equations - -```{math} -:label: onefour - -\bigl[h+d\,(\beta L^{-1})\,d(L)\bigr] y_t = a_t, -\quad t=0,\, 1,\, \ldots, N-m -``` - -The system of equations {eq}`onefour` form a $2 \times m$ order linear *difference -equation* that must hold for the values of $t$ indicated. - -Differentiating {eq}`oneone` with respect to $y_t$ for $t = N-m + 1, \ldots, N$ gives the terminal conditions - -```{math} -:label: onefive - -\begin{aligned} -\beta^N (a_N - hy_N - d_0\,d(L)y_N) -&= 0 \cr - \beta^{N-1} \left(a_{N-1}-hy_{N-1}-\Bigl(d_0 + \beta \, d_1\, -L^{-1}\Bigr)\, d(L)\, y_{N-1}\right) -& = 0 \cr - \vdots & \vdots\cr -\beta^{N-m+1} \biggl(a_{N-m+1} - h y_{N-m+1} -(d_0+\beta L^{-1} -d_1+\cdots\ +\beta^{m-1} L^{-m+1} d_{m-1}) d(L) y_{N-m+1}\biggr) -& = 0 -\end{aligned} -``` - -In the finite $N$ problem, we want simultaneously to solve {eq}`onefour` subject to the $m$ initial conditions -$y_{-1}, \ldots, y_{-m}$ and the $m$ terminal conditions -{eq}`onefive`. - -These conditions uniquely pin down the solution of the finite $N$ problem. - -That is, for the finite $N$ problem, -conditions {eq}`onefour` and {eq}`onefive` are necessary and sufficient for a maximum, -by concavity of the objective function. - -Next we describe how to obtain the solution using matrix methods. - -(fdlq)= -### Matrix Methods - -Let's look at how linear algebra can be used to tackle and shed light on the finite horizon LQ control problem. - -#### A Single Lag Term - -Let's begin with the special case in which $m=1$. 
- -We want to solve the system of $N+1$ linear equations - -```{math} -:label: oneff - -\begin{aligned} -\bigl[h & + d\, (\beta L^{-1})\, d\, (L) ] y_t = a_t, \quad -t = 0,\ 1,\ \ldots,\, N-1\cr -\beta^N & \bigl[a_N-h\, y_N-d_0\, d\, (L) y_N\bigr] = 0 -\end{aligned} -``` - -where $d(L) = d_0 + d_1 L$. - -These equations are to be solved for -$y_0, y_1, \ldots, y_N$ as functions of -$a_0, a_1, \ldots, a_N$ and $y_{-1}$. - -Let - -$$ -\phi (L) -= \phi_0 + \phi_1 L + \beta \phi_1 L^{-1} -= h + d (\beta L^{-1}) d(L) -= (h + d_0^2 + d_1^2) + d_1 d_0 L+ d_1 d_0 \beta L^{-1} -$$ - -Then we can represent {eq}`oneff` as the matrix equation - -```{math} -:label: onefourfive - -\left[ - \begin{matrix} - (\phi_0-d_1^2) & \phi_1 & 0 & 0 & \ldots & \ldots & 0 \cr - \beta \phi_1 & \phi_0 & \phi_1 & 0 & \ldots & \dots & 0 \cr - 0 & \beta \phi_1 & \phi_0 & \phi_1 & \ldots & \ldots & 0 \cr - \vdots &\vdots & \vdots & \ddots & \vdots & \vdots & \vdots \cr - 0 & \ldots & \ldots & \ldots & \beta \phi_1 & \phi_0 &\phi_1 \cr - 0 & \ldots & \ldots & \ldots & 0 & \beta \phi_1 & \phi_0 - \end{matrix} -\right] -\left [ - \begin{matrix} - y_N \cr y_{N-1} \cr y_{N-2} \cr \vdots \cr - y_1 \cr y_0 - \end{matrix} -\right ] = -\left[ -\begin{matrix} - a_N \cr a_{N-1} \cr a_{N-2} \cr \vdots \cr a_1 \cr - a_0 - \phi_1 y_{-1} -\end{matrix} -\right] -``` - -or - -```{math} -:label: onefoursix - -W\bar y = \bar a -``` - -Notice how we have chosen to arrange the $y_t$’s in reverse -time order. - -The matrix $W$ on the left side of {eq}`onefourfive` is "almost" a -[Toeplitz matrix](https://en.wikipedia.org/wiki/Toeplitz_matrix) (where each -descending diagonal is constant). - -There are two sources of deviation from the form of a Toeplitz matrix. - -1. The first element differs from the remaining diagonal elements, reflecting the terminal condition. -1. The subdiagonal elements equal $\beta$ time the superdiagonal elements. 
- -The solution of {eq}`onefoursix` can be expressed in the form - -```{math} -:label: onefourseven - -\bar y = W^{-1} \bar a -``` - -which represents each element $y_t$ of $\bar y$ as a function of the entire vector $\bar a$. - -That is, $y_t$ is a function of past, present, and future values of $a$'s, as well as of the initial condition $y_{-1}$. - -#### An Alternative Representation - -An alternative way to express the solution to {eq}`onefourfive` or -{eq}`onefoursix` is in so called **feedback-feedforward** form. - -The idea here is to find a solution expressing $y_t$ as a function of *past* $y$'s and *current* and *future* $a$'s. - -To achieve this solution, one can use an [LU decomposition](https://en.wikipedia.org/wiki/LU_decomposition) of $W$. - -There always exists a decomposition of $W$ of the form $W= LU$ -where - -* $L$ is an $(N+1) \times (N+1)$ lower trangular matrix -* $U$ is an $(N+1) \times (N+1)$ upper trangular matrix. - -The factorization can be normalized so that the diagonal elements of $U$ are unity. 
- -Using the LU representation in {eq}`onefourseven`, we obtain - -```{math} -:label: onefournine - -U \bar y = L^{-1} \bar a -``` - -Since $L^{-1}$ is lower trangular, this representation expresses -$y_t$ as a function of - -* lagged $y$'s (via the term $U \bar y$), and -* current and future $a$’s (via the term $L^{-1} \bar a$) - -Because there are zeros everywhere in the matrix -on the left of {eq}`onefourfive` except on the diagonal, superdiagonal, and -subdiagonal, the $LU$ decomposition takes - -* $L$ to be zero except in the diagional and the leading subdiagonal -* $U$ to be zero except on the diagonal and the superdiagional - -Thus, {eq}`onefournine` has the form - -$$ -\left[ -\begin{matrix} - 1& U_{12} & 0 & 0 & \ldots & 0 & 0 \cr - 0 & 1 & U_{23} & 0 & \ldots & 0 & 0 \cr - 0 & 0 & 1 & U_{34} & \ldots & 0 & 0 \cr - 0 & 0 & 0 & 1 & \ldots & 0 & 0\cr - \vdots & \vdots & \vdots & \vdots & \ddots & \vdots & \vdots\cr - 0 & 0 & 0 & 0 & \ldots & 1 & U_{N,N+1} \cr - 0 & 0 & 0 & 0 & \ldots & 0 & 1 -\end{matrix} -\right] \ \ \ -\left[ -\begin{matrix} - y_N \cr y_{N-1} \cr y_{N-2} \cr y_{N-3} \cr \vdots \cr y_1 \cr y_0 -\end{matrix} -\right] = -$$ - -$$ -\quad -\left[ -\begin{matrix} - L^{-1}_{11} & 0 & 0 & \ldots & 0 \cr - L^{-1}_{21} & L^{-1}_{22} & 0 & \ldots & 0 \cr - L^{-1}_{31} & L^{-1}_{32} & L^{-1}_{33}& \ldots & 0 \cr - \vdots & \vdots & \vdots & \ddots & \vdots\cr - L^{-1}_{N,1} & L^{-1}_{N,2} & L^{-1}_{N,3} & \ldots & 0 \cr - L^{-1}_{N+1,1} & L^{-1}_{N+1,2} & L^{-1}_{N+1,3} & \ldots & - L^{-1}_{N+1\, N+1} -\end{matrix} -\right] -\left[ -\begin{matrix} - a_N \cr a_{N-1} \cr a_{N-2} \cr \vdots \cr a_1 \cr a_0 - - \phi_1 y_{-1} -\end{matrix} -\right ] -$$ - -where $L^{-1}_{ij}$ is the $(i,j)$ element of $L^{-1}$ and $U_{ij}$ is the $(i,j)$ element of $U$. - -Note how the left side for a given $t$ involves $y_t$ and one lagged value $y_{t-1}$ while the right side involves all future values of the forcing process $a_t, a_{t+1}, \ldots, a_N$. 
- -#### Additional Lag Terms - -We briefly indicate how this approach extends to the problem with -$m > 1$. - -Assume that $\beta = 1$ and let $D_{m+1}$ be the -$(m+1) \times (m+1)$ symmetric matrix whose elements are -determined from the following formula: - -$$ -D_{jk} = d_0 d_{k-j} + d_1 d_{k-j+1} + \ldots + d_{j-1} d_{k-1}, \qquad k -\geq j -$$ - -Let $I_{m+1}$ be the $(m+1) \times (m+1)$ identity matrix. - -Let $\phi_j$ be the coefficients in the expansion $\phi (L) = h + d (L^{-1}) d (L)$. - -Then the first order conditions {eq}`onefour` and {eq}`onefive` can be expressed as: - -$$ -(D_{m+1} + hI_{m+1})\ \ -\left[ -\begin{matrix} - y_N \cr y_{N-1} \cr \vdots \cr y_{N-m} -\end{matrix} -\right]\ -= \ \left[ -\begin{matrix} - a_N \cr a_{N-1} \cr \vdots \cr a_{N-m} - \end{matrix} -\right] + M\ -\left[ - \begin{matrix} - y_{N-m+1}\cr y_{N-m-2}\cr \vdots\cr y_{N-2m} - \end{matrix} -\right] -$$ - -where $M$ is $(m+1)\times m$ and - -$$ -M_{ij} = \begin{cases} -D_{i-j,\,m+1} \textrm{ for } i>j \\ - 0 \textrm{ for } i\leq j\end{cases} -$$ - -$$ -\begin{aligned} -\phi_m y_{N-1} &+ \phi_{m-1} y_{N-2} + \ldots + \phi_0 y_{N-m-1} + -\phi_1 y_{N-m-2} +\cr -&\hskip.75in \ldots + \phi_m y_{N-2m-1} = a_{N-m-1} \cr -\phi_m y_{N-2} &+ \phi_{m-1} y_{N-3} + \ldots + \phi_0 y_{N-m-2} + \phi_1 -y_{N-m-3} +\cr -&\hskip.75in \ldots + \phi_m y_{N-2m-2} = a_{N-m-2} \cr -&\qquad \vdots \cr -\phi_m y_{m+1} &+ \phi_{m-1} y_m + + \ldots + \phi_0 y_1 + \phi_1 y_0 + -\phi_m y_{-m+1} = a_1 \cr -\phi_m y_m + \phi_{m-1}& y_{m-1} + \phi_{m-2} + \ldots + \phi_0 y_0 + \phi_1 -y_{-1} + \ldots + \phi_m y_{-m} = a_0 -\end{aligned} -$$ - -As before, we can express this equation as $W \bar y = \bar a$. - -The matrix on the left of this equation is "almost" Toeplitz, the -exception being the leading $m \times m$ sub matrix in the upper -left hand corner. 
- -We can represent the solution in feedback-feedforward form by obtaining a decomposition $LU = W$, and obtain - -```{math} -:label: onefivetwo - -U \bar y = L^{-1} \bar a -``` - -$$ -\begin{aligned} \sum^t_{j=0}\, U_{-t+N+1,\,-t+N+j+1}\,y_{t-j} &= \sum^{N-t}_{j=0}\, -L_{-t+N+1,\, -t+N+1-j}\, \bar a_{t+j}\ ,\cr -&\qquad t=0,1,\ldots, N -\end{aligned} -$$ - -where $L^{-1}_{t,s}$ is the element in the $(t,s)$ position -of $L$, and similarly for $U$. - -The left side of equation {eq}`onefivetwo` is the "feedback" part of the optimal -control law for $y_t$, while the right-hand side is the "feedforward" part. - -We note that there is a different control law for each $t$. - -Thus, in the finite horizon case, the optimal control law is time dependent. - -It is natural to suspect that as $N \rightarrow\infty$, {eq}`onefivetwo` -becomes equivalent to the solution of our infinite horizon problem, -which below we shall show can be expressed as - -$$ -c(L) y_t = c (\beta L^{-1})^{-1} a_t\ , -$$ - -so that as $N \rightarrow \infty$ we expect that for each fixed -$t, U^{-1}_{t, t-j} -\rightarrow c_j$ and $L_{t,t+j}$ approaches the coefficient on -$L^{-j}$ in the expansion of $c(\beta L^{-1})^{-1}$. - -This suspicion is true under general conditions that we shall study later. - -For now, we note that by creating the matrix $W$ for large -$N$ and factoring it into the $LU$ form, good approximations -to $c(L)$ and $c(\beta L^{-1})^{-1}$ can be obtained. - -## The Infinite Horizon Limit - -For the infinite horizon problem, we propose to discover first-order -necessary conditions by taking the limits of {eq}`onefour` and {eq}`onefive` as -$N \to \infty$. - -This approach is valid, and the limits of {eq}`onefour` and {eq}`onefive` as $N$ approaches infinity are first-order necessary conditions for a maximum. - -However, for the infinite horizon problem with $\beta < 1$, the limits of {eq}`onefour` and {eq}`onefive` are, in general, not sufficient for a maximum. 
- -That is, the limits of {eq}`onefive` do not provide enough information uniquely to determine the solution of the Euler equation {eq}`onefour` that maximizes {eq}`oneone`. - -As we shall see below, a side condition on the path of $y_t$ that together with {eq}`onefour` is sufficient for an optimum is - -```{math} -:label: onesix - -\sum^\infty_{t=0}\ \beta^t\, hy^2_t < \infty -``` - -All paths that satisfy the Euler equations, except the one that we shall -select below, violate this condition and, therefore, evidently lead to -(much) lower values of {eq}`oneone` than does the -optimal path selected by the solution procedure below. - -Consider the *characteristic equation* associated with the Euler equation - -```{math} -:label: oneseven - -h+d \, (\beta z^{-1})\, d \, (z) = 0 -``` - -Notice that if $\tilde z$ is a root of equation {eq}`oneseven`, then so is $\beta \tilde z^{-1}$. - -Thus, the roots of {eq}`oneseven` come in "$\beta$-reciprocal" pairs. - -Assume that the roots of {eq}`oneseven` are distinct. - -Let the roots be, in descending order according to their moduli, $z_1, z_2, \ldots, z_{2m}$. - -From the reciprocal pairs property and the assumption of distinct -roots, it follows that $\vert z_j \vert > \sqrt \beta\ \hbox{ for } j\leq m \hbox -{ and } \vert z_j \vert < \sqrt\beta\ \hbox { for } j > m$. - -It also follows that $z_{2m-j} = \beta z^{-1}_{j+1}, j=0, 1, \ldots, m-1$. - -Therefore, the characteristic polynomial on the left side of {eq}`oneseven` can be expressed as - -```{math} -:label: oneeight - -\begin{aligned} -h+d(\beta z^{-1})d(z) -&= z^{-m} z_0(z-z_1)\cdots -(z-z_m)(z-z_{m+1}) \cdots (z-z_{2m}) \cr -&= z^{-m} z_0 (z-z_1)(z-z_2)\cdots (z-z_m)(z-\beta z_m^{-1}) -\cdots (z-\beta z^{-1}_2)(z-\beta z_1^{-1}) -\end{aligned} -``` - -where $z_0$ is a constant. 
- -In {eq}`oneeight`, we substitute $(z-z_j) = -z_j (1- {1 \over z_j}z)$ and -$(z-\beta z_j^{-1}) = z(1 - {\beta \over z_j} z^{-1})$ for $j = 1, \ldots, m$ to get - -$$ -h+d(\beta z^{-1})d(z) -= (-1)^m(z_0z_1\cdots z_m) -(1- {1\over z_1} z) \cdots (1-{1\over z_m} z)(1- {1\over z_1} \beta z^{-1}) -\cdots(1-{1\over z_m} \beta z^{-1}) -$$ - -Now define $c(z) = \sum^m_{j=0} c_j \, z^j$ as - -```{math} -:label: onenine - -c\,(z)=\Bigl[(-1)^m z_0\, z_1 \cdots z_m\Bigr]^{1/2} (1-{z\over z_1}) \, -(1-{z\over z_2}) \cdots (1- {z\over z_m}) -``` - -Notice that {eq}`oneeight` can be written - -```{math} -:label: oneten - -h + d \ (\beta z^{-1})\ d\ (z) = c\,(\beta z^{-1})\,c\,(z) -``` - -It is useful to write {eq}`onenine` as - -```{math} -:label: oneeleven - -c(z) = c_0(1-\lambda_1\, z) \ldots (1-\lambda_m z) -``` - -where - -$$ -c_0 -= \left[(-1)^m\, z_0\, z_1 \cdots z_m\right]^{1/2}; -\quad \lambda_j={1 \over z_j},\,\ j=1, \ldots, m -$$ - -Since $\vert z_j \vert > \sqrt \beta \hbox { for } j = 1, \ldots, m$ it -follows that $\vert \lambda_j \vert < 1/\sqrt \beta$ for $j = 1, -\ldots, m$. - -Using {eq}`oneeleven`, we can express the factorization {eq}`oneten` as - -$$ -h+d (\beta z^{-1})d(z) = c^2_0 (1-\lambda_1 z) \cdots -(1 - \lambda_m z) (1-\lambda_1 \beta z^{-1}) -\cdots (1 - \lambda_m \beta z^{-1}) -$$ - -In sum, we have constructed a factorization {eq}`oneten` of the characteristic -polynomial for the Euler equation in which the zeros of $c(z)$ -exceed $\beta^{1/2}$ in modulus, and the zeros of -$c\,(\beta z^{-1})$ are less than $\beta^{1/2}$ in modulus. - -Using {eq}`oneten`, we now write the Euler equation as - -$$ -c(\beta L^{-1})\,c\,(L)\, y_t = a_t -$$ - -The unique solution of the Euler equation that satisfies condition {eq}`onesix` -is - -```{math} -:label: onethirteen - -c(L)\,y_t = c\,(\beta L^{-1})^{-1}a_t -``` - -This can be established by using an argument paralleling that in -chapter IX of {cite}`Sargent1987`. 
- -To exhibit the solution in a form -paralleling that of {cite}`Sargent1987`, we use {eq}`oneeleven` to write -{eq}`onethirteen` as - -```{math} -:label: JUNK - -(1-\lambda_1 L) \cdots (1 - \lambda_mL)y_t = {c^{-2}_0 a_t \over (1-\beta \lambda_1 L^{-1}) \cdots (1 - \beta \lambda_m L^{-1})} -``` - -Using [partial fractions](https://en.wikipedia.org/wiki/Partial_fraction_decomposition), we can write the characteristic polynomial on -the right side of {eq}`JUNK` as - -$$ -\sum^m_{j=1} {A_j \over 1 - \lambda_j \, \beta L^{-1}} - \quad \text{where} \quad -A_j := {c^{-2}_0 \over \prod_{i \not= j}(1-{\lambda_i \over \lambda_j})} -$$ - -Then {eq}`JUNK` can be written - -$$ -(1-\lambda_1 L) \cdots (1-\lambda_m L) y_t = \sum^m_{j=1} \, {A_j \over 1 - -\lambda_j \, \beta L^{-1}} a_t -$$ - -or - -```{math} -:label: onefifteen - -(1 - \lambda_1 L) \cdots (1 - \lambda_m L) y_t = \sum^m_{j=1}\, A_j -\sum^\infty_{k=0}\, (\lambda_j\beta)^k\, a_{t+k} -``` - -Equation {eq}`onefifteen` expresses the optimum sequence for $y_t$ in terms -of $m$ lagged $y$'s, and $m$ weighted infinite -geometric sums of future $a_t$'s. - -Furthermore, {eq}`onefifteen` is the unique solution of the Euler equation that satisfies the initial conditions and condition {eq}`onesix`. - -In effect, condition {eq}`onesix` compels us to -solve the "unstable" roots of $h+d (\beta z^{-1})d(z)$ forward -(see {cite}`Sargent1987`). - -The step of factoring the polynomial $h+d (\beta z^{-1})\, d(z)$ into -$c\, (\beta z^{-1})c\,(z)$, where the zeros of $c\,(z)$ all -have modulus exceeding $\sqrt\beta$, is central to solving the problem. - -We note two features of the solution {eq}`onefifteen` - -* Since $\vert \lambda_j \vert < 1/\sqrt \beta$ for all $j$, it follows that $(\lambda_j \ \beta) < \sqrt \beta$. -* The assumption that $\{ a_t \}$ is of exponential order less than $1 /\sqrt \beta$ is sufficient to guarantee that the geometric sums of future $a_t$'s on the right side of {eq}`onefifteen` converge. 
- -We immediately see that those sums will -converge under the weaker condition that $\{ a_t\}$ is of -exponential order less than $\phi^{-1}$ where -$\phi = \max \, \{\beta \lambda_i, i=1,\ldots,m\}$. - -Note that with $a_t$ identically zero, {eq}`onefifteen` implies that -in general $\vert y_t \vert$ eventually grows exponentially at a -rate given by $\max_i \vert \lambda_i \vert$. - -The condition -$\max_i \vert \lambda_i \vert <1 /\sqrt \beta$ guarantees that -condition {eq}`onesix` is satisfied. - -In fact, $\max_i \vert \lambda_i -\vert < 1 /\sqrt \beta$ is a necessary condition for {eq}`onesix` to hold. - -Were {eq}`onesix` not satisfied, the objective function would diverge to $- \infty$, implying that the $y_t$ path could not be optimal. - -For example, with $a_t = 0$, for all $t$, it is easy to describe a naive (nonoptimal) policy for $\{y_t, t\geq 0\}$ that gives a finite value of {eq}`oneeleven`. - -We can simply let $y_t = 0 \hbox { for } t\geq 0$. - -This policy involves at most $m$ nonzero values of -$hy^2_t$ and $[d(L)y_t]^2$, and so yields a finite value of -{eq}`oneone`. - -Therefore it is easy to dominate a path that violates {eq}`onesix`. - -## Undiscounted Problems - -It is worthwhile focusing on a special case of the LQ problems above: -the undiscounted problem that emerges when $\beta = 1$. 
- -In this case, the Euler equation is - -$$ -\Bigl( h + d(L^{-1})d(L) \Bigr)\, y_t = a_t -$$ - -The factorization of the characteristic polynomial {eq}`oneten` becomes - -$$ -\Bigl(h+d \, (z^{-1})d(z)\Bigr) = c\,(z^{-1})\, c\,(z) -$$ - -where - -$$ -\begin{aligned} -c\,(z) &= c_0 (1 - \lambda_1 z) \ldots (1 - \lambda_m z) \cr -c_0 &= \Bigl[(-1)^m z_0 z_1 \ldots z_m\Bigr ] \cr -\vert \lambda_j \vert &< 1 \, \hbox { for } \, j = 1, \ldots, m\cr -\lambda_j &= \frac{1}{z_j} \hbox{ for } j=1,\ldots, m\cr -z_0 &= \hbox{ constant} -\end{aligned} -$$ - -The solution of the problem becomes - -$$ -(1 - \lambda_1 L) \cdots (1 - \lambda_m L) y_t = \sum^m_{j=1} A_j -\sum^\infty_{k=0} \lambda^k_j a_{t+k} -$$ - -### Transforming discounted to undiscounted problem - -Discounted problems can always be converted into undiscounted problems via a simple transformation. - -Consider problem {eq}`oneone` with $0 < \beta < 1$. - -Define the transformed variables - -```{math} -:label: onetwenty - -\tilde a_t = \beta^{t/2} a_t,\ \tilde y_t = \beta^{t/2} y_t -``` - -Then notice that $\beta^t\,[d\, (L) y_t ]^2=[\tilde d\,(L)\tilde y_t]^2$ with -$\tilde d \,(L)=\sum^m_{j=0} \tilde d_j\, L^j$ and $\tilde d_j = \beta^{j/2} -d_j$. - -Then the original criterion function {eq}`oneone` is equivalent to - -```{math} -:label: oneoneprime - -\lim_{N \rightarrow \infty} -\sum^N_{t=0} -\{\tilde a_t\, \tilde y_t - {1 \over 2} h\,\tilde y^2_t - {1\over 2} -[ \tilde d\,(L)\, \tilde y_t]^2 \} -``` - -which is to be maximized over sequences $\{\tilde y_t,\ t=0, \ldots\}$ subject to -$\tilde y_{-1}, \cdots, \tilde y_{-m}$ given and $\{\tilde a_t,\ t=1, \ldots\}$ a known bounded sequence. - -The Euler equation for this problem is $[h+\tilde d \,(L^{-1}) \, \tilde d\, (L) ]\, \tilde y_t = \tilde a_t$. 
- -The solution is - -$$ -(1 - \tilde \lambda_1 L) \cdots (1 - \tilde \lambda_m L)\,\tilde y_t = -\sum^m_{j=1} \tilde A_j \sum^\infty_{k=0} \tilde \lambda^k_j \, \tilde a_{t+k} -$$ - -or - -```{math} -:label: onetwentyone - -\tilde y_t = \tilde f_1 \, \tilde y_{t-1} + \cdots + \tilde f_m\, -\tilde y_{t-m} + \sum^m_{j=1} \tilde A_j \sum^\infty_{k=0} \tilde \lambda^k_j -\, \tilde a_{t+k}, -``` - -where $\tilde c \,(z^{-1}) \tilde c\,(z) = h + \tilde d\,(z^{-1}) \tilde d \,(z)$, and where - -$$ -\bigl[(-1)^m\, \tilde z_0 \tilde z_1 \ldots \tilde z_m \bigr]^{1/2} -(1 - \tilde \lambda_1\, z) \ldots (1 - \tilde \lambda_m\, z) = \tilde c\,(z), -\hbox { where } \ \vert \tilde \lambda_j \vert < 1 -$$ - -We leave it to the reader to show that {eq}`onetwentyone` implies the equivalent form of the solution - -$$ -y_t = f_1\, y_{t-1} + \cdots + f_m\, y_{t-m} + \sum^m_{j=1} A_j -\sum^\infty_{k=0} \, (\lambda_j\, \beta)^k \, a_{t+k} -$$ - -where - -```{math} -:label: onetwentythree - -f_j = \tilde f_j\, \beta^{-j/2},\ A_j = \tilde A_j,\ \lambda_j = \tilde -\lambda_j \, \beta^{-1/2} -``` - -The transformations {eq}`onetwenty` and the inverse formulas -{eq}`onetwentythree` allow us to solve a discounted problem by first -solving a related undiscounted problem. - -## Implementation - -Code that computes solutions to the LQ problem using the methods described -above can be found in file [control_and_filter.jl](https://github.com/QuantEcon/QuantEcon.lectures.code/blob/master/lu_tricks/control_and_filter.jl). 
- -Here's how it looks - -```{code-cell} julia ---- -tags: [output_scroll] ---- -function LQFilter(d, h, y_m; - r = nothing, - beta = nothing, - h_eps = nothing) - - m = length(d) - 1 - m == length(y_m) || throw(ArgumentError("y_m and d must be of same length = $m")) - - # define the coefficients of phi up front - phi = zeros(2m + 1) - for i in -m:m - phi[m-i+1] = sum(diag(d*d', -i)) - end - phi[m+1] = phi[m+1] + h - - # if r is given calculate the vector phi_r - if isnothing(r) - k = nothing - phi_r = nothing - else - k = size(r, 1) - 1 - phi_r = zeros(2k + 1) - - for i = -k:k - phi_r[k-i+1] = sum(diag(r*r', -i)) - end - - if h_eps != nothing - phi_r[k+1] = phi_r[k+1] + h_eps - end - end - - # if beta is given, define the transformed variables - if isnothing(beta) - beta = 1.0 - else - d = beta.^(collect(0:m)/2) * d - y_m = y_m * beta.^(- collect(1:m)/2) - end - - return (;d, h, y_m, m, phi, beta, phi_r, k) -end - -function construct_W_and_Wm(lqf, N) - - (;d, m) = lqf - W = zeros(N + 1, N + 1) - W_m = zeros(N + 1, m) - - # terminal conditions - D_m1 = zeros(m + 1, m + 1) - M = zeros(m + 1, m) - - # (1) Constuct the D_{m+1} matrix using the formula - - for j in 1:(m+1) - for k in j:(m+1) - D_m1[j, k] = dot(d[1:j, 1], d[k-j+1:k, 1]) - end - end - - # Make the matrix symmetric - D_m1 = D_m1 + D_m1' - Diagonal(diag(D_m1)) - - # (2) Construct the M matrix using the entries of D_m1 - - for j in 1:m - for i in (j + 1):(m + 1) - M[i, j] = D_m1[i-j, m+1] - end - end - M - - # Euler equations for t = 0, 1, ..., N-(m+1) - (;phi, h) = lqf - - W[1:(m + 1), 1:(m + 1)] = D_m1 + h * I - W[1:(m + 1), (m + 2):(2m + 1)] = M - - for (i, row) in enumerate((m + 2):(N + 1 - m)) - W[row, (i + 1):(2m + 1 + i)] = phi' - end - - for i in 1:m - W[N - m + i + 1 , end-(2m + 1 - i)+1:end] = phi[1:end-i] - end - - for i in 1:m - W_m[N - i + 2, 1:(m - i)+1] = phi[(m + 1 + i):end] - end - - return W, W_m -end - -function roots_of_characteristic(lqf) - (;m, phi) = lqf - - # Calculate the roots of 
the 2m-polynomial - phi_poly=Polynomial(phi[end:-1:1]) - proots = roots(phi_poly) - - # sort the roots according to their length (in descending order) - roots_sorted = sort(proots, by=abs)[end:-1:1] - z_0 = sum(phi) / (fromroots(proots))(1.0) - z_1_to_m = roots_sorted[1:m] # we need only those outside the unit circle - lambda = 1 ./ z_1_to_m - return z_1_to_m, z_0, lambda -end - -function coeffs_of_c(lqf) - (;m) = lqf - z_1_to_m, z_0, lambda = roots_of_characteristic(lqf) - c_0 = (z_0 * prod(z_1_to_m) * (-1.0)^m)^(0.5) - c_coeffs = coeffs(Polynomial(z_1_to_m)) * z_0 / c_0 - return c_coeffs -end - -function solution(lqf) - z_1_to_m, z_0, lambda = roots_of_characteristic(lqf) - c_0 = coeffs_of_c(lqf)[end] - A = zeros(m) - for j in 1:m - denom = 1 - lambda/lambda[j] - A[j] = c_0^(-2) / prod(denom[1:m .!= j]) - end - return lambda, A -end - -function construct_V(lqf; N=nothing) - (;phi_r, k) = lqf - V = zeros(N, N) - for i in 1:N - for j in 1:N - if abs(i-j) <= k - V[i, j] = phi_r[k + abs(i-j)+1] - end - end - end - return V -end - -function simulate_a(lqf, N) - V = construct_V(lqf, N + 1) - d = MVNSampler(zeros(N + 1), V) - return rand(d) -end - -function predict(lqf, a_hist, t) - N = length(a_hist) - 1 - V = construct_V(lqf, N + 1) - - aux_matrix = zeros(N + 1, N + 1) - aux_matrix[1:t+1 , 1:t+1 ] .= I + zeros(t+1, t+1) - L = chol(V)' - Ea_hist = inv(L) * aux_matrix * L * a_hist - - return Ea_hist -end - -function optimal_y(lqf, a_hist, t = nothing) - (;beta, y_m, m) = lqf - - N = length(a_hist) - 1 - W, W_m = construct_W_and_Wm(lqf, N) - - F = lu(W) - - L, U = F.L, F.U - D = Diagonal(1.0./diag(U)) - U = D * U - L = L * Diagonal(1.0./diag(D)) - - J = reverse(I + zeros(N+1, N + 1), dims = 2) - - if isnothing(t) # if the problem is deterministic - a_hist = J * a_hist - - # transform the a sequence if beta is given - if beta != 1 - a_hist = reshape(a_hist * (beta^(collect(N:0)/ 2)), N + 1, 1) - end - - a_bar = a_hist - W_m * y_m # a_bar from the lecutre - Uy = \(L, 
a_bar) # U @ y_bar = L^{-1}a_bar from the lecture - y_bar = \(U, Uy) # y_bar = U^{-1}L^{-1}a_bar - # Reverse the order of y_bar with the matrix J - J = reverse(I + zeros(N+m+1, N + m + 1), dims = 2) - y_hist = J * vcat(y_bar, y_m) # y_hist : concatenated y_m and y_bar - # transform the optimal sequence back if beta is given - if beta != 1 - y_hist = y_hist .* beta.^(- collect(-m:N)/2) - end - - else # if the problem is stochastic and we look at it - Ea_hist = reshape(predict(lqf, a_hist, t), N + 1, 1) - Ea_hist = J * Ea_hist - - a_bar = Ea_hist - W_m * y_m # a_bar from the lecutre - Uy = \(L, a_bar) # U @ y_bar = L^{-1}a_bar from the lecture - y_bar = \(U, Uy) # y_bar = U^{-1}L^{-1}a_bar - - # Reverse the order of y_bar with the matrix J - J = reverse(I + zeros(N + m + 1, N + m + 1), dims = 2) - y_hist = J * vcat(y_bar, y_m) # y_hist : concatenated y_m and y_bar - end - return y_hist, L, U, y_bar -end -``` - -### Example - -In this application we'll have one lag, with - -$$ -d(L) y_t = \gamma(I - L) y_t = \gamma (y_t - y_{t-1}) -$$ - -Suppose for the moment that $\gamma = 0$. - -Then the intertemporal component of the LQ problem disappears, and the agent -simply wants to maximize $a_t y_t - hy^2_t / 2$ in each period. - -This means that the agent chooses $y_t = a_t / h$. - -In the following we'll set $h = 1$, so that the agent just wants to -track the $\{a_t\}$ process. - -However, as we increase $\gamma$, the agent gives greater weight to a smooth time path. - -Hence $\{y_t\}$ evolves as a smoothed version of $\{a_t\}$. - -The $\{a_t\}$ sequence we'll choose as a stationary cyclic process plus some white noise. 
- -Here's some code that generates a plot when $\gamma = 0.8$ - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test -``` - -```{code-cell} julia - -# set seed and generate a_t sequence -Random.seed!(123) -n = 100 -a_seq = sin.(range(0, 5 * pi, length = n)) .+ 2 + 0.1 * randn(n) - -function plot_simulation(; gamma = 0.8, m = 1, h = 1.0, y_m = 2.0) - d = gamma * [1, -1] - y_m = [y_m] - - testlq = LQFilter(d, h, y_m) - y_hist, L, U, y = optimal_y(testlq, a_seq) - y = y[end:-1:1] # reverse y - - # plot simulation results - time = 1:length(y) - plt = plot(time, a_seq / h, lw = 2, color = :black, alpha = 0.8, - marker = :circle, - markersize = 2, label = L"a_t") - plot!(plt, time, y, lw = 2, color = :blue, marker = :circle, markersize = 2, - alpha = 0.8, - label = L"y_t") - plot!(plt, xlabel = "Time", grid = true, xlim = (0, maximum(time)), - legend = :bottomleft) -end - -plot_simulation() -``` - -Here's what happens when we change $\gamma$ to 5.0 - -```{code-cell} julia -plot_simulation(gamma = 5.0) -``` - -And here's $\gamma = 10$ - -```{code-cell} julia -plot_simulation(gamma = 10.0) -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -gamma = 10.0 -d = gamma*[-1, 1] -y_hist, L, U, y = optimal_y(LQFilter(d , 1., [2.]), a_seq) - -@testset begin - #test a_seq[4] ≈ 2.5041680837681186 - #test y_hist[4] ≈ 2.1262941143293763 - #test L[2, 2] ≈ 101.990099009901 - #test y[79] ≈ 1.931812149101077 -end -``` - -## Exercises - -### Exercise 1 - -Consider solving a discounted version $(\beta < 1)$ of problem -{eq}`oneone`, as follows. - -Convert {eq}`oneone` to the undiscounted problem {eq}`oneoneprime`. 
- -Let the solution of {eq}`oneoneprime` in feedback form be - -$$ -(1 - \tilde \lambda_1 L)\, \cdots\, (1 - \tilde \lambda_m L) \tilde y_t = -\sum^m_{j=1} \tilde A_j \sum^\infty_{k=0} \tilde \lambda^k_j \tilde a_{t+k} -$$ - -or - -```{math} -:label: estar - -\tilde y_t = \tilde f_1 \tilde y_{t-1} + \cdots + \tilde f_m \tilde y_{t-m} + -\sum^m_{j=1} \tilde A_j \sum^\infty_{k=0} \tilde \lambda^k_j \tilde a_{t+k} -``` - -Here - -* $h + \tilde d (z^{-1}) \tilde d (z) = \tilde c (z^{-1}) \tilde c (z)$ -* $\tilde c (z) = [(-1)^m \tilde z_0 \tilde z_1 \cdots \tilde z_m ]^{1/2} (1 - \tilde \lambda_1 z) \cdots (1 - \tilde \lambda_m z)$ - -where the $\tilde z_j$ are the zeros of $h +\tilde d (z^{-1})\, \tilde d(z)$. - -Prove that {eq}`estar` implies that the solution for $y_t$ in feedback form is - -$$ -y_t = f_1 y_{t-1} + \ldots + f_m y_{t-m} + \sum^m_{j=1} A_j -\sum^\infty_{k=0} \beta^k \lambda^k_j a_{t+k} -$$ - -where $f_j = \tilde f_j \beta^{-j/2}, A_j = \tilde A_j$, and $\lambda_j = \tilde \lambda_j \beta^{-1/2}$. - -### Exercise 2 - -Solve the optimal control problem, maximize - -$$ -\sum^2_{t=0}\ \Bigl\{a_t y_t - {1 \over 2} [(1 - 2 L) y_t]^2\Bigr\} -$$ - -subject to $y_{-1}$ given, and $\{ a_t\}$ a known bounded sequence. - -Express the solution in the "feedback form" {eq}`onefifteen`, giving numerical values for the coefficients. - -Make sure that the boundary conditions {eq}`onefive` are satisfied. - -(Note: this problem differs from the problem in the text in one important way: instead of $h > 0$ in {eq}`oneone`, $h = 0$. This has an important influence on the solution.) - -### Exercise 3 - -Solve the infinite time optimal control problem to maximize - -$$ -\lim_{N \rightarrow \infty} -\sum^N_{t=0}\, -\, {1 \over 2} [(1 -2 L) y_t]^2, -$$ - -subject to $y_{-1}$ given. 
Prove that the solution is - -$$ -y_t = 2y_{t-1} = 2^{t+1} y_{-1} \qquad t > 0 -$$ - -### Exercise 4 - -Solve the infinite time problem, to maximize - -$$ -\lim_{N \rightarrow \infty}\ \sum^N_{t=0}\ (.0000001)\, y^2_t - {1 \over 2} -[(1 - 2 L) y_t]^2 -$$ - -subject to $y_{-1}$ given. Prove that the solution $y_t = 2y_{t-1}$ violates condition {eq}`onesix`, and so -is not optimal. - -Prove that the optimal solution is approximately $y_t = .5 y_{t-1}$. - diff --git a/lectures/time_series_models/multiplicative_functionals.md b/lectures/time_series_models/multiplicative_functionals.md deleted file mode 100644 index 6f1857d5..00000000 --- a/lectures/time_series_models/multiplicative_functionals.md +++ /dev/null @@ -1,824 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst -kernelspec: - display_name: Julia - language: julia - name: julia-1.11 ---- - -(multiplicative_functionals)= -```{raw} html - -``` - -# Multiplicative Functionals - -```{index} single: Models; Multiplicative functionals -``` - -```{contents} Contents -:depth: 2 -``` - -Co-authored with Chase Coleman and Balint Szoke - -## Overview - -This lecture is a sequel to the {doc}`lecture on additive functionals <../time_series_models/additive_functionals>`. - -That lecture - -1. defined a special class of **additive functionals** driven by a first-order vector VAR -1. by taking the exponential of that additive functional, created an associated **multiplicative functional** - -This lecture uses this special class to create and analyze two examples - -* A **log likelihood process**, an object at the foundation of both frequentist and Bayesian approaches to statistical inference. -* A version of Robert E. Lucas's {cite}`lucas2003` and Thomas Tallarini's {cite}`Tall2000` approaches to measuring the benefits of moderating aggregate fluctuations. 
- -## A Log-Likelihood Process - -Consider a vector of additive functionals $\{y_t\}_{t=0}^\infty$ described by - -$$ -\begin{aligned} - x_{t+1} & = A x_t + B z_{t+1} - \\ - y_{t+1} - y_t & = D x_{t} + F z_{t+1}, -\end{aligned} -$$ - -where $A$ is a stable matrix, $\{z_{t+1}\}_{t=0}^\infty$ is -an i.i.d. sequence of ${\cal N}(0,I)$ random vectors, $F$ is -nonsingular, and $x_0$ and $y_0$ are vectors of known -numbers. - -Evidently, - -$$ -x_{t+1} = \left(A - B F^{-1}D \right)x_t - + B F^{-1} \left(y_{t+1} - y_t \right), -$$ - -so that $x_{t+1}$ can be constructed from observations on -$\{y_{s}\}_{s=0}^{t+1}$ and $x_0$. - -The distribution of $y_{t+1} - y_t$ conditional on $x_t$ is normal with mean $Dx_t$ and nonsingular covariance matrix $FF'$. - -Let $\theta$ denote the vector of free parameters of the model. - -These parameters pin down the elements of $A, B, D, F$. - -The **log likelihood function** of $\{y_s\}_{s=1}^t$ is - -$$ -\begin{aligned} - \log L_{t}(\theta) = - & - {\frac 1 2} \sum_{j=1}^{t} (y_{j} - y_{j-1} - - D x_{j-1})'(FF')^{-1}(y_{j} - y_{j-1} - D x_{j-1}) - \\ - & - {\frac t 2} \log \det (FF') - {\frac {k t} 2} \log( 2 \pi) -\end{aligned} -$$ - -Let's consider the case of a scalar process in which $A, B, D, F$ are scalars and $z_{t+1}$ is a scalar stochastic process. - -We let $\theta_o$ denote the "true" values of $\theta$, meaning the values that generate the data. - -For the purposes of this exercise, set $\theta_o = (A, B, D, F) = (0.8, 1, 0.5, 0.2)$. - -Set $x_0 = y_0 = 0$. - -### Simulating sample paths - -Let's write a program to simulate sample paths of $\{ x_t, y_{t} \}_{t=0}^{\infty}$. - -We'll do this by formulating the additive functional as a linear state space model and putting the [LSS](https://github.com/QuantEcon/QuantEcon.jl/blob/master/src/lss.jl) struct to work. 
- - - -```{code-cell} julia ---- -tags: [remove-cell] ---- -using Test, Random -``` - -```{code-cell} julia -using LinearAlgebra, Statistics -using Distributions, LaTeXStrings, Plots, QuantEcon -import Distributions: loglikelihood - -``` - -```{code-cell} julia -function AMF_LSS_VAR(; A, B, D, F = 0.0, nu = 0.0, - lss = construct_ss(A, B, D, F, nu)) - return (; A, B, D, F, nu, lss) -end - -function construct_ss(A, B, D, F, nu) - H, g = additive_decomp(A, B, D, F) - - # Build A matrix for LSS - # Order of states is: [1, t, xt, yt, mt] - A1 = [1 0 0 0 0] # Transition for 1 - A2 = [1 1 0 0 0] # Transition for t - A3 = [0 0 A 0 0] # Transition for x_{t+1} - A4 = [nu 0 D 1 0] # Transition for y_{t+1} - A5 = [0 0 0 0 1] # Transition for m_{t+1} - Abar = vcat(A1, A2, A3, A4, A5) - - # Build B matrix for LSS - Bbar = [0, 0, B, F, H] - - # Build G matrix for LSS - # Order of observation is: [xt, yt, mt, st, tt] - G1 = [0 0 1 0 0] # Selector for x_{t} - G2 = [0 0 0 1 0] # Selector for y_{t} - G3 = [0 0 0 0 1] # Selector for martingale - G4 = [0 0 -g 0 0] # Selector for stationary - G5 = [0 nu 0 0 0] # Selector for trend - Gbar = vcat(G1, G2, G3, G4, G5) - - # Build LSS struct - x0 = [0, 0, 0, 0, 0] - S0 = zeros(5, 5) - return LSS(Abar, Bbar, Gbar, mu_0 = x0, Sigma_0 = S0) -end - -function additive_decomp(A, B, D, F) - A_res = 1 / (1 - A) - g = D * A_res - H = F + D * A_res * B - - return H, g -end - -function multiplicative_decomp(A, B, D, F, nu) - H, g = additive_decomp(A, B, D, F) - nu_tilde = nu + 0.5 * H^2 - - return nu_tilde, H, g -end - -function loglikelihood_path(amf, x, y) - (; A, B, D, F) = amf - T = length(y) - FF = F^2 - FFinv = inv(FF) - temp = y[2:end] - y[1:(end - 1)] - D * x[1:(end - 1)] - obs = temp .* FFinv .* temp - obssum = cumsum(obs) - scalar = (log(FF) + log(2pi)) * (1:(T - 1)) - return -0.5 * (obssum + scalar) -end - -function loglikelihood(amf, x, y) - llh = loglikelihood_path(amf, x, y) - return llh[end] -end -``` - -The heavy lifting is done inside 
the AMF_LSS_VAR struct. - -The following code adds some simple functions that make it straightforward to generate sample paths from an instance of AMF_LSS_VAR - -```{code-cell} julia -function simulate_xy(amf, T) - foo, bar = simulate(amf.lss, T) - x = bar[1, :] - y = bar[2, :] - return x, y -end - -function simulate_paths(amf, T = 150, I = 5000) - # Allocate space - storeX = zeros(I, T) - storeY = zeros(I, T) - - for i in 1:I - # Do specific simulation - x, y = simulate_xy(amf, T) - - # Fill in our storage matrices - storeX[i, :] = x - storeY[i, :] = y - end - - return storeX, storeY -end - -function population_means(amf, T = 150) - # Allocate Space - xmean = zeros(T) - ymean = zeros(T) - - # Pull out moment generator - moment_generator = moment_sequence(amf.lss) - for (tt, x) in enumerate(moment_generator) - ymeans = x[2] - xmean[tt] = ymeans[1] - ymean[tt] = ymeans[2] - if tt == T - break - end - end - return xmean, ymean -end -``` - -Now that we have these functions in our took kit, let's apply them to run some -simulations. - -In particular, let's use our program to generate $I = 5000$ sample paths of length $T = 150$, labeled $\{ x_{t}^i, y_{t}^i \}_{t=0}^\infty$ for $i = 1, ..., I$. - -Then we compute averages of $\frac{1}{I} \sum_i x_t^i$ and $\frac{1}{I} \sum_i y_t^i$ across the sample paths and compare them with the population means of $x_t$ and $y_t$. 
- -Here goes - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42); -``` - -```{code-cell} julia -F = 0.2 -amf = AMF_LSS_VAR(A = 0.8, B = 1.0, D = 0.5, F = F) - -T = 150 -I = 5000 - -# Simulate and compute sample means -Xit, Yit = simulate_paths(amf, T, I) -Xmean_t = mean(Xit, dims = 1) -Ymean_t = mean(Yit, dims = 1) - -# Compute population means -Xmean_pop, Ymean_pop = population_means(amf, T) - -# Plot sample means vs population means -plt_1 = plot(Xmean_t', color = :blue, label = L"(1/I) \sum_i x_t^i") -plot!(plt_1, Xmean_pop, color = :black, label = L"E x_t") -plot!(plt_1, title = L"x_t", xlim = (0, T), legend = :outertopright) - -plt_2 = plot(Ymean_t', color = :blue, label = L"(1/I) \sum_i x_t^i") -plot!(plt_2, Ymean_pop, color = :black, label = L"E y_t") -plot!(plt_2, title = L"y_t", xlim = (0, T), legend = :outertopright) - -plot(plt_1, plt_2, layout = (2, 1), size = (800, 500)) -``` - -### Simulating log-likelihoods - -Our next aim is to write a program to simulate $\{\log L_t \mid \theta_o\}_{t=1}^T$. - -We want as inputs to this program the *same* sample paths $\{x_t^i, y_t^i\}_{t=0}^T$ that we have already computed. - -We now want to simulate $I = 5000$ paths of $\{\log L_t^i \mid \theta_o\}_{t=1}^T$. - -- For each path, we compute $\log L_T^i / T$. -- We also compute $\frac{1}{I} \sum_{i=1}^I \log L_T^i / T$. - -Then we to compare these objects. 
- -Below we plot the histogram of $\log L_T^i / T$ for realizations $i = 1, \ldots, 5000$ - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42); -``` - -```{code-cell} julia -function simulate_likelihood(amf, Xit, Yit) - # Get size - I, T = size(Xit) - - # Allocate space - LLit = zeros(I, T - 1) - - for i in 1:I - LLit[i, :] = loglikelihood_path(amf, Xit[i, :], Yit[i, :]) - end - - return LLit -end - -# Get likelihood from each path x^{i}, Y^{i} -LLit = simulate_likelihood(amf, Xit, Yit) - -LLT = 1 / T * LLit[:, end] -LLmean_t = mean(LLT) - -plot(seriestype = :histogram, LLT, label = "") -plot!(title = L"Distribution of $(I/T)log(L_T)|\theta_0$") -vline!([LLmean_t], linestyle = :dash, color = :black, lw = 2, alpha = 0.6, - label = "") -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - # #test LLT[100] ≈ 0.237835678897198 atol = 1e-3 - #test LLmean_t ≈ 0.18834771174533427 atol = 1e-3 -end -``` - -Notice that the log likelihood is almost always nonnegative, implying that $L_t$ is typically bigger than 1. - -Recall that the likelihood function is a pdf (probability density function) and **not** a probability measure, so it can take values larger than 1. - -In the current case, the conditional variance of $\Delta y_{t+1}$, which equals $FF^T=0.04$, is so small that the maximum value of the pdf is 2 (see the figure below). - -This implies that approximately $75\%$ of the time (a bit more than one sigma deviation), we should expect the **increment** of the log likelihood to be nonnegative. 
- -Let's see this in a simulation - -```{code-cell} julia -normdist = Normal(0, F) -mult = 1.175 -println("The pdf at +/- $mult sigma takes the value: $(pdf(normdist,mult*F))") -println("Probability of dL being larger than 1 is approx: " * - "$(cdf(normdist,mult*F)-cdf(normdist,-mult*F))") - -# Compare this to the sample analogue: -L_increment = LLit[:, 2:end] - LLit[:, 1:(end - 1)] -r, c = size(L_increment) -frac_nonegative = sum(L_increment .>= 0) / (c * r) -print("Fraction of dlogL being nonnegative in the sample is: $(frac_nonegative)") -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - @test pdf(normdist, mult * F) ≈ 1.0001868966924388 - @test pdf(normdist, F) ≈ 1.2098536225957168 -end -``` - -Let's also plot the conditional pdf of $\Delta y_{t+1}$ - -```{code-cell} julia -xgrid = range(-1, 1, length = 100) -println("The pdf at +/- one sigma takes the value: $(pdf(normdist, F)) ") -plot(xgrid, pdf.(normdist, xgrid), label = "") -plot!(title = L"Conditional pdf $f(\Delta y_{t+1} | x_t)$") -``` - -### An alternative parameter vector - -Now consider alternative parameter vector $\theta_1 = [A, B, D, F] = [0.9, 1.0, 0.55, 0.25]$. - -We want to compute $\{\log L_t \mid \theta_1\}_{t=1}^T$. - -The $x_t, y_t$ inputs to this program should be exactly the **same** sample paths $\{x_t^i, y_t^i\}_{t=0}^T$ that we we computed above. - -This is because we want to generate data under the $\theta_o$ probability model but evaluate the likelihood under the $\theta_1$ model. - -So our task is to use our program to simulate $I = 5000$ paths of $\{\log L_t^i \mid \theta_1\}_{t=1}^T$. - -- For each path, compute $\frac{1}{T} \log L_T^i$. -- Then compute $\frac{1}{I}\sum_{i=1}^I \frac{1}{T} \log L_T^i$. - -We want to compare these objects with each other and with the analogous objects that we computed above. - -Then we want to interpret outcomes. - -A function that we constructed can handle these tasks. 
- -The only innovation is that we must create an alternative model to feed in. - -We will creatively call the new model `amf2`. - -We make three graphs - -* the first sets the stage by repeating an earlier graph -* the second contains two histograms of values of log likelihoods of the two models over the period $T$ -* the third compares likelihoods under the true and alternative models - -Here's the code - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42); -``` - -```{code-cell} julia -# Create the second (wrong) alternative model -# parameters for theta_1 closer to theta_0 -amf2 = AMF_LSS_VAR(A = 0.9, B = 1.0, D = 0.55, F = 0.25) - -# Get likelihood from each path x^{i}, y^{i} -LLit2 = simulate_likelihood(amf2, Xit, Yit) - -LLT2 = 1 / (T - 1) * LLit2[:, end] -LLmean_t2 = mean(LLT2) - -plot(seriestype = :histogram, LLT2, label = "") -vline!([LLmean_t2], color = :black, lw = 2, linestyle = :dash, alpha = 0.6, - label = "") -plot!(title = L"Distribution of $(1/T)log(L_T) | \theta_1)$") -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - # @test LLT2[1] ≈ 0.09636268288129826 - # @test LLmean_t2 ≈ 0.09215848797039161 -end -``` - -Let's see a histogram of the log-likelihoods under the true and the alternative model (same sample paths) - -```{code-cell} julia -plot(seriestype = :histogram, LLT, bin = 50, alpha = 0.5, label = "True", - normed = true) -plot!(seriestype = :histogram, LLT2, bin = 50, alpha = 0.5, - label = "Alternative", normed = true) -vline!([mean(LLT)], color = :black, lw = 2, linestyle = :dash, label = "") -vline!([mean(LLT2)], color = :black, lw = 2, linestyle = :dash, label = "") -``` - -Now we'll plot the histogram of the difference in log likelihood ratio - -```{code-cell} julia -LLT_diff = LLT - LLT2 - -plot(seriestype = :histogram, LLT_diff, bin = 50, label = "") -plot!(title = L"(1/T)[log(L_T^i | \theta_0) - log(L_T^i |\theta_1)]") -``` - -### Interpretation - -These histograms of log likelihood 
ratios illustrate important features of **likelihood ratio tests** as tools for discriminating between statistical models. - -* The loglikeklihood is higher on average under the true model -- obviously a very useful property. -* Nevertheless, for a positive fraction of realizations, the log likelihood is higher for the incorrect than for the true model - -* in these instances, a likelihood ratio test mistakenly selects the wrong model - -> - -* These mechanics underlie the statistical theory of **mistake probabilities** associated with model selection tests based on likelihood ratio. - -(In a subsequent lecture, we'll use some of the code prepared in this lecture to illustrate mistake probabilities) - -## Benefits from Reduced Aggregate Fluctuations - -Now let's turn to a new example of multiplicative functionals. - -This example illustrates ideas in the literatures on - -* **long-run risk** in the consumption based asset pricing literature (e.g., {cite}`bansalyaron2004`, {cite}`hhl2008`, {cite}`hansen2007`) -* **benefits of eliminating aggregate fluctuations** in representative agent macro models (e.g., {cite}`Tall2000`, {cite}`lucas2003`) - -Let $c_t$ be consumption at date $t \geq 0$. - -Suppose that $\{\log c_t \}_{t=0}^\infty$ is an additive functional described by - -$$ -\log c_{t+1} - \log c_t = \nu + D \cdot x_t + F \cdot z_{t+1} -$$ - -where - -$$ -x_{t+1} = A x_t + B z_{t+1} -$$ - -Here $\{z_{t+1}\}_{t=0}^\infty$ is an i.i.d. sequence of ${\cal N}(0,I)$ random vectors. 
- -A representative household ranks consumption processes $\{c_t\}_{t=0}^\infty$ with a utility functional $\{V_t\}_{t=0}^\infty$ that satisfies - -```{math} -:label: old1mf - -\log V_t - \log c_t = U \cdot x_t + {\sf u} -``` - -where - -$$ -U = \exp(-\delta) \left[ I - \exp(-\delta) A' \right]^{-1} D -$$ - -and - -$$ -{\sf u} - = {\frac {\exp( -\delta)}{ 1 - \exp(-\delta)}} {\nu} + \frac{(1 - \gamma)}{2} {\frac {\exp(-\delta)}{1 - \exp(-\delta)}} -\biggl| D' \left[ I - \exp(-\delta) A \right]^{-1}B + F \biggl|^2, -$$ - -Here $\gamma \geq 1$ is a risk-aversion coefficient and $\delta > 0$ is a rate of time preference. - -### Consumption as a multiplicative process - -We begin by showing that consumption is a **multiplicative functional** with representation - -```{math} -:label: old2mf - -\frac{c_t}{c_0} -= \exp(\tilde{\nu}t ) -\left( \frac{\tilde{M}_t}{\tilde{M}_0} \right) -\left( \frac{\tilde{e}(x_0)}{\tilde{e}(x_t)} \right) -``` - -where $\left( \frac{\tilde{M}_t}{\tilde{M}_0} \right)$ is a likelihood ratio process and $\tilde M_0 = 1$. - -At this point, as an exercise, we ask the reader please to verify the follow formulas for $\tilde{\nu}$ and $\tilde{e}(x_t)$ as functions of $A, B, D, F$: - -$$ -\tilde \nu = \nu + \frac{H \cdot H}{2} -$$ - -and - -$$ -\tilde e(x) = \exp[g(x)] = \exp \bigl[ D' (I - A)^{-1} x \bigr] -$$ - -### Simulating a likelihood ratio process again - -Next, we want a program to simulate the likelihood ratio process $\{ \tilde{M}_t \}_{t=0}^\infty$. - -In particular, we want to simulate 5000 sample paths of length $T=1000$ for the case in which $x$ is a scalar and $[A, B, D, F] = [0.8, 0.001, 1.0, 0.01]$ and $\nu = 0.005$. - -After accomplishing this, we want to display a histogram of $\tilde{M}_T^i$ for -$T=1000$. 
- -Here is code that accomplishes these tasks - -```{code-cell} julia ---- -tags: [remove-cell] ---- -Random.seed!(42); -``` - -```{code-cell} julia -function simulate_martingale_components(amf, T = 1_000, I = 5_000) - # Get the multiplicative decomposition - (; A, B, D, F, nu, lss) = amf - nu, H, g = multiplicative_decomp(A, B, D, F, nu) - - # Allocate space - add_mart_comp = zeros(I, T) - - # Simulate and pull out additive martingale component - for i in 1:I - foo, bar = simulate(lss, T) - # Martingale component is third component - add_mart_comp[i, :] = bar[3, :] - end - - mul_mart_comp = exp.(add_mart_comp' .- (0:(T - 1)) * H^2 / 2)' - - return add_mart_comp, mul_mart_comp -end - -# Build model -amf_2 = AMF_LSS_VAR(A = 0.8, B = 0.001, D = 1.0, F = 0.01, nu = 0.005) - -amc, mmc = simulate_martingale_components(amf_2, 1_000, 5_000) - -amcT = amc[:, end] -mmcT = mmc[:, end] - -println("The (min, mean, max) of additive Martingale component in period T:") -println("\t ($(minimum(amcT)), $(mean(amcT)), $(maximum(amcT)))") - -println("The (min, mean, max) of multiplicative Martingale component in period T:") -println("\t ($(minimum(mmcT)), $(mean(mmcT)), $(maximum(mmcT)))") -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@testset begin - #test amcT[20] ≈ 0.48937834866761837 - #test amc[14, 38] ≈ 0.10223016730936686 - #test mmcT[250] ≈ 0.6101128099569552 -end -``` - -#### Comments - -- The preceding min, mean, and max of the cross-section of the date - $T$ realizations of the multiplicative martingale component of - $c_t$ indicate that the sample mean is close to its population - mean of 1. - * This outcome prevails for all values of the horizon $T$. -- The cross-section distribution of the multiplicative martingale - component of $c$ at date $T$ approximates a log normal - distribution well. -- The histogram of the additive martingale component of - $\log c_t$ at date $T$ approximates a normal distribution - well. 
- -Here's a histogram of the additive martingale component - -```{code-cell} julia -plot(seriestype = :histogram, amcT, bin = 25, normed = true, label = "") -plot!(title = "Histogram of Additive Martingale Component") -``` - -Here's a histogram of the multiplicative martingale component - -```{code-cell} julia -plot(seriestype = :histogram, mmcT, bin = 25, normed = true, label = "") -plot!(title = "Histogram of Multiplicative Martingale Component") -``` - -### Representing the likelihood ratio process - -The likelihood ratio process $\{\widetilde M_t\}_{t=0}^\infty$ can be represented as - -$$ -\widetilde M_t = \exp \biggl( \sum_{j=1}^t \biggl(H \cdot z_j -\frac{ H \cdot H }{2} \biggr) \biggr), \quad \widetilde M_0 =1 , -$$ - -where $H = [F + B'(I-A')^{-1} D]$. - -It follows that $\log {\widetilde M}_t \sim {\mathcal N} ( -\frac{t H \cdot H}{2}, t H \cdot H )$ and that consequently ${\widetilde M}_t$ is log normal. - -Let's plot the probability density functions for $\log {\widetilde M}_t$ for -$t=100, 500, 1000, 10000, 100000$. - -Then let's use the plots to investigate how these densities evolve through time. - -We will plot the densities of $\log {\widetilde M}_t$ for different values of $t$. 
- -Here is some code that tackles these tasks - -```{code-cell} julia -function Mtilde_t_density(amf, t; xmin = 1e-8, xmax = 5.0, npts = 50) - (; A, B, D, F, nu) = amf - - # Pull out the multiplicative decomposition - nutilde, H, g = multiplicative_decomp(A, B, D, F, nu) - H2 = H * H - - # The distribution - mdist = LogNormal(-t * H2 / 2, sqrt(t * H2)) - x = range(xmin, xmax, length = npts) - p = pdf.(mdist, x) - - return x, p -end - -function logMtilde_t_density(amf, t; xmin = -15.0, xmax = 15.0, npts = 5000) - - # Pull out the multiplicative decomposition - (; A, B, D, F, nu) = amf - nutilde, H, g = multiplicative_decomp(A, B, D, F, nu) - H2 = H * H - - # The distribution - lmdist = Normal(-t * H2 / 2, sqrt(t * H2)) - x = range(xmin, xmax, length = npts) - p = pdf.(lmdist, x) - - return x, p -end - -times_to_plot = [10, 100, 500, 1000, 2500, 5000] -dens_to_plot = [Mtilde_t_density(amf_2, t, xmin = 1e-8, xmax = 6.0) - for t in times_to_plot] -ldens_to_plot = [logMtilde_t_density(amf_2, t, xmin = -10.0, xmax = 10.0) - for t in times_to_plot] - -plots = plot(layout = (3, 2), size = (600, 800)) - -for (it, dens_t) in enumerate(dens_to_plot) - x, pdf = dens_t - plot!(plots[it], title = "Density for time $(times_to_plot[it])", - titlefontsize = 10) - plot!(plots[it], pdf, fillrange = 0, label = "") -end -plot(plots) -``` - -These probability density functions illustrate a **peculiar property** of log likelihood ratio processes: - -* With respect to the true model probabilities, they have mathematical expectations equal to $1$ for all $t \geq 0$. -* They almost surely converge to zero. - -### Welfare benefits of reduced random aggregate fluctuations - -Suppose in the tradition of a strand of macroeconomics (for example Tallarini {cite}`Tall2000`, {cite}`lucas2003`) we want to estimate the welfare benefits from removing random fluctuations around trend growth. 
- -We shall compute how much initial consumption $c_0$ a representative consumer who ranks consumption streams according to {eq}`old1mf` would be willing to sacrifice to enjoy the consumption stream - -$$ -\frac{c_t}{c_0} = \exp (\tilde{\nu} t) -$$ - -rather than the stream described by equation {eq}`old2mf`. - -We want to compute the implied percentage reduction in $c_0$ that the representative consumer would accept. - -To accomplish this, we write a function that computes the coefficients $U$ -and $u$ for the original values of $A, B, D, F, \nu$, but -also for the case that $A, B, D, F = [0, 0, 0, 0]$ and -$\nu = \tilde{\nu}$. - -Here's our code - -```{code-cell} julia -function Uu(amf, delta, gamma) - (; A, B, D, F, nu) = amf - nu_tilde, H, g = multiplicative_decomp(A, B, D, F, nu) - - resolv = 1 / (1 - exp(-delta) * A) - vect = F + D * resolv * B - - U_risky = exp(-delta) * resolv * D - u_risky = exp(-delta) / (1 - exp(-delta)) * - (nu + 0.5 * (1 - gamma) * (vect^2)) - - U_det = 0 - u_det = exp(-delta) / (1 - exp(-delta)) * nu_tilde - - return U_risky, u_risky, U_det, u_det -end - -# Set remaining parameters -delta = 0.02 -gamma = 2.0 - -# Get coeffs -U_r, u_r, U_d, u_d = Uu(amf_2, delta, gamma) -``` - -The values of the two processes are - -$$ -\begin{aligned} - \log V^r_0 &= \log c^r_0 + U^r x_0 + u^r - \\ - \log V^d_0 &= \log c^d_0 + U^d x_0 + u^d -\end{aligned} -$$ - -We look for the ratio $\frac{c^r_0-c^d_0}{c^r_0}$ that makes -$\log V^r_0 - \log V^d_0 = 0$ - -$$ -\begin{aligned} - \underbrace{ \log V^r_0 - \log V^d_0}_{=0} + \log c^d_0 - \log c^r_0 - &= (U^r-U^d) x_0 + u^r - u^d - \\ - \frac{c^d_0}{ c^r_0} - &= \exp\left((U^r-U^d) x_0 + u^r - u^d\right) -\end{aligned} -$$ - -Hence, the implied percentage reduction in $c_0$ that the -representative consumer would accept is given by - -$$ -\frac{c^r_0-c^d_0}{c^r_0} = 1 - \exp\left((U^r-U^d) x_0 + u^r - u^d\right) -$$ - -Let's compute this - -```{code-cell} julia -x0 = 0.0 # initial conditions -logVC_r = U_r 
* x0 + u_r -logVC_d = U_d * x0 + u_d - -perc_reduct = 100 * (1 - exp(logVC_r - logVC_d)) -perc_reduct -``` - -```{code-cell} julia ---- -tags: [remove-cell] ---- -@test perc_reduct ≈ 1.0809878812017448 -``` - -We find that the consumer would be willing to take a percentage reduction of initial consumption equal to around 1.081. - diff --git a/lectures/tools_and_techniques/geom_series.md b/lectures/tools_and_techniques/geom_series.md index d6643c6d..7be76ae1 100644 --- a/lectures/tools_and_techniques/geom_series.md +++ b/lectures/tools_and_techniques/geom_series.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (mc)= diff --git a/lectures/tools_and_techniques/iterative_methods_sparsity.md b/lectures/tools_and_techniques/iterative_methods_sparsity.md index da719f65..16b9a7d6 100644 --- a/lectures/tools_and_techniques/iterative_methods_sparsity.md +++ b/lectures/tools_and_techniques/iterative_methods_sparsity.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (iterative_methods_sparsity)= diff --git a/lectures/tools_and_techniques/linear_algebra.md b/lectures/tools_and_techniques/linear_algebra.md index ac1f912e..5b1ebf78 100644 --- a/lectures/tools_and_techniques/linear_algebra.md +++ b/lectures/tools_and_techniques/linear_algebra.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (linear_algebra)= diff --git a/lectures/tools_and_techniques/lln_clt.md b/lectures/tools_and_techniques/lln_clt.md index 48ccba50..b59154a9 100644 --- a/lectures/tools_and_techniques/lln_clt.md +++ b/lectures/tools_and_techniques/lln_clt.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (lln_clt)= diff --git a/lectures/tools_and_techniques/numerical_linear_algebra.md 
b/lectures/tools_and_techniques/numerical_linear_algebra.md index d9e7f186..5c7b7af9 100644 --- a/lectures/tools_and_techniques/numerical_linear_algebra.md +++ b/lectures/tools_and_techniques/numerical_linear_algebra.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (numerical_linear_algebra)= @@ -420,7 +420,7 @@ With sparsity, the computational order is related to the number of non-zeros rat For real, symmetric, [positive semi-definite](https://en.wikipedia.org/wiki/Definiteness_of_a_matrix) matrices, a Cholesky decomposition is a specialized example of an LU decomposition where $L = U'$. -The Cholesky is directly useful on its own (e.g., {doc}`Classical Control with Linear Algebra <../time_series_models/classical_filtering>`), but it is also an efficient factorization to use in solving symmetric positive semi-definite systems. +The Cholesky is directly useful on its own, but it is also an efficient factorization to use in solving symmetric positive semi-definite systems. As always, symmetry allows specialized algorithms. 
diff --git a/lectures/tools_and_techniques/orth_proj.md b/lectures/tools_and_techniques/orth_proj.md index aa277eab..80cc6fbf 100644 --- a/lectures/tools_and_techniques/orth_proj.md +++ b/lectures/tools_and_techniques/orth_proj.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (orth_proj)= diff --git a/lectures/tools_and_techniques/stationary_densities.md b/lectures/tools_and_techniques/stationary_densities.md index 26be7a12..3e5283c7 100644 --- a/lectures/tools_and_techniques/stationary_densities.md +++ b/lectures/tools_and_techniques/stationary_densities.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (statd)= @@ -41,8 +41,7 @@ In this lecture, our focus will be on continuous Markov models that * are often nonlinear The fact that we accommodate nonlinear models here is significant, because -linear stochastic models have their own highly developed tool set, as we'll -see {doc}`later on <../time_series_models/arma>`. +linear stochastic models have their own highly developed tool set. The question that interests us most is: Given a particular stochastic dynamic model, how will the state of the system evolve over time? 
diff --git a/lectures/troubleshooting.md b/lectures/troubleshooting.md index 63a4de31..6edf5cad 100644 --- a/lectures/troubleshooting.md +++ b/lectures/troubleshooting.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (troubleshooting)= diff --git a/lectures/zreferences.md b/lectures/zreferences.md index 7f2bca90..f22113c2 100644 --- a/lectures/zreferences.md +++ b/lectures/zreferences.md @@ -6,7 +6,7 @@ jupytext: kernelspec: display_name: Julia language: julia - name: julia-1.11 + name: julia-1.12 --- (references)= From b4a4b32f508fdd0b76829bf5b2e4828ff76f58c0 Mon Sep 17 00:00:00 2001 From: Jesse Perla Date: Tue, 25 Nov 2025 16:18:44 -0800 Subject: [PATCH 2/2] julia 1.12.2 --- .github/workflows/cache.yml | 2 +- .github/workflows/ci.yml | 2 +- .github/workflows/format.yml | 2 +- .github/workflows/publish.yml | 2 +- lectures/Manifest.toml | 241 +++++++++++++++++++--------------- 5 files changed, 138 insertions(+), 111 deletions(-) diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml index dc46d97e..8759b24b 100644 --- a/.github/workflows/cache.yml +++ b/.github/workflows/cache.yml @@ -37,7 +37,7 @@ jobs: - name: Set up Julia uses: julia-actions/setup-julia@v2 with: - version: 1.12.1 + version: 1.12.2 - name: Install IJulia and Setup Project shell: bash run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a121478a..4ed6deab 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,7 +32,7 @@ jobs: - name: Set up Julia uses: julia-actions/setup-julia@v2 with: - version: 1.12.1 + version: 1.12.2 - name: Install IJulia and Setup Project shell: bash run: | diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 5b7f8bc3..16af00af 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -17,7 +17,7 @@ jobs: - name: Setup Julia uses: julia-actions/setup-julia@v2 with: - version: 1.12.1 + version: 
1.12.2 - name: Install JuliaFormatter.jl run: julia -e 'import Pkg; Pkg.add("JuliaFormatter")' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index e227c0c7..2d7d17b6 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -35,7 +35,7 @@ jobs: - name: Set up Julia uses: julia-actions/setup-julia@v2 with: - version: 1.12.1 + version: 1.12.2 - name: Install IJulia and Setup Project shell: bash run: | diff --git a/lectures/Manifest.toml b/lectures/Manifest.toml index 3cd7f356..5b428b57 100644 --- a/lectures/Manifest.toml +++ b/lectures/Manifest.toml @@ -1,13 +1,13 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.12.1" +julia_version = "1.12.2" manifest_format = "2.0" project_hash = "ded1023db51cdcd062d98e167751e8d33159e2b4" [[deps.ADTypes]] -git-tree-sha1 = "27cecae79e5cc9935255f90c53bb831cc3c870d7" +git-tree-sha1 = "8be2ae325471fc20b11c27bb34b518541d07dd3a" uuid = "47edcb42-4c32-4615-8424-f2b9edc5f35b" -version = "1.18.0" +version = "1.19.0" weakdeps = ["ChainRulesCore", "ConstructionBase", "EnzymeCore"] [deps.ADTypes.extensions] @@ -32,6 +32,12 @@ weakdeps = ["ChainRulesCore", "Test"] AbstractFFTsChainRulesCoreExt = "ChainRulesCore" AbstractFFTsTestExt = "Test" +[[deps.AbstractPlutoDingetjes]] +deps = ["Pkg"] +git-tree-sha1 = "6e1d2a35f2f90a4bc7c2ed98079b2ba09c35b83a" +uuid = "6e696c72-6542-2067-7265-42206c756150" +version = "1.3.2" + [[deps.AbstractTrees]] git-tree-sha1 = "2d9c9a55f9c93e8887ad391fbae72f8ef55e1177" uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" @@ -142,9 +148,9 @@ version = "7.22.0" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "StaticArrays"] -git-tree-sha1 = "355ab2d61069927d4247cd69ad0e1f140b31e30d" +git-tree-sha1 = "122a06c8266e00035bfa572887ab52c344526eb4" uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" -version = "1.12.0" +version = "1.12.1" weakdeps = ["SparseArrays"] [deps.ArrayLayouts.extensions] @@ -162,9 +168,9 @@ version = 
"1.1.0" [[deps.BandedMatrices]] deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra", "PrecompileTools"] -git-tree-sha1 = "4826c9fe6023a87029e54870ad1a9800c7ea6623" +git-tree-sha1 = "3ecdc34639e1b8b8217820af18e5850e8e78f1a7" uuid = "aae01518-5342-5314-be14-df237901396f" -version = "1.10.1" +version = "1.10.2" [deps.BandedMatrices.extensions] BandedMatricesSparseArraysExt = "SparseArrays" @@ -416,9 +422,9 @@ version = "1.8.1" [[deps.DataInterpolations]] deps = ["EnumX", "FindFirstFunctions", "ForwardDiff", "LinearAlgebra", "PrettyTables", "RecipesBase", "Reexport"] -git-tree-sha1 = "58ae0a38dd3002963a3c8d4af097e660cf409c38" +git-tree-sha1 = "016cf5adf7618df49ad5e7f00c13f433bd2c9361" uuid = "82cc6244-b520-54b8-b5a6-8a565e85f1d0" -version = "8.6.1" +version = "8.7.0" [deps.DataInterpolations.extensions] DataInterpolationsChainRulesCoreExt = "ChainRulesCore" @@ -439,10 +445,10 @@ version = "8.6.1" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [[deps.DataStructures]] -deps = ["Compat", "InteractiveUtils", "OrderedCollections"] -git-tree-sha1 = "4e1fe97fdaed23e9dc21d4d664bea76b65fc50a0" +deps = ["OrderedCollections"] +git-tree-sha1 = "e357641bb3e0638d353c4b29ea0e40ea644066a6" uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.18.22" +version = "0.19.3" [[deps.DataValueInterfaces]] git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" @@ -468,9 +474,9 @@ version = "1.9.1" [[deps.DiffEqBase]] deps = ["ArrayInterface", "ConcreteStructs", "DocStringExtensions", "EnzymeCore", "FastBroadcast", "FastClosures", "FastPower", "FunctionWrappers", "FunctionWrappersWrappers", "LinearAlgebra", "Logging", "Markdown", "MuladdMacro", "PrecompileTools", "Printf", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLOperators", "SciMLStructures", "Setfield", "Static", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface", "TruncatedStacktraces"] -git-tree-sha1 = "087632db966c90079a5534e4147afea9136ca39a" +git-tree-sha1 = "d7259aa30ff9c4a512513d119882e3df3e656238" 
uuid = "2b5f629d-d688-5b77-993f-72d75c75574e" -version = "6.190.2" +version = "6.192.0" [deps.DiffEqBase.extensions] DiffEqBaseCUDAExt = "CUDA" @@ -543,9 +549,9 @@ version = "1.15.1" [[deps.DifferentiationInterface]] deps = ["ADTypes", "LinearAlgebra"] -git-tree-sha1 = "961e5d49b64d63b3f2201b0de60065876f4be551" +git-tree-sha1 = "80bd15222b3e8d0bc70d921d2201aa0084810ce5" uuid = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63" -version = "0.7.10" +version = "0.7.12" [deps.DifferentiationInterface.extensions] DifferentiationInterfaceChainRulesCoreExt = "ChainRulesCore" @@ -645,13 +651,13 @@ version = "0.7.16" [[deps.Downloads]] deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" -version = "1.6.0" +version = "1.7.0" [[deps.DynamicPolynomials]] deps = ["Future", "LinearAlgebra", "MultivariatePolynomials", "MutableArithmetics", "Reexport", "Test"] -git-tree-sha1 = "ca693f8707a77a0e365d49fe4622203b72b6cf1d" +git-tree-sha1 = "3f50fa86c968fc1a9e006c07b6bc40ccbb1b704d" uuid = "7c1d4256-1411-5781-91ec-d7bc3513ac07" -version = "0.6.3" +version = "0.6.4" [[deps.EnumX]] git-tree-sha1 = "bddad79635af6aec424f53ed8aad5d7555dc6f00" @@ -660,14 +666,13 @@ version = "1.0.5" [[deps.Enzyme]] deps = ["CEnum", "EnzymeCore", "Enzyme_jll", "GPUCompiler", "InteractiveUtils", "LLVM", "Libdl", "LinearAlgebra", "ObjectFile", "PrecompileTools", "Preferences", "Printf", "Random", "SparseArrays"] -git-tree-sha1 = "b36b64b70d4dd2d5473ffecfd9bf298fe7dcaf5b" +git-tree-sha1 = "af32c93dfb17c5f78c90e45f4547e6733552d78a" uuid = "7da242da-08ed-463a-9acd-ee780be4f1d9" -version = "0.13.96" +version = "0.13.108" [deps.Enzyme.extensions] EnzymeBFloat16sExt = "BFloat16s" EnzymeChainRulesCoreExt = "ChainRulesCore" - EnzymeDynamicPPLExt = ["ADTypes", "DynamicPPL"] EnzymeGPUArraysCoreExt = "GPUArraysCore" EnzymeLogExpFunctionsExt = "LogExpFunctions" EnzymeSpecialFunctionsExt = "SpecialFunctions" @@ -677,16 +682,15 @@ version = "0.13.96" ADTypes = 
"47edcb42-4c32-4615-8424-f2b9edc5f35b" BFloat16s = "ab4f0b2a-ad5b-11e8-123f-65d77653426b" ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" - DynamicPPL = "366bfd00-2699-11ea-058f-f148b4cae6d8" GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" LogExpFunctions = "2ab3a3ac-af41-5b50-aa03-7779005ae688" SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" [[deps.EnzymeCore]] -git-tree-sha1 = "f91e7cb4c17dae77c490b75328f22a226708557c" +git-tree-sha1 = "820f06722a87d9544f42679182eb0850690f9b45" uuid = "f151be2c-9106-41f4-ab19-57ee4f262869" -version = "0.8.15" +version = "0.8.17" weakdeps = ["Adapt"] [deps.EnzymeCore.extensions] @@ -694,9 +698,9 @@ weakdeps = ["Adapt"] [[deps.Enzyme_jll]] deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "TOML"] -git-tree-sha1 = "a24799d1ca416f80b2c589b66d82867db3f70624" +git-tree-sha1 = "6096df88a1bc09afb9a1c85d4e54ed085a95e799" uuid = "7cc45869-7501-5eee-bdea-0790c847d4ef" -version = "0.0.207+0" +version = "0.0.221+0" [[deps.EpollShim_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] @@ -718,9 +722,9 @@ version = "2.7.3+0" [[deps.ExponentialUtilities]] deps = ["Adapt", "ArrayInterface", "GPUArraysCore", "GenericSchur", "LinearAlgebra", "PrecompileTools", "Printf", "SparseArrays", "libblastrampoline_jll"] -git-tree-sha1 = "cae251c76f353e32d32d76fae2fea655eab652af" +git-tree-sha1 = "664daa6edba2f1cc848d5a4cd1928306919238cc" uuid = "d4d017d3-3776-5f7e-afef-a10c40355c18" -version = "1.27.0" +version = "1.28.0" weakdeps = ["StaticArrays"] [deps.ExponentialUtilities.extensions] @@ -806,9 +810,9 @@ version = "1.11.0" [[deps.FillArrays]] deps = ["LinearAlgebra"] -git-tree-sha1 = "173e4d8f14230a7523ae11b9a3fa9edb3e0efd78" +git-tree-sha1 = "5bfcd42851cf2f1b303f51525a54dc5e98d408a3" uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" -version = "1.14.0" +version = "1.15.0" weakdeps = ["PDMats", "SparseArrays", "Statistics"] [deps.FillArrays.extensions] @@ -858,9 +862,9 @@ 
version = "1.3.7" [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions"] -git-tree-sha1 = "ba6ce081425d0afb2bedd00d9884464f764a9225" +git-tree-sha1 = "cd33c7538e68650bd0ddbb3f5bd50a4a0fa95b50" uuid = "f6369f11-7733-5829-9624-2563aa707210" -version = "1.2.2" +version = "1.3.0" weakdeps = ["StaticArrays"] [deps.ForwardDiff.extensions] @@ -908,21 +912,27 @@ version = "0.2.0" [[deps.GPUCompiler]] deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "PrecompileTools", "Preferences", "Scratch", "Serialization", "TOML", "Tracy", "UUIDs"] -git-tree-sha1 = "9a8b92a457f55165923fcfe48997b7b93b712fca" +git-tree-sha1 = "90554fe518adab1b4c8f7a04d26c414482a240ca" uuid = "61eb1bfa-7361-4325-ad38-22787b887f55" -version = "1.7.2" +version = "1.7.4" [[deps.GR]] deps = ["Artifacts", "Base64", "DelimitedFiles", "Downloads", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Preferences", "Printf", "Qt6Wayland_jll", "Random", "Serialization", "Sockets", "TOML", "Tar", "Test", "p7zip_jll"] -git-tree-sha1 = "f52c27dd921390146624f3aab95f4e8614ad6531" +git-tree-sha1 = "f305bdb91e1f3fcc687944c97f2ede40585b1bd5" uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" -version = "0.73.18" +version = "0.73.19" + + [deps.GR.extensions] + GRIJuliaExt = "IJulia" + + [deps.GR.weakdeps] + IJulia = "7073ff75-c697-5162-941a-fcdaad2a7d2a" [[deps.GR_jll]] deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "FreeType2_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Qt6Base_jll", "Zlib_jll", "libpng_jll"] -git-tree-sha1 = "4b0406b866ea9fdbaf1148bc9c0b887e59f9af68" +git-tree-sha1 = "de439fbc02b9dc0e639e67d7c5bd5811ff3b6f06" uuid = "d2c73de3-f751-5644-a686-071e5b155ba9" -version = "0.73.18+0" +version = "0.73.19+1" [[deps.GenericSchur]] deps = ["LinearAlgebra", "Printf"] @@ -967,9 +977,9 @@ version = 
"1.0.2" [[deps.HCubature]] deps = ["Combinatorics", "DataStructures", "LinearAlgebra", "QuadGK", "StaticArrays"] -git-tree-sha1 = "19ef9f0cb324eed957b7fe7257ac84e8ed8a48ec" +git-tree-sha1 = "8ee627fb73ecba0b5254158b04d4745611b404a1" uuid = "19dc6840-f33b-545b-b366-655c7e3ffd49" -version = "1.7.0" +version = "1.8.0" [[deps.HTTP]] deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "PrecompileTools", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] @@ -1080,9 +1090,9 @@ version = "0.16.2" Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" [[deps.IntervalSets]] -git-tree-sha1 = "5fbb102dcb8b1a858111ae81d56682376130517d" +git-tree-sha1 = "d966f85b3b7a8e49d034d27a189e9a4874b4391a" uuid = "8197267c-284f-5f27-9208-e0e47529a953" -version = "0.7.11" +version = "0.7.13" weakdeps = ["Random", "RecipesBase", "Statistics"] [deps.IntervalSets.extensions] @@ -1163,9 +1173,9 @@ version = "1.12.0" [[deps.JumpProcesses]] deps = ["ArrayInterface", "DataStructures", "DiffEqBase", "DiffEqCallbacks", "DocStringExtensions", "FunctionWrappers", "Graphs", "LinearAlgebra", "PoissonRandom", "Random", "RecursiveArrayTools", "Reexport", "SciMLBase", "Setfield", "StaticArrays", "SymbolicIndexingInterface", "UnPack"] -git-tree-sha1 = "905a2a28770e23f3ed750306ef48eb8c46c3a002" +git-tree-sha1 = "746d3ac930339ca8a37590892c98493fba89e9d3" uuid = "ccbc3e58-028d-4f4c-8cd5-9ae44345cda5" -version = "9.19.1" +version = "9.19.2" [deps.JumpProcesses.extensions] JumpProcessesKernelAbstractionsExt = ["Adapt", "KernelAbstractions"] @@ -1183,9 +1193,9 @@ version = "0.6.10" [[deps.Krylov]] deps = ["LinearAlgebra", "Printf", "SparseArrays"] -git-tree-sha1 = "d1fc961038207e43982851e57ee257adc37be5e8" +git-tree-sha1 = "09895a8e17b0aa97df8964ed13c94d1b6d9de666" uuid = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7" -version = "0.10.2" +version = "0.10.3" [[deps.LAME_jll]] deps = ["Artifacts", "JLLWrappers", 
"Libdl"] @@ -1266,9 +1276,9 @@ version = "0.1.17" [[deps.LazyArrays]] deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra", "MacroTools", "SparseArrays"] -git-tree-sha1 = "79ee64f6ba0a5a49930f51c86f60d7526b5e12c8" +git-tree-sha1 = "0f68899e54e5e98cff674bbe6380bcf89f603787" uuid = "5078a376-72f3-5289-bfd5-ec5146d43c02" -version = "2.8.0" +version = "2.9.1" [deps.LazyArrays.extensions] LazyArraysBandedMatricesExt = "BandedMatrices" @@ -1307,7 +1317,7 @@ version = "0.6.4" [[deps.LibCURL_jll]] deps = ["Artifacts", "LibSSH2_jll", "Libdl", "OpenSSL_jll", "Zlib_jll", "nghttp2_jll"] uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" -version = "8.11.1+1" +version = "8.15.0+0" [[deps.LibGit2]] deps = ["LibGit2_jll", "NetworkOptions", "Printf", "SHA"] @@ -1388,9 +1398,9 @@ weakdeps = ["LineSearches"] [[deps.LineSearches]] deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "Printf"] -git-tree-sha1 = "4adee99b7262ad2a1a4bbbc59d993d24e55ea96f" +git-tree-sha1 = "a8b1215fb05581a1f9e403bec46a1333e7eb1ffb" uuid = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" -version = "7.4.0" +version = "7.4.1" [[deps.LinearAlgebra]] deps = ["Libdl", "OpenBLAS_jll", "libblastrampoline_jll"] @@ -1411,9 +1421,9 @@ weakdeps = ["ChainRulesCore", "SparseArrays", "Statistics"] [[deps.LinearSolve]] deps = ["ArrayInterface", "ChainRulesCore", "ConcreteStructs", "DocStringExtensions", "EnumX", "GPUArraysCore", "InteractiveUtils", "Krylov", "LazyArrays", "Libdl", "LinearAlgebra", "MKL_jll", "Markdown", "OpenBLAS_jll", "PrecompileTools", "Preferences", "RecursiveArrayTools", "Reexport", "SciMLBase", "SciMLLogging", "SciMLOperators", "Setfield", "StaticArraysCore", "UnPack"] -git-tree-sha1 = "b5def83652705bdc00035dff671039e707588a00" +git-tree-sha1 = "e00dff84aded96c3ec03cfe46ff8d13e0c5afc44" uuid = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" -version = "3.46.1" +version = "3.47.0" [deps.LinearSolve.extensions] LinearSolveAMDGPUExt = "AMDGPU" @@ -1546,9 +1556,9 @@ version = "1.1.9" [[deps.MbedTLS_jll]] deps = 
["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "3cce3511ca2c6f87b19c34ffc623417ed2798cbd" +git-tree-sha1 = "ff69a2b1330bcb730b9ac1ab7dd680176f5896b8" uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.10+0" +version = "2.28.1010+0" [[deps.Measures]] git-tree-sha1 = "b513cedd20d9c914783d8ad83d08120702bf2c77" @@ -1587,10 +1597,14 @@ uuid = "46d2c3a1-f734-5fdb-9937-b9b9aeba4221" version = "0.2.4" [[deps.MultivariatePolynomials]] -deps = ["ChainRulesCore", "DataStructures", "LinearAlgebra", "MutableArithmetics"] -git-tree-sha1 = "fade91fe9bee7b142d332fc6ab3f0deea29f637b" +deps = ["DataStructures", "LinearAlgebra", "MutableArithmetics"] +git-tree-sha1 = "d38b8653b1cdfac5a7da3b819c0a8d6024f9a18c" uuid = "102ac46a-7ee4-5c85-9060-abc95bfdeaa3" -version = "0.5.9" +version = "0.5.13" +weakdeps = ["ChainRulesCore"] + + [deps.MultivariatePolynomials.extensions] + MultivariatePolynomialsChainRulesCoreExt = "ChainRulesCore" [[deps.MultivariateStats]] deps = ["Arpack", "Distributions", "LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI", "StatsBase"] @@ -1642,9 +1656,9 @@ version = "1.1.3" [[deps.NearestNeighbors]] deps = ["Distances", "StaticArrays"] -git-tree-sha1 = "ca7e18198a166a1f3eb92a3650d53d94ed8ca8a1" +git-tree-sha1 = "e45bb6034fdef63d0c49b82ba9b889215bf8b344" uuid = "b8a86587-4115-5ab1-83bc-aa920d37bbce" -version = "0.4.22" +version = "0.4.24" [[deps.NetworkOptions]] uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" @@ -1685,9 +1699,9 @@ version = "4.12.0" [[deps.NonlinearSolveBase]] deps = ["ADTypes", "Adapt", "ArrayInterface", "CommonSolve", "Compat", "ConcreteStructs", "DifferentiationInterface", "EnzymeCore", "FastClosures", "LinearAlgebra", "Markdown", "MaybeInplace", "Preferences", "Printf", "RecursiveArrayTools", "SciMLBase", "SciMLJacobianOperators", "SciMLLogging", "SciMLOperators", "SciMLStructures", "Setfield", "StaticArraysCore", "SymbolicIndexingInterface", "TimerOutputs"] -git-tree-sha1 = "9f1e723df4aafef077ac8bb6771602138b4b211f" 
+git-tree-sha1 = "a72dc6e5bba0fd9bb3bd9cc4abade8552d9fc982" uuid = "be0214bd-f91f-a760-ac4e-3421ce2b2da0" -version = "2.1.0" +version = "2.4.0" [deps.NonlinearSolveBase.extensions] NonlinearSolveBaseBandedMatricesExt = "BandedMatrices" @@ -1786,7 +1800,7 @@ version = "1.6.0" [[deps.OpenSSL_jll]] deps = ["Artifacts", "Libdl"] uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" -version = "3.5.1+0" +version = "3.5.4+0" [[deps.OpenSpecFun_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl"] @@ -2066,9 +2080,9 @@ weakdeps = ["StatsBase"] [[deps.Pango_jll]] deps = ["Artifacts", "Cairo_jll", "Fontconfig_jll", "FreeType2_jll", "FriBidi_jll", "Glib_jll", "HarfBuzz_jll", "JLLWrappers", "Libdl"] -git-tree-sha1 = "1f7f9bbd5f7a2e5a9f7d96e51c9754454ea7f60b" +git-tree-sha1 = "0662b083e11420952f2e62e17eddae7fc07d5997" uuid = "36c8627f-9965-5494-a995-c6b170f724f3" -version = "1.56.4+0" +version = "1.57.0+0" [[deps.Parameters]] deps = ["OrderedCollections", "UnPack"] @@ -2111,9 +2125,9 @@ version = "1.4.4" [[deps.Plots]] deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "JLFzf", "JSON", "LaTeXStrings", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "PrecompileTools", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "RelocatableFolders", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "TOML", "UUIDs", "UnicodeFun", "Unzip"] -git-tree-sha1 = "12ce661880f8e309569074a61d3767e5756a199f" +git-tree-sha1 = "7b990898534ea9797bf9bf21bd086850e5d9f817" uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" -version = "1.41.1" +version = "1.41.2" [deps.Plots.extensions] FileIOExt = "FileIO" @@ -2213,9 +2227,9 @@ version = "1.5.0" [[deps.PrettyTables]] deps = ["Crayons", "LaTeXStrings", "Markdown", "PrecompileTools", "Printf", "REPL", "Reexport", "StringManipulation", "Tables"] -git-tree-sha1 = "6b8e2f0bae3f678811678065c09571c1619da219" +git-tree-sha1 
= "c5a07210bd060d6a8491b0ccdee2fa0235fc00bf" uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" -version = "3.1.0" +version = "3.1.2" [[deps.Primes]] deps = ["IntegerMathUtils"] @@ -2235,9 +2249,9 @@ version = "1.11.0" [[deps.ProgressLogging]] deps = ["Logging", "SHA", "UUIDs"] -git-tree-sha1 = "d95ed0324b0799843ac6f7a6a85e65fe4e5173f0" +git-tree-sha1 = "f0803bc1171e455a04124affa9c21bba5ac4db32" uuid = "33c8b6b6-d38a-422a-b730-caa89a2f386c" -version = "0.1.5" +version = "0.1.6" [[deps.ProgressMeter]] deps = ["Distributed", "Printf"] @@ -2286,9 +2300,9 @@ weakdeps = ["Enzyme"] [[deps.QuantEcon]] deps = ["DSP", "Distributions", "FFTW", "Graphs", "LinearAlgebra", "Markdown", "NLopt", "Optim", "Primes", "Random", "SparseArrays", "SpecialFunctions", "Statistics", "StatsBase"] -git-tree-sha1 = "441453af42d42c42beeadf6cab81e313c38c493f" +git-tree-sha1 = "176af770443ee5533ae701167509b2c7fc39cc32" uuid = "fcd29c91-0bd7-5a09-975d-7ac3f643a60c" -version = "0.16.8" +version = "0.17.0" [[deps.QuasiMonteCarlo]] deps = ["Accessors", "ConcreteStructs", "LatticeRules", "LinearAlgebra", "Primes", "Random", "Requires", "Sobol", "StatsBase"] @@ -2332,6 +2346,11 @@ weakdeps = ["FixedPointNumbers"] [deps.Ratios.extensions] RatiosFixedPointNumbersExt = "FixedPointNumbers" +[[deps.ReadOnlyArrays]] +git-tree-sha1 = "e6f7ddf48cf141cb312b078ca21cb2d29d0dc11d" +uuid = "988b38a3-91fc-5605-94a2-ee2116b3bd83" +version = "0.2.0" + [[deps.RecipesBase]] deps = ["PrecompileTools"] git-tree-sha1 = "5c3d09cc4f31f5fc6af001c250bf1278733100ff" @@ -2456,9 +2475,9 @@ version = "0.6.43" [[deps.SciMLBase]] deps = ["ADTypes", "Accessors", "Adapt", "ArrayInterface", "CommonSolve", "ConstructionBase", "Distributed", "DocStringExtensions", "EnumX", "FunctionWrappersWrappers", "IteratorInterfaceExtensions", "LinearAlgebra", "Logging", "Markdown", "Moshi", "PreallocationTools", "PrecompileTools", "Preferences", "Printf", "RecipesBase", "RecursiveArrayTools", "Reexport", "RuntimeGeneratedFunctions", "SciMLLogging", 
"SciMLOperators", "SciMLPublic", "SciMLStructures", "StaticArraysCore", "Statistics", "SymbolicIndexingInterface"] -git-tree-sha1 = "7614a1b881317b6800a8c66eb1180c6ea5b986f3" +git-tree-sha1 = "dc93eb05a8101a58c844e0e20a47f8a92be33048" uuid = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -version = "2.124.0" +version = "2.127.0" [deps.SciMLBase.extensions] SciMLBaseChainRulesCoreExt = "ChainRulesCore" @@ -2507,15 +2526,15 @@ version = "0.1.11" [[deps.SciMLLogging]] deps = ["Logging", "LoggingExtras", "Preferences"] -git-tree-sha1 = "5a026f5549ad167cda34c67b62f8d3dc55754da3" +git-tree-sha1 = "70d5b2fc50fde8d868f906b54045eb12b490e867" uuid = "a6db7da4-7206-11f0-1eab-35f2a5dbe1d1" -version = "1.3.1" +version = "1.5.0" [[deps.SciMLOperators]] deps = ["Accessors", "ArrayInterface", "DocStringExtensions", "LinearAlgebra", "MacroTools"] -git-tree-sha1 = "18e8ea3fdfca9c3408f1df8fc1d7690b12784338" +git-tree-sha1 = "a1e12aee1eb7e6f957e8483eeebf9a98f3e135d6" uuid = "c0aeaf25-5076-4817-a8d5-81caf7dfa961" -version = "1.10.0" +version = "1.13.0" weakdeps = ["SparseArrays", "StaticArraysCore"] [deps.SciMLOperators.extensions] @@ -2621,9 +2640,9 @@ version = "1.12.0" [[deps.SparseConnectivityTracer]] deps = ["ADTypes", "DocStringExtensions", "FillArrays", "LinearAlgebra", "Random", "SparseArrays"] -git-tree-sha1 = "ba6dc9b87304964647bd1c750b903cb360003a36" +git-tree-sha1 = "322365aa23098275562cbad6a1c2539ee40d9618" uuid = "9f842d2f-2579-4b1d-911e-f412cf18a3f5" -version = "1.1.2" +version = "1.1.3" [deps.SparseConnectivityTracer.extensions] SparseConnectivityTracerChainRulesCoreExt = "ChainRulesCore" @@ -2670,9 +2689,9 @@ weakdeps = ["ChainRulesCore"] [[deps.StableRNGs]] deps = ["Random"] -git-tree-sha1 = "95af145932c2ed859b63329952ce8d633719f091" +git-tree-sha1 = "4f96c596b8c8258cc7d3b19797854d368f243ddc" uuid = "860ef19b-820b-49d6-a774-d7a799459cd3" -version = "1.0.3" +version = "1.0.4" [[deps.Static]] deps = ["CommonWorldInvalidations", "IfElse", "PrecompileTools", "SciMLPublic"] @@ 
-2725,9 +2744,9 @@ version = "1.7.1" [[deps.StatsBase]] deps = ["AliasTables", "DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] -git-tree-sha1 = "a136f98cefaf3e2924a66bd75173d1c891ab7453" +git-tree-sha1 = "064b532283c97daae49e544bb9cb413c26511f8c" uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" -version = "0.34.7" +version = "0.34.8" [[deps.StatsFuns]] deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] @@ -2760,9 +2779,9 @@ version = "0.5.8" [[deps.StringManipulation]] deps = ["PrecompileTools"] -git-tree-sha1 = "725421ae8e530ec29bcbdddbe91ff8053421d023" +git-tree-sha1 = "a3c1536470bf8c5e02096ad4853606d7c8f62721" uuid = "892a3eda-7b42-436c-8928-eab12a02cf0e" -version = "0.4.1" +version = "0.4.2" [[deps.StructIO]] git-tree-sha1 = "c581be48ae1cbf83e899b14c07a807e1787512cc" @@ -2793,35 +2812,41 @@ weakdeps = ["PrettyTables"] SymbolicIndexingInterfacePrettyTablesExt = "PrettyTables" [[deps.SymbolicLimits]] -deps = ["SymbolicUtils"] -git-tree-sha1 = "f75c7deb7e11eea72d2c1ea31b24070b713ba061" +deps = ["SymbolicUtils", "TermInterface"] +git-tree-sha1 = "49201c2793ce02f141c6f8b5194ce34e8012cd68" uuid = "19f23fe9-fdab-4a78-91af-e7b7767979c3" -version = "0.2.3" +version = "0.2.4" [[deps.SymbolicUtils]] -deps = ["AbstractTrees", "ArrayInterface", "Bijections", "ChainRulesCore", "Combinatorics", "ConstructionBase", "DataStructures", "DocStringExtensions", "DynamicPolynomials", "ExproniconLite", "LinearAlgebra", "MultivariatePolynomials", "NaNMath", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArrays", "SymbolicIndexingInterface", "TaskLocalValues", "TermInterface", "TimerOutputs", "Unityper"] -git-tree-sha1 = "a85b4262a55dbd1af39bb6facf621d79ca6a322d" +deps = ["AbstractTrees", "ArrayInterface", "Combinatorics", "ConstructionBase", "DataStructures", "DocStringExtensions", "DynamicPolynomials", 
"EnumX", "ExproniconLite", "LinearAlgebra", "MacroTools", "Moshi", "MultivariatePolynomials", "MutableArithmetics", "NaNMath", "PrecompileTools", "ReadOnlyArrays", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArraysCore", "SymbolicIndexingInterface", "TaskLocalValues", "TermInterface", "WeakCacheSets"] +git-tree-sha1 = "f34b93587697ea97b5abcedaa6f752e66d1e0ba6" uuid = "d1185830-fcd6-423d-90d6-eec64667417b" -version = "3.32.0" +version = "4.5.1" [deps.SymbolicUtils.extensions] + SymbolicUtilsChainRulesCoreExt = "ChainRulesCore" + SymbolicUtilsDistributionsExt = "Distributions" SymbolicUtilsLabelledArraysExt = "LabelledArrays" SymbolicUtilsReverseDiffExt = "ReverseDiff" [deps.SymbolicUtils.weakdeps] + ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" + Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" LabelledArrays = "2ee39098-c373-598a-b85f-a56591580800" ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267" [[deps.Symbolics]] -deps = ["ADTypes", "ArrayInterface", "Bijections", "CommonWorldInvalidations", "ConstructionBase", "DataStructures", "DiffRules", "Distributions", "DocStringExtensions", "DomainSets", "DynamicPolynomials", "LaTeXStrings", "Latexify", "Libdl", "LinearAlgebra", "LogExpFunctions", "MacroTools", "Markdown", "NaNMath", "OffsetArrays", "PrecompileTools", "Primes", "RecipesBase", "Reexport", "RuntimeGeneratedFunctions", "SciMLBase", "SciMLPublic", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArraysCore", "SymbolicIndexingInterface", "SymbolicLimits", "SymbolicUtils", "TermInterface"] -git-tree-sha1 = "8206e177903a41519145f577cb7f3793f3b7c960" +deps = ["ADTypes", "AbstractPlutoDingetjes", "ArrayInterface", "Bijections", "CommonWorldInvalidations", "ConstructionBase", "DataStructures", "DiffRules", "DocStringExtensions", "DomainSets", "DynamicPolynomials", "Libdl", "LinearAlgebra", "LogExpFunctions", "MacroTools", "Markdown", "Moshi", "MultivariatePolynomials", "MutableArithmetics", "NaNMath", "PrecompileTools", 
"Preferences", "Primes", "RecipesBase", "Reexport", "RuntimeGeneratedFunctions", "SciMLPublic", "Setfield", "SparseArrays", "SpecialFunctions", "StaticArraysCore", "SymbolicIndexingInterface", "SymbolicLimits", "SymbolicUtils", "TermInterface"] +git-tree-sha1 = "a7ca655bef8d9ef4aa20c7c6e2804d53cf05aaa5" uuid = "0c5d862f-8b57-4792-8d23-62f2024744c7" -version = "6.57.0" +version = "7.0.2" [deps.Symbolics.extensions] SymbolicsD3TreesExt = "D3Trees" + SymbolicsDistributionsExt = "Distributions" SymbolicsForwardDiffExt = "ForwardDiff" SymbolicsGroebnerExt = "Groebner" + SymbolicsLatexifyExt = ["Latexify", "LaTeXStrings"] SymbolicsLuxExt = "Lux" SymbolicsNemoExt = "Nemo" SymbolicsPreallocationToolsExt = ["PreallocationTools", "ForwardDiff"] @@ -2830,8 +2855,11 @@ version = "6.57.0" [deps.Symbolics.weakdeps] D3Trees = "e3df1716-f71e-5df9-9e2d-98e193103c45" + Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" Groebner = "0b43b601-686d-58a3-8a1c-6623616c7cd4" + LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" + Latexify = "23fbe1c1-3f47-55db-b15f-69d7ec21a316" Lux = "b2108857-7c20-44ae-9111-449ecde12c47" Nemo = "2edaba10-b0f1-5616-af89-8c11ac63239a" PreallocationTools = "d236fae5-4411-538c-8e31-a6e3d9e00b46" @@ -2959,12 +2987,6 @@ git-tree-sha1 = "53915e50200959667e78a92a418594b428dffddf" uuid = "1cfade01-22cf-5700-b092-accc4b62d6e1" version = "0.4.1" -[[deps.Unityper]] -deps = ["ConstructionBase"] -git-tree-sha1 = "25008b734a03736c41e2a7dc314ecb95bd6bbdb0" -uuid = "a7c27f48-0311-42f6-a7f8-2c11e75eb415" -version = "0.1.6" - [[deps.Unzip]] git-tree-sha1 = "ca0969166a028236229f63514992fc073799bb78" uuid = "41fe7b60-77ed-43a1-b4f0-825fd5a5650d" @@ -2988,6 +3010,11 @@ git-tree-sha1 = "96478df35bbc2f3e1e791bc7a3d0eeee559e60e9" uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89" version = "1.24.0+0" +[[deps.WeakCacheSets]] +git-tree-sha1 = "386050ae4353310d8ff9c228f83b1affca2f7f38" +uuid = 
"d30d5f5c-d141-4870-aa07-aabb0f5fe7d5" +version = "0.1.0" + [[deps.Widgets]] deps = ["Colors", "Dates", "Observables", "OrderedCollections"] git-tree-sha1 = "e9aeb174f95385de31e70bd15fa066a505ea82b9" @@ -3210,9 +3237,9 @@ version = "1.28.1+0" [[deps.libpng_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Zlib_jll"] -git-tree-sha1 = "07b6a107d926093898e82b3b1db657ebe33134ec" +git-tree-sha1 = "5cb3c5d039f880c0b3075803c8bf45cb95ae1e91" uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f" -version = "1.6.50+0" +version = "1.6.51+0" [[deps.libvorbis_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll"] @@ -3238,9 +3265,9 @@ uuid = "1317d2d5-d96f-522e-a858-c73665f53c3e" version = "2022.0.0+1" [[deps.p7zip_jll]] -deps = ["Artifacts", "Libdl"] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.5.0+2" +version = "17.7.0+0" [[deps.x264_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"]