@@ -346,14 +346,14 @@ We'll write our code as a function that accepts the following three arguments
 
 * A stochastic matrix `P`
 * An initial distribution `ψ`
-* A positive integer `sample_size` representing the length of the time series the function should return
+* A positive integer `ts_length` representing the length of the time series the function should return
 
 ```{code-cell} ipython3
-def mc_sample_path(P, ψ=None, sample_size=1_000):
+def mc_sample_path(P, ψ=None, ts_length=1_000):
 
     # set up
     P = np.asarray(P)
-    X = np.empty(sample_size, dtype=int)
+    X = np.empty(ts_length, dtype=int)
 
     # Convert each row of P into a cdf
     n = len(P)
@@ -367,7 +367,7 @@ def mc_sample_path(P, ψ=None, sample_size=1_000):
 
     # simulate
     X[0] = X_0
-    for t in range(sample_size - 1):
+    for t in range(ts_length - 1):
         X[t+1] = qe.random.draw(P_dist[X[t], :])
 
     return X
@@ -387,7 +387,7 @@ P = [[0.4, 0.6],
 Here's a short time series.
 
 ```{code-cell} ipython3
-mc_sample_path(P, ψ=[1.0, 0.0], sample_size=10)
+mc_sample_path(P, ψ=[1.0, 0.0], ts_length=10)
 ```
 
 +++ {"user_expressions": []}
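Only fragments of the renamed simulator appear in the hunks above; the lines that build the row-wise cdfs and draw the initial state fall between them. Below is a self-contained sketch of how the full function plausibly reads after the rename; the `P_dist` construction and the initial-state handling are assumptions filled in from context, not lines taken from the diff.

```python
import numpy as np
import quantecon as qe

def mc_sample_path(P, ψ=None, ts_length=1_000):

    # set up
    P = np.asarray(P)
    X = np.empty(ts_length, dtype=int)

    # Convert each row of P into a cdf (assumed; these lines sit between the hunks)
    P_dist = np.cumsum(P, axis=1)

    # Draw the initial state from ψ if supplied, otherwise start at state 0 (assumed)
    X_0 = qe.random.draw(np.cumsum(ψ)) if ψ is not None else 0

    # simulate
    X[0] = X_0
    for t in range(ts_length - 1):
        X[t+1] = qe.random.draw(P_dist[X[t], :])

    return X

P = [[0.4, 0.6],
     [0.2, 0.8]]
print(mc_sample_path(P, ψ=[1.0, 0.0], ts_length=10))
```

Called this way it should reproduce a short series like the one in the cell above, up to randomness in the draws.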
@@ -403,7 +403,7 @@ $X_0$ is drawn.
 The following code illustrates this
 
 ```{code-cell} ipython3
-X = mc_sample_path(P, ψ=[0.1, 0.9], sample_size=1_000_000)
+X = mc_sample_path(P, ψ=[0.1, 0.9], ts_length=1_000_000)
 np.mean(X == 0)
 ```
 
@@ -432,7 +432,7 @@ np.mean(X == 0)
 The `simulate` routine is faster (because it is [JIT compiled](https://python-programming.quantecon.org/numba.html#numba-link)).
 
 ```{code-cell} ipython3
-%time mc_sample_path(P, sample_size=1_000_000) # Our homemade code version
+%time mc_sample_path(P, ts_length=1_000_000) # Our homemade code version
 ```
 
 ```{code-cell} ipython3
@@ -930,14 +930,14 @@ Therefore, we can see the sample path averages for each state (the fraction of t
 P = np.array([[0.971, 0.029, 0.000],
               [0.145, 0.778, 0.077],
               [0.000, 0.508, 0.492]])
-n = 10_000
+ts_length = 10_000
 mc = MarkovChain(P)
-n_state = P.shape[1]
-fig, axes = plt.subplots(nrows=1, ncols=n_state)
+n = len(P)
+fig, axes = plt.subplots(nrows=1, ncols=n)
 ψ_star = mc.stationary_distributions[0]
 plt.subplots_adjust(wspace=0.35)
 
-for i in range(n_state):
+for i in range(n):
     axes[i].grid()
     axes[i].axhline(ψ_star[i], linestyle='dashed', lw=2, color = 'black',
                     label = fr'$\psi^*({i})$')
@@ -947,8 +947,8 @@ for i in range(n_state):
     # Compute the fraction of time spent, starting from different x_0s
     for x0, col in ((0, 'blue'), (1, 'green'), (2, 'red')):
         # Generate time series that starts at different x0
-        X = mc.simulate(n, init=x0)
-        X_bar = (X == i).cumsum() / (1 + np.arange(n, dtype=float))
+        X = mc.simulate(ts_length, init=x0)
+        X_bar = (X == i).cumsum() / (1 + np.arange(ts_length, dtype=float))
         axes[i].plot(X_bar, color=col, label=f'$x_0 = \, {x0}$')
     axes[i].legend()
 plt.show()
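The point the figure makes can also be checked numerically: over a long simulated path, the fraction of time spent in each state should be close to the corresponding entry of $\psi^*$. A minimal sketch, reusing `P`, `MarkovChain`, `np`, and `ts_length` as they are defined in the cell above:

```python
mc = MarkovChain(P)
ψ_star = mc.stationary_distributions[0]
X = mc.simulate(ts_length, init=0)

for i in range(len(P)):
    frac = np.mean(X == i)   # fraction of time the path spends in state i
    print(f"state {i}: sample fraction = {frac:.3f}, ψ*({i}) = {ψ_star[i]:.3f}")
```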
@@ -997,13 +997,13 @@ It is still irreducible, however, so ergodicity holds.
 ```{code-cell} ipython3
 P = np.array([[0, 1],
               [1, 0]])
-n = 10_000
+ts_length = 10_000
 mc = MarkovChain(P)
-n_state = P.shape[1]
-fig, axes = plt.subplots(nrows=1, ncols=n_state)
+n = len(P)
+fig, axes = plt.subplots(nrows=1, ncols=n)
 ψ_star = mc.stationary_distributions[0]
 
-for i in range(n_state):
+for i in range(n):
     axes[i].grid()
     axes[i].set_ylim(0.45, 0.55)
     axes[i].axhline(ψ_star[i], linestyle='dashed', lw=2, color = 'black',
@@ -1012,10 +1012,10 @@ for i in range(n_state):
     axes[i].set_ylabel(f'fraction of time spent at {i}')
 
     # Compute the fraction of time spent, for each x
-    for x0 in range(n_state):
+    for x0 in range(n):
         # Generate time series starting at different x_0
-        X = mc.simulate(n, init=x0)
-        X_bar = (X == i).cumsum() / (1 + np.arange(n, dtype=float))
+        X = mc.simulate(ts_length, init=x0)
+        X_bar = (X == i).cumsum() / (1 + np.arange(ts_length, dtype=float))
         axes[i].plot(X_bar, label=f'$x_0 = \, {x0}$')
 
     axes[i].legend()
@@ -1070,10 +1070,11 @@ Let's pick an initial distribution $\psi$ and trace out the sequence of distribu
 First, we write a function to iterate the sequence of distributions for `n` periods
 
 ```{code-cell} ipython3
-def iterate_ψ(ψ_0, P, n):
-    ψs = np.empty((n, P.shape[0]))
+def iterate_ψ(ψ_0, P, ts_length):
+    n = len(P)
+    ψs = np.empty((ts_length, n))
     ψ = ψ_0
-    for t in range(n):
+    for t in range(ts_length):
         ψs[t] = ψ
         ψ = ψ @ P
     return np.array(ψs)
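A quick usage sketch of the renamed `iterate_ψ`, on a small two-state chain introduced here purely for illustration (the names `P_demo` and `ψ_0` are hypothetical, and `numpy`/`quantecon` are assumed to be imported as `np`/`qe`, as elsewhere in the lecture):

```python
P_demo = np.array([[0.4, 0.6],
                   [0.2, 0.8]])
ψ_0 = np.array([0.25, 0.75])

ψs = iterate_ψ(ψ_0, P_demo, ts_length=50)

ψ_star = qe.MarkovChain(P_demo).stationary_distributions[0]
print(ψs[-1])   # last iterate of ψ P^t
print(ψ_star)   # stationary distribution, for comparison
```

After 50 iterations the last row of `ψs` should be very close to `ψ_star`.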
@@ -1125,11 +1126,12 @@ The following figure shows the dynamics of $(\psi P^t)(i)$ as $t$ gets large, f
 First, we write a function to draw `n` initial values
 
 ```{code-cell} ipython3
-def generate_initial_values(n, n_state):
-    ψ_0s = np.empty((n, P.shape[0]))
+def generate_initial_values(num_distributions, n):
+    n = len(P)
+    ψ_0s = np.empty((num_distributions, n))
 
-    for i in range(n):
-        draws = np.random.randint(1, 10_000_000, size=n_state)
+    for i in range(num_distributions):
+        draws = np.random.randint(1, 10_000_000, size=n)
 
         # Scale them so that they add up to 1
         ψ_0s[i,:] = np.array(draws/sum(draws))
@@ -1138,26 +1140,28 @@ def generate_initial_values(n, n_state):
 ```
 
 ```{code-cell} ipython3
-# Define the number of iterations
-n = 50
-n_state = P.shape[0]
+# Define the number of iterations
+# and initial distributions
+ts_length = 50
+num_distributions = 25
+
+n = len(P)
 mc = qe.MarkovChain(P)
 ψ_star = mc.stationary_distributions[0]
 
 # Draw the plot
-fig, axes = plt.subplots(nrows=1, ncols=n_state)
+fig, axes = plt.subplots(nrows=1, ncols=n)
 plt.subplots_adjust(wspace=0.35)
 
-ψ_0s = generate_initial_values(n, n_state)
-
+ψ_0s = generate_initial_values(num_distributions, n)
 for ψ_0 in ψ_0s:
-    ψs = iterate_ψ(ψ_0, P, n)
+    ψs = iterate_ψ(ψ_0, P, ts_length)
 
     # Obtain and plot distributions at each state
-    for i in range(n_state):
-        axes[i].plot(range(0, n), ψs[:,i], alpha=0.3)
+    for i in range(n):
+        axes[i].plot(range(0, ts_length), ψs[:,i], alpha=0.3)
 
-for i in range(n_state):
+for i in range(n):
     axes[i].axhline(ψ_star[i], linestyle='dashed', lw=2, color = 'black',
                     label = fr'$\psi^*({i})$')
     axes[i].set_xlabel('t')
@@ -1179,22 +1183,23 @@ In the case of our periodic chain, we find the distribution is oscillating
 ```{code-cell} ipython3
 P = np.array([[0, 1],
               [1, 0]])
-n = 50
-n_state = P.shape[0]
+
+ts_length = 50
+num_distributions = 25
+n = len(P)
 mc = qe.MarkovChain(P)
 ψ_star = mc.stationary_distributions[0]
-fig, axes = plt.subplots(nrows=1, ncols=n_state)
-
-ψ_0s = generate_initial_values(n, n_state)
+fig, axes = plt.subplots(nrows=1, ncols=n)
 
+ψ_0s = generate_initial_values(num_distributions, n)
 for ψ_0 in ψ_0s:
-    ψs = iterate_ψ(ψ_0, P, n)
+    ψs = iterate_ψ(ψ_0, P, ts_length)
 
     # Obtain and plot distributions at each state
-    for i in range(n_state):
-        axes[i].plot(range(0, n), ψs[:,i], alpha=0.3)
+    for i in range(n):
+        axes[i].plot(range(20, ts_length), ψs[20:,i], alpha=0.3)
 
-for i in range(n_state):
+for i in range(n):
     axes[i].axhline(ψ_star[i], linestyle='dashed', lw=2, color = 'black', label = fr'$\psi^*({i})$')
     axes[i].set_xlabel('t')
     axes[i].set_ylabel(fr'$\psi({i})$')
@@ -1385,18 +1390,18 @@ mc = qe.MarkovChain(P_B)
 2.
 
 ```{code-cell} ipython3
-N = 1000
+ts_length = 1000
 mc = MarkovChain(P_B)
 fig, ax = plt.subplots(figsize=(9, 6))
-X = mc.simulate(N)
+X = mc.simulate(ts_length)
 # Center the plot at 0
 ax.set_ylim(-0.25, 0.25)
 ax.axhline(0, linestyle='dashed', lw=2, color = 'black', alpha=0.4)
 
 
 for x0 in range(8):
     # Calculate the fraction of time for each worker
-    X_bar = (X == x0).cumsum() / (1 + np.arange(N, dtype=float))
+    X_bar = (X == x0).cumsum() / (1 + np.arange(ts_length, dtype=float))
     ax.plot(X_bar - ψ_star[x0], label=f'$X = {x0+1}$')
     ax.set_xlabel('t')
     ax.set_ylabel(r'fraction of time spent in a state $- \psi^* (x)$')
@@ -1464,7 +1469,7 @@ As $m$ gets large, both series converge to zero.
 
 ```{code-cell} ipython3
 α = β = 0.1
-N = 10000
+ts_length = 10000
 p = β / (α + β)
 
 P = ((1 - α, α), # Careful: P and p are distinct
@@ -1474,15 +1479,15 @@ mc = MarkovChain(P)
 fig, ax = plt.subplots(figsize=(9, 6))
 ax.set_ylim(-0.25, 0.25)
 ax.grid()
-ax.hlines(0, 0, N, lw=2, alpha=0.6) # Horizontal line at zero
+ax.hlines(0, 0, ts_length, lw=2, alpha=0.6) # Horizontal line at zero
 
 for x0, col in ((0, 'blue'), (1, 'green')):
     # Generate time series for worker that starts at x0
-    X = mc.simulate(N, init=x0)
+    X = mc.simulate(ts_length, init=x0)
     # Compute fraction of time spent unemployed, for each n
-    X_bar = (X == 0).cumsum() / (1 + np.arange(N, dtype=float))
+    X_bar = (X == 0).cumsum() / (1 + np.arange(ts_length, dtype=float))
     # Plot
-    ax.fill_between(range(N), np.zeros(N), X_bar - p, color=col, alpha=0.1)
+    ax.fill_between(range(ts_length), np.zeros(ts_length), X_bar - p, color=col, alpha=0.1)
     ax.plot(X_bar - p, color=col, label=f'$X_0 = \, {x0}$')
     # Overlay in black--make lines clearer
     ax.plot(X_bar - p, 'k-', alpha=0.6)
@@ -1515,7 +1520,7 @@ Based on this claim, write a function to test irreducibility.
 
 ```{code-cell} ipython3
 def is_irreducible(P):
-    n = P.shape[0]
+    n = len(P)
     result = np.zeros((n, n))
     for i in range(n):
         result += np.linalg.matrix_power(P, i)
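The hunk ends before the function returns. A self-contained sketch of the complete check is given below; the final line is an assumed completion, based on the claim that $P$ is irreducible exactly when $\sum_{k=0}^{n-1} P^k$ has no zero entries, and `P_test` is a small matrix made up here for the usage example.

```python
import numpy as np

def is_irreducible(P):
    P = np.asarray(P)
    n = len(P)
    result = np.zeros((n, n))
    for i in range(n):
        result += np.linalg.matrix_power(P, i)
    # Irreducible iff every state is reachable from every state,
    # i.e. the summed powers have no zero entries (assumed completion)
    return np.all(result > 0)

P_test = np.array([[0.9, 0.1, 0.0],
                   [0.4, 0.4, 0.2],
                   [0.1, 0.1, 0.8]])
print(is_irreducible(P_test))   # expected: True for this chain
```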
@@ -1548,7 +1553,9 @@ power $P^k$ for all $k \in \mathbb N$.
 
 
 ```{solution-start} mc_ex_pk
+:class: dropdown
 ```
+
 Suppose that $P$ is stochastic and, moreover, that $P^k$ is
 stochastic for some integer $k$.
 
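The induction step this setup leads to can be written out in one line (a sketch consistent with the definitions above, writing $\mathbf{1}$ for the column vector of ones):

```latex
% If P^k is stochastic, then P^k \mathbf{1} = \mathbf{1} and P^k \geq 0 entrywise, so
P^{k+1} \mathbf{1} = P \left( P^k \mathbf{1} \right) = P \mathbf{1} = \mathbf{1},
\qquad
P^{k+1} = P \, P^k \geq 0,
% hence P^{k+1} is again stochastic, and induction extends the result to all k.
```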