Commit 1bcbb1d

integrate functions
1 parent ea94e4f commit 1bcbb1d

File tree

1 file changed: +48 −37 lines


lectures/markov_chains.md

Lines changed: 48 additions & 37 deletions
@@ -4,7 +4,7 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.1
+    jupytext_version: 1.14.4
 kernelspec:
   display_name: Python 3 (ipykernel)
   language: python
@@ -1067,8 +1067,22 @@ P @ P

 Let's pick an initial distribution $\psi$ and trace out the sequence of distributions $\psi P^t$.

+First, we write a function to simulate the sequence of distributions for `n` periods.
+
+```{code-cell} ipython3
+def simulate_ψ(ψ_0, P, n):
+    # Store the distribution at each of the n dates as a row of ψs
+    ψs = np.empty((n, P.shape[0]))
+    ψ = ψ_0
+    for t in range(n):
+        ψs[t] = ψ
+        ψ = ψ @ P      # update the distribution by one period
+    return ψs
+```
+
+Now we plot the sequence.
+
 ```{code-cell} ipython3
-ψ = (0.0, 0.2, 0.8)        # Initial condition
+ψ_0 = (0.0, 0.2, 0.8)        # Initial condition

 fig = plt.figure(figsize=(8, 6))
 ax = fig.add_subplot(111, projection='3d')
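To see what the new `simulate_ψ` helper produces on its own, here is a minimal, self-contained sketch; the helper is repeated so the snippet runs standalone, and the 3×3 matrix `P` below is a stand-in stochastic matrix, not the lecture's transition matrix.

```python
import numpy as np

def simulate_ψ(ψ_0, P, n):
    # Iterate ψ_{t+1} = ψ_t P and store each distribution as a row
    ψs = np.empty((n, P.shape[0]))
    ψ = ψ_0
    for t in range(n):
        ψs[t] = ψ
        ψ = ψ @ P
    return ψs

P = np.array([[0.9, 0.1, 0.0],     # stand-in stochastic matrix
              [0.4, 0.4, 0.2],
              [0.1, 0.1, 0.8]])

ψs = simulate_ψ((0.0, 0.2, 0.8), P, 20)
print(ψs[0])             # the initial distribution
print(ψs[-1])            # the distribution after 19 updates
print(ψs.sum(axis=1))    # every row remains a distribution (sums to 1)
```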
@@ -1078,14 +1092,9 @@ ax.set(xlim=(0, 1), ylim=(0, 1), zlim=(0, 1),
        yticks=(0.25, 0.5, 0.75),
        zticks=(0.25, 0.5, 0.75))

-x_vals, y_vals, z_vals = [], [], []
-for t in range(20):
-    x_vals.append(ψ[0])
-    y_vals.append(ψ[1])
-    z_vals.append(ψ[2])
-    ψ = ψ @ P
+ψs = simulate_ψ(ψ_0, P, 20)

-ax.scatter(x_vals, y_vals, z_vals, c='r', s=60)
+ax.scatter(ψs[:,0], ψs[:,1], ψs[:,2], c='r', s=60)
 ax.view_init(30, 210)

 mc = qe.MarkovChain(P)
@@ -1113,8 +1122,22 @@ We can show this in a slightly different way by focusing on the probability that

 The following figure shows the dynamics of $(\psi P^t)(i)$ as $t$ gets large, for each state $i$.

+First, we write a function to draw `n` initial distributions at random.
+
 ```{code-cell} ipython3
+def generate_initial_values(n, n_state):
+    ψ_0s = np.empty((n, n_state))
+
+    for i in range(n):
+        draws = np.random.randint(1, 10_000_000, size=n_state)
+
+        # Scale the draws so that they add up to 1
+        ψ_0s[i, :] = draws / sum(draws)
+
+    return ψ_0s
+```
+
+```{code-cell} ipython3
 # Define the number of iterations
 n = 50
 n_state = P.shape[0]
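As a quick, self-contained check of the logic in `generate_initial_values` (a sketch, not part of the commit; the function is repeated so the snippet runs on its own), each returned row should be a valid probability vector:

```python
import numpy as np

def generate_initial_values(n, n_state):
    # Draw n random probability vectors of length n_state
    ψ_0s = np.empty((n, n_state))
    for i in range(n):
        draws = np.random.randint(1, 10_000_000, size=n_state)
        ψ_0s[i, :] = draws / draws.sum()    # normalize so the row sums to 1
    return ψ_0s

ψ_0s = generate_initial_values(5, 3)
print(ψ_0s)
print(np.allclose(ψ_0s.sum(axis=1), 1.0))   # True: rows sum to 1
print((ψ_0s > 0).all())                     # True: entries are strictly positive
```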
@@ -1124,35 +1147,28 @@ mc = qe.MarkovChain(P)
 # Draw the plot
 fig, axes = plt.subplots(nrows=1, ncols=n_state)
 plt.subplots_adjust(wspace=0.35)
-x0s = np.ones((n, n_state))
-for i in range(n):
-    draws = np.random.randint(1, 10_000_000, size=n_state)

-    # Scale them so that they add up into 1
-    x0s[i,:] = np.array(draws/sum(draws))
+ψ_0s = generate_initial_values(n, n_state)

-# Loop through many initial values
-for x0 in x0s:
-    x = x0
-    X = np.zeros((n, n_state))
+for ψ_0 in ψ_0s:
+    ψs = simulate_ψ(ψ_0, P, n)

     # Obtain and plot distributions at each state
-    for t in range(0, n):
-        x = x @ P
-        X[t] = x
     for i in range(n_state):
-        axes[i].plot(range(0, n), X[:,i], alpha=0.3)
+        axes[i].plot(range(0, n), ψs[:,i], alpha=0.3)

 for i in range(n_state):
     axes[i].axhline(ψ_star[i], linestyle='dashed', lw=2, color = 'black',
                     label = fr'$\psi^*({i})$')
     axes[i].set_xlabel('t')
-    axes[i].set_ylabel(fr'probability of state $i$')
+    axes[i].set_ylabel(fr'$\psi({i})$')
     axes[i].legend()

 plt.show()
 ```

+The convergence to $\psi^*$ holds for many different initial distributions.
+
 +++ {"user_expressions": []}

 #### Example: Failure of Convergence
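The claim that convergence to $\psi^*$ holds across initial distributions can also be checked numerically. A rough sketch, assuming `numpy` and `quantecon` are installed and using a stand-in matrix `P` rather than the lecture's:

```python
import numpy as np
import quantecon as qe

P = np.array([[0.9, 0.1, 0.0],     # stand-in irreducible, aperiodic chain
              [0.4, 0.4, 0.2],
              [0.1, 0.1, 0.8]])

ψ_star = qe.MarkovChain(P).stationary_distributions[0]

for _ in range(5):
    draws = np.random.randint(1, 10_000_000, size=3)
    ψ = draws / draws.sum()              # random initial distribution
    for t in range(50):
        ψ = ψ @ P                        # push the distribution forward 50 periods
    print(np.max(np.abs(ψ - ψ_star)))    # small for every starting point
```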
@@ -1170,20 +1186,15 @@ n_state = P.shape[0]
 mc = qe.MarkovChain(P)
 ψ_star = mc.stationary_distributions[0]
 fig, axes = plt.subplots(nrows=1, ncols=n_state)
-x0s = np.ones((n, n_state))
-for i in range(n):
-    nums = np.random.randint(1, 10_000_000, size=n_state)
-    x0s[i,:] = np.array(nums/sum(nums))
-
-for x0 in x0s:
-    x = x0
-    X = np.zeros((n,n_state))
-
-    for t in range(0, n):
-        x = x @ P
-        X[t] = x
+
+ψ_0s = generate_initial_values(n, n_state)
+
+for ψ_0 in ψ_0s:
+    ψs = simulate_ψ(ψ_0, P, n)
+
+    # Obtain and plot distributions at each state
     for i in range(n_state):
-        axes[i].plot(range(20, n), X[20:,i], alpha=0.3)
+        axes[i].plot(range(0, n), ψs[:,i], alpha=0.3)

 for i in range(n_state):
     axes[i].axhline(ψ_star[i], linestyle='dashed', lw=2, color = 'black', label = fr'$\psi^*({i})$')
@@ -1200,7 +1211,7 @@ This example helps to emphasize the fact that asymptotic stationarity is about t

 The proportion of time spent in a state can converge to the stationary distribution with periodic chains.

-However, the distribution at each state will not.
+However, the sequence of distributions $\psi P^t$ does not converge.

 (finite_mc_expec)=
 ## Computing Expectations
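The periodic-chain remark above can be illustrated with a tiny example (a sketch, not part of the commit, using only `numpy`): under a two-state chain that switches deterministically, $\psi P^t$ oscillates forever, while the fraction of time a path spends in each state still settles at the stationary value of one half.

```python
import numpy as np

P = np.array([[0.0, 1.0],
              [1.0, 0.0]])          # periodic chain: the state flips every period

# The sequence of distributions ψ P^t never settles down...
ψ = np.array([1.0, 0.0])
for t in range(4):
    print(t, ψ)                     # alternates between (1, 0) and (0, 1)
    ψ = ψ @ P

# ...but the fraction of time spent in state 0 converges to 1/2
T = 10_000
x, time_in_0 = 0, 0
for t in range(T):
    time_in_0 += (x == 0)
    x = 1 - x                       # under this P the next state is deterministic
print(time_in_0 / T)                # ≈ 0.5
```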
