|
129 | 129 | "def plot_posterior(p=0.6, N=0):\n", |
130 | 130 | " \"\"\"Plot the posterior given a uniform prior; Bernoulli trials\n", |
131 | 131 | " with probability p; sample size N\"\"\"\n", |
| 132 | + " # Set seed\n", |
132 | 133 | " np.random.seed(42)\n", |
| 134 | + "\n", |
133 | 135 | " # Flip coins \n", |
134 | 136 | " n_successes = np.random.binomial(N, p)\n", |
| 137 | + " \n", |
135 | 138 | " # X-axis for PDF\n", |
136 | 139 | " x = np.linspace(0, 1, 100)\n", |
137 | | - " # Prior\n", |
| 140 | + " \n", |
| 141 | + " # Write out equation for uniform prior\n", |
138 | 142 | " prior = 1\n", |
139 | | - " # Compute posterior, given the likelihood (analytic form)\n", |
140 | | - " posterior = x**n_successes*(1-x)**(N-n_successes)*prior\n", |
141 | | - " posterior /= np.max(posterior) # so that peak always at 1\n", |
| 143 | + " \n", |
| 144 | + " # Write out equation for posterior, which is likelihood * prior.\n", |
| 145 | + " posterior = (x**n_successes) * ((1-x)**(N-n_successes)) * prior\n", |
| 146 | + " \n", |
| 147 | + "    # Pseudo-normalize the posterior so that we can compare posteriors on the same scale.\n", |
| 148 | + " posterior /= np.max(posterior) \n", |
| 149 | + " \n", |
| 150 | + " # Plot posterior\n", |
142 | 151 | " plt.plot(x, posterior)\n", |
143 | 152 | " plt.show()" |
144 | 153 | ] |
|
245 | 254 | }, |
246 | 255 | { |
247 | 256 | "cell_type": "code", |
248 | | - "execution_count": 5, |
| 257 | + "execution_count": 7, |
249 | 258 | "metadata": {}, |
250 | 259 | "outputs": [], |
251 | 260 | "source": [ |
252 | | - "# Solution\n", |
| 261 | + "# Write the plotting function, as above\n", |
253 | 262 | "def plot_posteriors(p=0.6, N=0):\n", |
254 | 263 | " np.random.seed(42)\n", |
255 | 264 | " n_successes = np.random.binomial(N, p)\n", |
256 | 265 | " x = np.linspace(0.01, 0.99, 100)\n", |
257 | | - " posterior1 = x**n_successes*(1-x)**(N-n_successes) # w/ uniform prior\n", |
258 | | - " posterior1 /= np.max(posterior1) # so that peak always at 1\n", |
259 | | - " plt.plot(x, posterior1, label='Uniform prior')\n", |
260 | | - " jp = np.sqrt(x*(1-x))**(-1) # Jeffreys prior\n", |
261 | | - " posterior2 = posterior1*jp # w/ Jeffreys prior\n", |
262 | | - " posterior2 /= np.max(posterior2) # so that peak always at 1 (not quite correct to do; see below)\n", |
263 | | - " plt.plot(x, posterior2, label='Jeffreys prior')\n", |
| 266 | + "\n", |
| 267 | + " # Write out the likelihood for the data\n", |
| 268 | + " likelihood = x**n_successes*(1-x)**(N-n_successes) \n", |
| 269 | + " \n", |
| 270 | + " # Write out equation for posterior given uniform prior\n", |
| 271 | + " prior_uniform = 1 \n", |
| 272 | + " posterior_uniform = likelihood * prior_uniform\n", |
| 273 | + " posterior_uniform /= np.max(posterior_uniform)\n", |
| 274 | + " plt.plot(x, posterior_uniform, label='Uniform prior')\n", |
| 275 | + " \n", |
| 276 | + " # Write out equation for posterior given Jeffreys prior\n", |
| 277 | + " prior_jeffreys = np.sqrt(x*(1-x))**(-1)\n", |
| 278 | + " posterior_jeffreys = likelihood * prior_jeffreys\n", |
| 279 | + " posterior_jeffreys /= np.max(posterior_jeffreys)\n", |
| 280 | + " plt.plot(x, posterior_jeffreys, label='Jeffreys prior')\n", |
264 | 281 | " plt.legend()\n", |
265 | 282 | " plt.show()" |
266 | 283 | ] |
267 | 284 | }, |
268 | 285 | { |
269 | 286 | "cell_type": "code", |
270 | | - "execution_count": 6, |
| 287 | + "execution_count": 8, |
271 | 288 | "metadata": {}, |
272 | 289 | "outputs": [ |
273 | 290 | { |
|
286 | 303 | "source": [ |
287 | 304 | "interact(plot_posteriors, p=(0, 1, 0.01), N=(0, 100));" |
288 | 305 | ] |
| 306 | + }, |
| 307 | + { |
| 308 | + "cell_type": "code", |
| 309 | + "execution_count": null, |
| 310 | + "metadata": {}, |
| 311 | + "outputs": [], |
| 312 | + "source": [] |
289 | 313 | } |
290 | 314 | ], |
291 | 315 | "metadata": { |
|
0 commit comments