|
5 | 5 | An overview of metrics & model evaluation to examine model fit quality. |
6 | 6 | """ |
7 | 7 |
|
| 8 | +################################################################################################### |
| 9 | + |
| 10 | +# Import the model object |
| 11 | +from specparam import SpectralModel |
| 12 | + |
| 13 | +# Import function to check available list of metrics |
| 14 | +from specparam.metrics.definitions import check_metrics |
| 15 | + |
| 16 | +# Import a utility to download and load example data |
| 17 | +from specparam.utils.download import load_example_data |
| 18 | + |
8 | 19 | ################################################################################################### |
9 | 20 | # Model Metrics |
10 | 21 | # ------------- |
11 | 22 | # |
12 | 23 | # In this tutorial, we will explore model metrics. |
13 | 24 | # |
14 | | -# The `specparam` module uses the term `metric` to refer to a measure that is computed that |
15 | | -# reflects something about the spectral model (but that is not computed as part of the model fit). |
| 25 | +# The `specparam` module uses the term `metric` to refer to a measure that reflects something
| 26 | +# about the spectral model (but that is not computed as part of the model fit). |
16 | 27 | # |
| 28 | +# Generally, these metrics are used to evaluate how well the model describes the data,
| 29 | +# and thus to assess the quality of the model fit.
| 30 | +# |
| 31 | +# The module comes with various available metrics. To see the list of available metrics, |
| 32 | +# we can use the :func:`~specparam.metrics.definitions.check_metrics` function. |
17 | 33 | # |
18 | 34 |
|
| 35 | +################################################################################################### |
| 36 | + |
| 37 | +# Check the list of available metrics |
| 38 | +check_metrics() |
| 39 | + |
19 | 40 | ################################################################################################### |
20 | 41 | # |
| 42 | +# As we can see above, metrics are organized into categories, including 'error' and 'gof' |
| 43 | +# (goodness of fit). Within each category there are different specific measures. |
21 | 44 | # |
| 45 | +# Broadly, error metrics quantify the difference between the full model fit and the
| 46 | +# original data, while goodness-of-fit metrics quantify the correspondence between
| 47 | +# the model and the data.
22 | 48 | # |
23 | 49 |
|
24 | 50 | ################################################################################################### |
25 | | -# Interpreting Model Fit Quality Measures |
26 | | -# --------------------------------------- |
| 51 | +# Specifying Metrics |
| 52 | +# ~~~~~~~~~~~~~~~~~~ |
| 53 | +# |
| 54 | +# Which metrics are computed depends on how the model object is initialized: the metrics
| 55 | +# to use can be specified with the `metrics` argument when initializing a model object.
| 56 | +# When a model is fit, these metrics are then automatically computed and stored
| 57 | +# in the model object.
| 58 | +# |
| 59 | + |
| 60 | +################################################################################################## |
| 61 | + |
| 62 | +# Download example data files needed for this example |
| 63 | +freqs = load_example_data('freqs.npy', folder='data') |
| 64 | +spectrum = load_example_data('spectrum.npy', folder='data') |
| 65 | + |
| 66 | +################################################################################################### |
| 67 | + |
| 68 | +# Define a set of metrics to use |
| 69 | +metrics1 = ['error_mae', 'gof_rsquared'] |
| 70 | + |
| 71 | +# Initialize model with metric specification & fit model |
| 72 | +fm1 = SpectralModel(metrics=metrics1) |
| 73 | +fm1.report(freqs, spectrum) |
| 74 | + |
| 75 | +################################################################################################### |
| 76 | +# |
| 77 | +# After model fitting, values of the computed metrics can be accessed with the |
| 78 | +# :meth:`~specparam.SpectralModel.get_metrics` method.
| 79 | +# |
| 80 | + |
| 81 | +################################################################################################### |
| 82 | + |
| 83 | +print('Error: ', fm1.get_metrics('error', 'mae')) |
| 84 | +print('GOF: ', fm1.get_metrics('gof', 'rsquared'))
| 85 | + |
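| | +###################################################################################################
| | +#
| | +# For intuition, the 'error_mae' value above is the mean absolute error (MAE) between the
| | +# original power spectrum and the full model fit. As a minimal sketch, the check below
| | +# re-computes this manually, assuming the data and model fit are available as the
| | +# `power_spectrum` and `modeled_spectrum_` attributes of the model object.
| | +#
| | +
| | +###################################################################################################
| | +
| | +import numpy as np
| | +
| | +# Manually re-compute the mean absolute error between the data and the full model fit
| | +# Note: the `power_spectrum` & `modeled_spectrum_` attribute names are assumed here
| | +print('Manual MAE: ', np.mean(np.abs(fm1.power_spectrum - fm1.modeled_spectrum_)))
| | +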
| 86 | +################################################################################################### |
| 87 | +# |
| 88 | +# All the metric results are stored within a Metrics sub-component of the model results,
| 89 | +# from which they can also be accessed directly.
| 90 | +# |
| 91 | + |
| 92 | +################################################################################################### |
| 93 | + |
| 94 | +# Check the full set of metric results |
| 95 | +print(fm1.results.metrics.results) |
| 96 | + |
| 97 | +################################################################################################### |
| 98 | +# Default Metrics |
| 99 | +# ~~~~~~~~~~~~~~~ |
| 100 | +# |
| 101 | +# You might notice that the metrics specified above are the same ones that have been computed
| 102 | +# in previous examples, even though we did not explicitly specify them there.
| 103 | +#
| 104 | +# This is because these two metrics are the default metrics, which are selected
| 105 | +# whenever no explicit metrics definition is given.
| 106 | +# |
| 107 | + |
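| | +###################################################################################################
| | +#
| | +# We can confirm this by initializing a model without any metrics specification and checking
| | +# that the same metrics are computed, as in the short example below.
| | +#
| | +
| | +###################################################################################################
| | +
| | +# Initialize model without specifying metrics & fit model - the default metrics are used
| | +fm_default = SpectralModel()
| | +fm_default.fit(freqs, spectrum)
| | +
| | +# Check the computed metric results, which match the default metrics described above
| | +print(fm_default.results.metrics.results)
| | +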
| 108 | +################################################################################################### |
| 109 | +# Changing Metrics |
| 110 | +# ~~~~~~~~~~~~~~~~ |
27 | 111 | # |
28 | | -# After model fitting, some goodness of fit metrics are calculated to assist with assessing |
29 | | -# the quality of the model fits. It calculates both the model fit error, as the mean absolute |
30 | | -# error (MAE) between the full model fit (``modeled_spectrum_``) and the original power spectrum, |
31 | | -# as well as the R-squared correspondence between the original spectrum and the full model. |
| 112 | +# We can use explicit metric specification to select different metrics to compute, as in the |
| 113 | +# next example. |
32 | 114 | # |
33 | 115 |
|
| 116 | +################################################################################################### |
| 117 | + |
| 118 | +# Define a new set of metrics to use |
| 119 | +metrics2 = ['error_mse', 'gof_adjrsquared'] |
| 120 | + |
| 121 | +# Initialize model with metric specification & fit model |
| 122 | +fm2 = SpectralModel(metrics=metrics2) |
| 123 | +fm2.report(freqs, spectrum) |
| 124 | + |
| 125 | +################################################################################################### |
| 126 | +# Adding Additional Metrics |
| 127 | +# ~~~~~~~~~~~~~~~~~~~~~~~~~ |
| 128 | +# |
| 129 | +# We are also not limited to a specific number of metrics - any number of the available
| 130 | +# metrics can be specified, as in the following example.
| 131 | +# |
| 132 | + |
| 133 | +################################################################################################### |
| 134 | + |
| 135 | +# Define a new set of metrics to use |
| 136 | +metrics3 = ['error_mae', 'error_mse', 'gof_rsquared', 'gof_adjrsquared'] |
| 137 | + |
| 138 | +# Initialize model with metric specification & fit model |
| 139 | +fm3 = SpectralModel(metrics=metrics3) |
| 140 | +fm3.report(freqs, spectrum) |
| 141 | + |
| 142 | +################################################################################################### |
| 143 | +# |
| 144 | +# Note that when using `get_metrics`, you specify the category and measure names.
| 145 | +#
| 146 | +# To return all computed metrics within a category, omit the measure specification.
| 147 | +# |
| 148 | + |
| 149 | +################################################################################################### |
| 150 | + |
| 151 | +print(fm3.get_metrics('error')) |
| 152 | +print(fm3.get_metrics('error', 'mse')) |
| 153 | + |
| 154 | +################################################################################################### |
| 155 | +# |
| 156 | +# As before, you can also check the full set of metric results from the model object's results.
| 157 | +# |
| 158 | + |
| 159 | +################################################################################################### |
| 160 | + |
| 161 | +print(fm3.results.metrics.results) |
| 162 | + |
| 163 | +################################################################################################### |
| 164 | +# Interpreting Model Fit Quality Measures |
| 165 | +# --------------------------------------- |
| 166 | +# |
34 | 167 | # These scores can be used to assess how the model is performing. However, interpreting these
35 | 168 | # measures requires a bit of nuance. Model fitting is NOT optimized to minimize fit error / |
36 | 169 | # maximize r-squared at all costs. To do so typically results in fitting a large number of peaks, |
|
52 | 185 | # for a given modality / dataset / application. |
53 | 186 | # |
54 | 187 |
|
55 | | - |
56 | | -################################################################################################### |
57 | | - |
58 | | - |
59 | | -################################################################################################### |
60 | | - |
61 | | - |
62 | 188 | ################################################################################################### |
| 189 | +# Defining Custom Metrics |
| 190 | +# ----------------------- |
| 191 | +# |
| 192 | +# In this tutorial, we have explored specifying and using metrics by selecting from measures |
| 193 | +# that are defined and available within the module. You can also define custom metrics if |
| 194 | +# that is useful for your use case - see an example of this in the Examples. |
| 195 | +# |