@@ -49,7 +49,7 @@ tl;dr, one can use the following *loss functions* that
 + `adaptive.learner.learner2D.default_loss`
 + `adaptive.learner.learner2D.uniform_loss`
 + `adaptive.learner.learner2D.minimize_triangle_surface_loss`
-+ `adaptive.learner.learner2D.resolution_loss`
++ `adaptive.learner.learner2D.resolution_loss_function`


 Uniform sampling
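As a quick illustration of the choices in this list (a minimal sketch, not part of this commit; `f` is a stand-in for any scalar function of two variables):

    import adaptive
    from adaptive.learner.learner2D import uniform_loss

    def f(xy):
        x, y = xy
        return x**2 + y**2

    # Any of the loss functions listed above can be passed as loss_per_triangle.
    learner = adaptive.Learner2D(f, bounds=[(-1, 1), (-1, 1)],
                                 loss_per_triangle=uniform_loss)
    adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.01)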
@@ -132,34 +132,23 @@ small (0 loss).

     %%opts EdgePaths (color='w') Image [logz=True colorbar=True]

-    def resolution_loss(ip, min_distance=0, max_distance=1):
+    def resolution_loss_function(min_distance=0, max_distance=1):
         """min_distance and max_distance should be in between 0 and 1
         because the total area is normalized to 1."""
+        def resolution_loss(ip):
+            from adaptive.learner.learner2D import default_loss, areas
+            loss = default_loss(ip)

-        from adaptive.learner.learner2D import areas, deviations
+            A = areas(ip)
+            # Setting the loss of triangles with a small area to zero such that they won't be chosen again
+            loss[A < min_distance**2] = 0

-        A = areas(ip)
-
-        # 'deviations' returns an array of shape '(n, len(ip))', where
-        # 'n' is the dimension of the output of the learned function.
-        # In this case we know that the learned function returns a scalar,
-        # so 'deviations' returns an array of shape '(1, len(ip))'.
-        # It represents the deviation of the function value from a linear estimate
-        # over each triangular subdomain.
-        dev = deviations(ip)[0]
-
-        # we add terms of the same dimension: dev == [distance], A == [distance**2]
-        loss = np.sqrt(A) * dev + A
-
-        # Setting the loss of triangles with a small area to zero such that they won't be chosen again
-        loss[A < min_distance**2] = 0
-
-        # Setting triangles that have a size larger than max_distance to infinite loss
-        loss[A > max_distance**2] = np.inf
-
-        return loss
+            # Setting triangles that have a size larger than max_distance to infinite loss
+            loss[A > max_distance**2] = np.inf

-    loss = partial(resolution_loss, min_distance=0.01)
+            return loss
+        return resolution_loss
+    loss = resolution_loss_function(min_distance=0.01)

     learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)], loss_per_triangle=loss)
     runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02)
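The refactor also changes how the tunable parameters are bound: the old tutorial fixed them with `functools.partial`, while the new version uses a factory that closes over them. Either way the result is the single-argument `loss(ip)` callable that `loss_per_triangle` expects. A minimal sketch of the equivalence, using only `default_loss` and `areas` from `adaptive.learner.learner2D` (the name `capped_loss` is illustrative, not part of adaptive):

    from functools import partial

    import numpy as np
    from adaptive.learner.learner2D import areas, default_loss

    # Old style: a loss with extra keyword arguments, bound with partial.
    def capped_loss(ip, max_distance=1):
        loss = default_loss(ip)
        loss[areas(ip) > max_distance**2] = np.inf
        return loss

    loss_a = partial(capped_loss, max_distance=0.5)

    # New style: a factory that closes over the parameter, as in the diff above.
    def capped_loss_function(max_distance=1):
        def capped_loss(ip):
            loss = default_loss(ip)
            loss[areas(ip) > max_distance**2] = np.inf
            return loss
        return capped_loss

    loss_b = capped_loss_function(max_distance=0.5)
    # Both loss_a and loss_b take a single interpolator argument `ip`.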
@@ -169,4 +158,4 @@ Awesome! We zoom in on the singularity, but not at the expense of
 sampling the rest of the domain a reasonable amount.

 The above strategy is available as
-`adaptive.learner.learner2D.resolution_loss`.
+`adaptive.learner.learner2D.resolution_loss_function`.
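With this commit the packaged version can be used directly. A minimal usage sketch, reusing `f_divergent_2d` from the tutorial snippet above and the signature shown in the diff:

    import adaptive
    from adaptive.learner.learner2D import resolution_loss_function

    loss = resolution_loss_function(min_distance=0.01, max_distance=1)
    learner = adaptive.Learner2D(f_divergent_2d, [(-1, 1), (-1, 1)],
                                 loss_per_triangle=loss)
    runner = adaptive.BlockingRunner(learner, goal=lambda l: l.loss() < 0.02)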