@@ -229,22 +229,19 @@ def f(x):
     simple(learner, goal=lambda l: l.npoints > 10)
 
 
-@run_with(xfail(Learner1D), Learner2D, LearnerND)
+@run_with(Learner1D, Learner2D, LearnerND)
 def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
     """Adding already existing data is an idempotent operation.
 
     Either it is idempotent, or it is an error.
     This is the only sane behaviour.
-
-    This test will fail for the Learner1D because the losses are normalized by
-    _scale which is updated after every point. After one iteration of adding
-    points, the _scale could be different from what it was when calculating
-    the losses of the intervals. Readding the points a second time means
-    that the losses are now all normalized by the correct _scale.
     """
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
 
     N = random.randint(10, 30)
     control.ask(N)
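
The `xfail(Learner1D)` marker is replaced by pinning the private `_recompute_losses_factor` to 1, which (per the mechanism the deleted doc-string paragraph describes) makes `Learner1D` renormalize its interval losses whenever `_scale` changes, so stale normalization can no longer break idempotency. A minimal standalone sketch of the property, assuming the public `adaptive` API (`ask`/`tell`) and a hypothetical test function `peak`:

    import random

    import adaptive

    def peak(x):  # hypothetical test function
        return x + 0.1 / (0.01 + x**2)

    learner = adaptive.Learner1D(peak, bounds=(-1, 1))
    control = adaptive.Learner1D(peak, bounds=(-1, 1))
    # Private knob from the diff above: renormalize interval losses on
    # every scale change instead of only after sufficiently large ones.
    learner._recompute_losses_factor = 1
    control._recompute_losses_factor = 1

    N = random.randint(10, 30)
    control.ask(N)
    xs, _ = learner.ask(N)
    points = [(x, peak(x)) for x in xs]
    for p in points:
        control.tell(*p)
        learner.tell(*p)
    for p in points:  # telling identical data twice must be a no-op
        learner.tell(*p)

    # Both learners must now propose the same next points.
    pts, _ = learner.ask(5)
    cpts, _ = control.ask(5)
    assert set(pts) == set(cpts)
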
@@ -298,14 +295,11 @@ def test_adding_non_chosen_data(learner_type, f, learner_kwargs):
     assert set(pls) == set(cpls)
 
 
-@run_with(xfail(Learner1D), xfail(Learner2D), xfail(LearnerND), AverageLearner)
+@run_with(Learner1D, xfail(Learner2D), xfail(LearnerND), AverageLearner)
 def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
     """The order of calls to 'tell' between calls to 'ask'
     is arbitrary.
 
-    This test will fail for the Learner1D for the same reason as described in
-    the doc-string in `test_adding_existing_data_is_idempotent`.
-
     This test will fail for the Learner2D because
     `interpolate.interpnd.estimate_gradients_2d_global` will give different
     outputs based on the order of the triangles and values in
@@ -315,6 +309,10 @@ def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
 
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
+
     N = random.randint(10, 30)
     control.ask(N)
     xs, _ = learner.ask(N)
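
Same pattern as above: instead of xfail-ing `Learner1D`, the test pins `_recompute_losses_factor` so loss normalization cannot depend on insertion order; per the remaining doc-string, the analogous check still fails for `Learner2D`. The property itself, in a standalone sketch with the same hypothetical `peak` and assumptions as above:

    import random

    import adaptive

    def peak(x):  # hypothetical test function
        return x + 0.1 / (0.01 + x**2)

    learner = adaptive.Learner1D(peak, bounds=(-1, 1))
    control = adaptive.Learner1D(peak, bounds=(-1, 1))
    for l in (learner, control):
        l._recompute_losses_factor = 1

    N = random.randint(10, 30)
    control.ask(N)
    xs, _ = learner.ask(N)
    points = [(x, peak(x)) for x in xs]

    for p in points:
        control.tell(*p)
    random.shuffle(points)  # scramble the order for the learner only
    for p in points:
        learner.tell(*p)

    # The told order must not matter for the next suggestions.
    pts, _ = learner.ask(5)
    cpts, _ = control.ask(5)
    assert set(pts) == set(cpts)
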
@@ -443,14 +441,16 @@ def test_saving(learner_type, f, learner_kwargs):
     f = generate_random_parametrization(f)
     learner = learner_type(f, **learner_kwargs)
     control = learner_type(f, **learner_kwargs)
+    if learner_type is Learner1D:
+        learner._recompute_losses_factor = 1
+        control._recompute_losses_factor = 1
     simple(learner, lambda l: l.npoints > 100)
     fd, path = tempfile.mkstemp()
     try:
         learner.save(path)
         control.load(path)
-        if learner_type is not Learner1D:
-            # Because different scales result in differnt losses
-            np.testing.assert_almost_equal(learner.loss(), control.loss())
+
+        np.testing.assert_almost_equal(learner.loss(), control.loss())
 
         # Try if the control is runnable
         simple(control, lambda l: l.npoints > 200)
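
With the factor pinned, the saved and loaded learners agree on the loss, so the `Learner1D` carve-out and its comment about differing scales can go. A standalone round-trip sketch, assuming `save`/`load` accept a plain file path as they do in this diff:

    import os
    import tempfile

    import adaptive
    import numpy as np
    from adaptive.runner import simple

    def peak(x):  # hypothetical test function
        return x + 0.1 / (0.01 + x**2)

    learner = adaptive.Learner1D(peak, bounds=(-1, 1))
    control = adaptive.Learner1D(peak, bounds=(-1, 1))
    for l in (learner, control):
        l._recompute_losses_factor = 1

    simple(learner, lambda l: l.npoints > 100)

    fd, path = tempfile.mkstemp()
    os.close(fd)  # only the path is needed from here on
    try:
        learner.save(path)
        control.load(path)
        # Loading must reproduce the loss of the saved learner.
        np.testing.assert_almost_equal(learner.loss(), control.loss())
    finally:
        os.remove(path)
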
@@ -466,18 +466,22 @@ def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
     learner = BalancingLearner([learner_type(f, **learner_kwargs)])
     control = BalancingLearner([learner_type(f, **learner_kwargs)])
 
+    if learner_type is Learner1D:
+        for l, c in zip(learner.learners, control.learners):
+            l._recompute_losses_factor = 1
+            c._recompute_losses_factor = 1
+
     simple(learner, lambda l: l.learners[0].npoints > 100)
     folder = tempfile.mkdtemp()
 
     def fname(learner):
         return folder + 'test'
 
     try:
-        learner.save(fname)
-        control.load(fname)
-        if learner_type is not Learner1D:
-            # Because different scales result in differnt losses
-            np.testing.assert_almost_equal(learner.loss(), control.loss())
+        learner.save(fname=fname)
+        control.load(fname=fname)
+
+        np.testing.assert_almost_equal(learner.loss(), control.loss())
 
         # Try if the control is runnable
         simple(control, lambda l: l.learners[0].npoints > 200)
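
Beyond dropping the carve-out, this hunk also switches to passing `fname` by keyword. For a `BalancingLearner`, `fname` is a callable mapping each wrapped learner to a file name, as the `def fname(learner)` above shows. A sketch of that usage under the same assumptions as the earlier examples:

    import os
    import tempfile

    import adaptive
    from adaptive.runner import simple

    def peak(x):  # hypothetical test function
        return x + 0.1 / (0.01 + x**2)

    learner = adaptive.BalancingLearner(
        [adaptive.Learner1D(peak, bounds=(-1, 1))])
    control = adaptive.BalancingLearner(
        [adaptive.Learner1D(peak, bounds=(-1, 1))])

    simple(learner, lambda l: l.learners[0].npoints > 100)

    folder = tempfile.mkdtemp()

    def fname(child):
        # One file per wrapped learner; there is a single child here.
        return os.path.join(folder, 'test')

    learner.save(fname=fname)
    control.load(fname=fname)
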
@@ -494,14 +498,19 @@ def test_saving_with_datasaver(learner_type, f, learner_kwargs):
     arg_picker = operator.itemgetter('y')
     learner = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
     control = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
+
+    if learner_type is Learner1D:
+        learner.learner._recompute_losses_factor = 1
+        control.learner._recompute_losses_factor = 1
+
     simple(learner, lambda l: l.npoints > 100)
     fd, path = tempfile.mkstemp()
     try:
         learner.save(path)
         control.load(path)
-        if learner_type is not Learner1D:
-            # Because different scales result in differnt losses
-            np.testing.assert_almost_equal(learner.loss(), control.loss())
+
+        np.testing.assert_almost_equal(learner.loss(), control.loss())
+
         assert learner.extra_data == control.extra_data
 
         # Try if the control is runnable
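
Finally, the `DataSaver` variant: the wrapped learner lives at `.learner`, which is why the new lines reach through the wrapper to set `_recompute_losses_factor`, and `extra_data` must survive the save/load round trip. A standalone sketch under the same assumptions:

    import operator
    import os
    import tempfile

    import adaptive
    from adaptive.runner import simple

    def g(x):  # hypothetical function returning a dict
        return {'y': x + 0.1 / (0.01 + x**2), 'waste': 2 * x}

    arg_picker = operator.itemgetter('y')  # the learner sees only g(x)['y']
    learner = adaptive.DataSaver(
        adaptive.Learner1D(g, bounds=(-1, 1)), arg_picker)
    control = adaptive.DataSaver(
        adaptive.Learner1D(g, bounds=(-1, 1)), arg_picker)

    # Reach through the wrapper to the underlying Learner1D.
    learner.learner._recompute_losses_factor = 1
    control.learner._recompute_losses_factor = 1

    simple(learner, lambda l: l.npoints > 100)

    fd, path = tempfile.mkstemp()
    os.close(fd)
    try:
        learner.save(path)
        control.load(path)
        # The auxiliary data must survive the round trip.
        assert learner.extra_data == control.extra_data
    finally:
        os.remove(path)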