Skip to content

Commit d9964ff

Browse files
Merge pull request #1 from cgranade/fix-hho-tests
Fix hho tests
2 parents 0f1f598 + cab23a9 commit d9964ff

File tree

1 file changed

+77
-169
lines changed

src/qinfer/tests/test_optimiser.py

Lines changed: 77 additions & 169 deletions
Original file line numberDiff line numberDiff line change
@@ -30,178 +30,86 @@
3030

3131
## IMPORTS ####################################################################
3232

33+
import numpy as np
34+
35+
from qinfer.tests.base_test import DerandomizedTestCase
36+
3337
import qinfer.rb as rb
3438
import qinfer.distributions as dist
3539

36-
import numpy as np
37-
import random as rnd
38-
39-
from functools import partial
40+
from qinfer.hyper_heuristic_optimisers import (
41+
ParticleSwarmOptimizer,
42+
ParticleSwarmSimpleAnnealingOptimizer,
43+
ParticleSwarmTemperingOptimizer
44+
)
45+
from qinfer.expdesign import ExpSparseHeuristic
4046

4147
## CLASSES ####################################################################
4248

43-
class TestPSO(DerandomizedTestCase):
    """Tests for ParticleSwarmOptimizer on standard optimisation benchmarks."""

    def test_pso_quad(self):
        # Quadratic bowl with its minimum at 0.5 in every coordinate.
        # BUG FIX: was `numpy.sum`, but only `np` is imported — NameError.
        f_quad = lambda x: np.sum(10 * (x - 0.5) ** 2)
        hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_quad)
        hh_opt()

    def test_pso_sin_sq(self):
        # Periodic objective; minima wherever x - 0.2 is a multiple of pi.
        f_sin_sq = lambda x: np.sum(np.sin(x - 0.2) ** 2)
        hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_sin_sq)
        hh_opt()

    def test_pso_rosenbrock(self):
        # Classic non-convex Rosenbrock valley, normalised by dimension.
        f_rosenbrock = lambda x: np.sum([
            ((x[i + 1] - x[i] ** 2) ** 2 + (1 - x[i]) ** 2) / len(x)
            for i in range(len(x) - 1)
        ])
        hh_opt = ParticleSwarmOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_rosenbrock)
        hh_opt()

    def test_pso_perf_test_multiple_short(self):
        # Define our experiment
        n_trials = 20       # Times we repeat the set of experiments
        n_exp = 100         # Number of experiments in the set
        n_particles = 4000  # Number of points we track during the experiment

        # Model for the experiment
        model = rb.RandomizedBenchmarkingModel()

        # Ordering of RB is 'p', 'A', 'B'
        # A + B < 1, 0 < p < 1
        # Prior distribution of the experiment
        prior = dist.PostselectedDistribution(
            dist.MultivariateNormalDistribution(
                mean=[0.5, 0.1, 0.25], cov=np.diag([0.1, 0.1, 0.1])
            ),
            model
        )

        # Heuristic used in the experiment.
        # BUG FIX: was `qi.expdesign.ExpSparseHeuristic`, but no `qi` name is
        # imported anywhere in this file; use the directly-imported class.
        heuristic_class = ExpSparseHeuristic

        # Heuristic parameters
        params = ['base', 'scale']

        hh_opt = ParticleSwarmOptimizer(
            params,
            n_trials=n_trials,
            n_particles=n_particles,
            prior=prior,
            model=model,
            n_exp=n_exp,
            heuristic_class=heuristic_class
        )
        hh_opt(n_pso_iterations=5, n_pso_particles=6)
98-
class TestPSSAO(DerandomizedTestCase):
    """Tests for ParticleSwarmSimpleAnnealingOptimizer on standard benchmarks.

    BUG FIX: this was declared with ``def`` instead of ``class``, so unittest
    never discovered or ran any of the tests inside it.
    """

    def test_pssao_quad(self):
        # Quadratic bowl with its minimum at 0.5 in every coordinate.
        # BUG FIX: was `numpy.sum`, but only `np` is imported — NameError.
        f_quad = lambda x: np.sum(10 * (x - 0.5) ** 2)
        hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_quad)
        hh_opt()

    def test_pssao_sin_sq(self):
        # Periodic objective; minima wherever x - 0.2 is a multiple of pi.
        f_sin_sq = lambda x: np.sum(np.sin(x - 0.2) ** 2)
        hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_sin_sq)
        hh_opt()

    def test_pssao_rosenbrock(self):
        # Classic non-convex Rosenbrock valley, normalised by dimension.
        f_rosenbrock = lambda x: np.sum([
            ((x[i + 1] - x[i] ** 2) ** 2 + (1 - x[i]) ** 2) / len(x)
            for i in range(len(x) - 1)
        ])
        hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_rosenbrock)
        hh_opt()

    def test_pssao_perf_test_multiple_short(self):
        # Define our experiment
        n_trials = 20       # Times we repeat the set of experiments
        n_exp = 150         # Number of experiments in the set
        n_particles = 4000  # Number of points we track during the experiment

        # Model for the experiment
        model = rb.RandomizedBenchmarkingModel()

        # Ordering of RB is 'p', 'A', 'B'
        # A + B < 1, 0 < p < 1
        # Prior distribution of the experiment
        prior = dist.PostselectedDistribution(
            dist.MultivariateNormalDistribution(
                mean=[0.5, 0.1, 0.25], cov=np.diag([0.1, 0.1, 0.1])
            ),
            model
        )

        # Heuristic used in the experiment.
        # BUG FIX: was `qi.expdesign.ExpSparseHeuristic`, but no `qi` name is
        # imported anywhere in this file; use the directly-imported class.
        heuristic_class = ExpSparseHeuristic

        # Heuristic parameters
        params = ['base', 'scale']

        hh_opt = ParticleSwarmSimpleAnnealingOptimizer(
            params,
            n_trials=n_trials,
            n_particles=n_particles,
            prior=prior,
            model=model,
            n_exp=n_exp,
            heuristic_class=heuristic_class
        )
        hh_opt(n_pso_iterations=5, n_pso_particles=6)
154-
class TestPSTO(DerandomizedTestCase):
    """Tests for ParticleSwarmTemperingOptimizer on standard benchmarks.

    BUG FIX: this was declared with ``def`` instead of ``class``, so unittest
    never discovered or ran any of the tests inside it.
    """

    def test_psto_quad(self):
        # Quadratic bowl with its minimum at 0.5 in every coordinate.
        # BUG FIX: was `numpy.sum`, but only `np` is imported — NameError.
        f_quad = lambda x: np.sum(10 * (x - 0.5) ** 2)
        hh_opt = ParticleSwarmTemperingOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_quad)
        hh_opt()

    def test_psto_sin_sq(self):
        # Periodic objective; minima wherever x - 0.2 is a multiple of pi.
        f_sin_sq = lambda x: np.sum(np.sin(x - 0.2) ** 2)
        hh_opt = ParticleSwarmTemperingOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_sin_sq)
        hh_opt()

    def test_psto_rosenbrock(self):
        # Classic non-convex Rosenbrock valley, normalised by dimension.
        f_rosenbrock = lambda x: np.sum([
            ((x[i + 1] - x[i] ** 2) ** 2 + (1 - x[i]) ** 2) / len(x)
            for i in range(len(x) - 1)
        ])
        hh_opt = ParticleSwarmTemperingOptimizer(['x', 'y', 'z', 'a'], fitness_function=f_rosenbrock)
        hh_opt()

    def test_psto_perf_test_multiple_short(self):
        # Define our experiment
        n_trials = 20       # Times we repeat the set of experiments
        n_exp = 150         # Number of experiments in the set
        n_particles = 4000  # Number of points we track during the experiment

        # Model for the experiment
        model = rb.RandomizedBenchmarkingModel()

        # Ordering of RB is 'p', 'A', 'B'
        # A + B < 1, 0 < p < 1
        # Prior distribution of the experiment
        prior = dist.PostselectedDistribution(
            dist.MultivariateNormalDistribution(
                mean=[0.5, 0.1, 0.25], cov=np.diag([0.1, 0.1, 0.1])
            ),
            model
        )

        # Heuristic used in the experiment.
        # BUG FIX: was `qi.expdesign.ExpSparseHeuristic`, but no `qi` name is
        # imported anywhere in this file; use the directly-imported class.
        heuristic_class = ExpSparseHeuristic

        # Heuristic parameters
        params = ['base', 'scale']

        hh_opt = ParticleSwarmTemperingOptimizer(
            params,
            n_trials=n_trials,
            n_particles=n_particles,
            prior=prior,
            model=model,
            n_exp=n_exp,
            heuristic_class=heuristic_class
        )
        hh_opt(n_pso_iterations=5, n_pso_particles=6)
49+
class OptimizerTestMethods(object):
    """Shared optimizer test bodies, mixed into concrete TestCase subclasses.

    Subclasses set ``optimizer_class`` to the hyper-heuristic optimizer under
    test; this mixin itself is not collected as a test case.
    See http://stackoverflow.com/a/1323554/267841 for why this works.
    """

    optimizer_class = None

    def test_quad(self):
        # Quadratic bowl centred at 0.5 along each axis.
        def objective(x):
            return np.sum(10 * (x - 0.5) ** 2)

        optimizer = self.optimizer_class(['x', 'y', 'z', 'a'], fitness_function=objective)
        optimizer()

    def test_sin_sq(self):
        # Periodic sin^2 landscape, shifted by 0.2.
        def objective(x):
            return np.sum(np.sin(x - 0.2) ** 2)

        optimizer = self.optimizer_class(['x', 'y', 'z', 'a'], fitness_function=objective)
        optimizer()

    def test_rosenbrock(self):
        # Rosenbrock valley, with each pairwise term normalised by dimension.
        def objective(x):
            terms = [
                ((x[idx + 1] - x[idx] ** 2) ** 2 + (1 - x[idx]) ** 2) / len(x)
                for idx in range(len(x) - 1)
            ]
            return np.sum(terms)

        optimizer = self.optimizer_class(['x', 'y', 'z', 'a'], fitness_function=objective)
        optimizer()

    def test_perf_test_multiple_short(self):
        # Experiment configuration.
        n_trials = 20       # Times we repeat the set of experiments
        n_exp = 100         # Number of experiments in the set
        n_particles = 4000  # Number of points we track during the experiment

        # Randomized-benchmarking model; its parameters are ordered
        # 'p', 'A', 'B' with A + B < 1 and 0 < p < 1, hence the
        # postselection wrapper around the Gaussian prior below.
        model = rb.RandomizedBenchmarkingModel()
        prior = dist.PostselectedDistribution(
            dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])),
            model
        )

        # Heuristic under optimisation and the names of its free parameters.
        heuristic_class = ExpSparseHeuristic
        heuristic_params = ['base', 'scale']

        optimizer = self.optimizer_class(
            heuristic_params,
            n_trials=n_trials,
            n_particles=n_particles,
            prior=prior,
            model=model,
            n_exp=n_exp,
            heuristic_class=heuristic_class
        )
        optimizer(n_pso_iterations=5, n_pso_particles=6)
107+
108+
# Concrete test cases: each binds the shared OptimizerTestMethods bodies to a
# specific hyper-heuristic optimizer via the `optimizer_class` hook.

class TestPSO(DerandomizedTestCase, OptimizerTestMethods):
    # Plain particle-swarm optimization.
    optimizer_class = ParticleSwarmOptimizer

class TestPSSAO(DerandomizedTestCase, OptimizerTestMethods):
    # Particle swarm with simple annealing.
    optimizer_class = ParticleSwarmSimpleAnnealingOptimizer

class TestPSTO(DerandomizedTestCase, OptimizerTestMethods):
    # Particle swarm with tempering.
    optimizer_class = ParticleSwarmTemperingOptimizer

0 commit comments

Comments
 (0)