Skip to content

Commit 95094e6

Browse files
committed
Fixed import, tabs → spaces.
1 parent 0f1f598 commit 95094e6

File tree

1 file changed

+151
-149
lines changed

1 file changed

+151
-149
lines changed

src/qinfer/tests/test_optimiser.py

Lines changed: 151 additions & 149 deletions
Original file line numberDiff line numberDiff line change
@@ -30,178 +30,180 @@
3030

3131
## IMPORTS ####################################################################
3232

33-
import qinfer.rb as rb
34-
import qinfer.distributions as dist
33+
from functools import partial
3534

3635
import numpy as np
3736
import random as rnd
3837

39-
from functools import partial
38+
import qinfer.rb as rb
39+
import qinfer.distributions as dist
40+
41+
from qinfer.tests.base_test import DerandomizedTestCase
4042

4143
## CLASSES ####################################################################
4244

4345
class TestPSO(DerandomizedTestCase):
4446

45-
def test_pso_quad(self):
46-
f_quad = lambda x: numpy.sum(10 * (x-0.5)**2)
47-
hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_quad)
48-
hh_opt()
49-
50-
def test_pso_sin_sq(self):
51-
f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2)
52-
hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_sin_sq)
53-
hh_opt()
54-
55-
def test_pso_rosenbrock(self):
56-
f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)])
57-
hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock)
58-
hh_opt()
59-
60-
61-
def test_pso_perf_test_multiple_short(self):
62-
# Define our experiment
63-
n_trials = 20 # Times we repeat the set of experiments
64-
n_exp = 100 # Number of experiments in the set
65-
n_particles = 4000 # Number of points we track during the experiment
66-
67-
# Model for the experiment
68-
model = rb.RandomizedBenchmarkingModel()
69-
70-
#Ordering of RB is 'p', 'A', 'B'
71-
# A + B < 1, 0 < p < 1
72-
#Prior distribution of the experiment
73-
prior = dist.PostselectedDistribution(
74-
dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])),
75-
model
76-
)
77-
78-
#Heuristic used in the experiment
79-
heuristic_class = qi.expdesign.ExpSparseHeuristic
80-
81-
#Heuristic Parameters
82-
params = ['base', 'scale']
83-
84-
#Fitness function to evaluate the performance of the experiment
85-
EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0)
86-
87-
hh_opt = ParticleSwarmOptimizer(params,
88-
n_trials = n_trials,
89-
n_particles = n_particles,
90-
prior = prior,
91-
model = model,
92-
n_exp = n_exp,
93-
heuristic_class = heuristic_class
94-
)
95-
hh_opt(n_pso_iterations=5,
96-
n_pso_particles=6)
47+
def test_pso_quad(self):
48+
f_quad = lambda x: np.sum(10 * (x-0.5)**2)
49+
hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_quad)
50+
hh_opt()
51+
52+
def test_pso_sin_sq(self):
53+
f_sin_sq = lambda x: np.sum(np.sin(x - 0.2)**2)
54+
hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_sin_sq)
55+
hh_opt()
56+
57+
def test_pso_rosenbrock(self):
58+
f_rosenbrock = lambda x: np.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)])
59+
hh_opt = ParticleSwarmOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock)
60+
hh_opt()
61+
62+
63+
def test_pso_perf_test_multiple_short(self):
64+
# Define our experiment
65+
n_trials = 20 # Times we repeat the set of experiments
66+
n_exp = 100 # Number of experiments in the set
67+
n_particles = 4000 # Number of points we track during the experiment
68+
69+
# Model for the experiment
70+
model = rb.RandomizedBenchmarkingModel()
71+
72+
#Ordering of RB is 'p', 'A', 'B'
73+
# A + B < 1, 0 < p < 1
74+
#Prior distribution of the experiment
75+
prior = dist.PostselectedDistribution(
76+
dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])),
77+
model
78+
)
79+
80+
#Heuristic used in the experiment
81+
heuristic_class = qi.expdesign.ExpSparseHeuristic
82+
83+
#Heuristic Parameters
84+
params = ['base', 'scale']
85+
86+
#Fitness function to evaluate the performance of the experiment
87+
EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0)
88+
89+
hh_opt = ParticleSwarmOptimizer(params,
90+
n_trials = n_trials,
91+
n_particles = n_particles,
92+
prior = prior,
93+
model = model,
94+
n_exp = n_exp,
95+
heuristic_class = heuristic_class
96+
)
97+
hh_opt(n_pso_iterations=5,
98+
n_pso_particles=6)
9799

98100
class TestPSSAO(DerandomizedTestCase):
99101

100-
def test_pssao_quad(self):
101-
f_quad = lambda x: numpy.sum(10 * (x-0.5)**2)
102-
hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_quad)
103-
hh_opt()
102+
def test_pssao_quad(self):
103+
f_quad = lambda x: np.sum(10 * (x-0.5)**2)
104+
hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_quad)
105+
hh_opt()
104106

105-
def test_pssao_sin_sq(self):
106-
f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2)
107-
hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq)
108-
hh_opt()
107+
def test_pssao_sin_sq(self):
108+
f_sin_sq = lambda x: np.sum(np.sin(x - 0.2)**2)
109+
hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq)
110+
hh_opt()
109111

110-
def test_pssao_rosenbrock(self):
111-
f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)])
112-
hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock)
113-
hh_opt()
112+
def test_pssao_rosenbrock(self):
113+
f_rosenbrock = lambda x: np.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)])
114+
hh_opt = ParticleSwarmSimpleAnnealingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock)
115+
hh_opt()
114116

115117

116-
def test_pssao_perf_test_multiple_short(self):
117-
# Define our experiment
118-
n_trials = 20 # Times we repeat the set of experiments
119-
n_exp = 150 # Number of experiments in the set
120-
n_particles = 4000 # Number of points we track during the experiment
118+
def test_pssao_perf_test_multiple_short(self):
119+
# Define our experiment
120+
n_trials = 20 # Times we repeat the set of experiments
121+
n_exp = 150 # Number of experiments in the set
122+
n_particles = 4000 # Number of points we track during the experiment
121123

122-
# Model for the experiment
123-
model = rb.RandomizedBenchmarkingModel()
124+
# Model for the experiment
125+
model = rb.RandomizedBenchmarkingModel()
124126

125-
#Ordering of RB is 'p', 'A', 'B'
126-
# A + B < 1, 0 < p < 1
127-
#Prior distribution of the experiment
128-
prior = dist.PostselectedDistribution(
129-
dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])),
130-
model
131-
)
127+
#Ordering of RB is 'p', 'A', 'B'
128+
# A + B < 1, 0 < p < 1
129+
#Prior distribution of the experiment
130+
prior = dist.PostselectedDistribution(
131+
dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])),
132+
model
133+
)
132134

133-
#Heuristic used in the experiment
134-
heuristic_class = qi.expdesign.ExpSparseHeuristic
135+
#Heuristic used in the experiment
136+
heuristic_class = qi.expdesign.ExpSparseHeuristic
135137

136-
#Heuristic Parameters
137-
params = ['base', 'scale']
138+
#Heuristic Parameters
139+
params = ['base', 'scale']
138140

139-
#Fitness function to evaluate the performance of the experiment
140-
EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0)
141+
#Fitness function to evaluate the performance of the experiment
142+
EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0)
141143

142-
hh_opt = ParticleSwarmSimpleAnnealingOptimizer(params,
143-
n_trials = n_trials,
144-
n_particles = n_particles,
145-
prior = prior,
146-
model = model,
147-
n_exp = n_exp,
148-
heuristic_class = heuristic_class
149-
)
150-
hh_opt(n_pso_iterations=5,
151-
n_pso_particles=6)
144+
hh_opt = ParticleSwarmSimpleAnnealingOptimizer(params,
145+
n_trials = n_trials,
146+
n_particles = n_particles,
147+
prior = prior,
148+
model = model,
149+
n_exp = n_exp,
150+
heuristic_class = heuristic_class
151+
)
152+
hh_opt(n_pso_iterations=5,
153+
n_pso_particles=6)
152154

153155

154156
class TestPSTO(DerandomizedTestCase):
155157

156-
def test_psto_quad(self):
157-
f_quad = lambda x: numpy.sum(10 * (x-0.5)**2)
158-
hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_quad)
159-
hh_opt()
160-
161-
def test_psto_sin_sq(self):
162-
f_sin_sq = lambda x: numpy.sum(np.sin(x - 0.2)**2)
163-
hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq)
164-
hh_opt()
165-
166-
def test_psto_rosenbrock(self):
167-
f_rosenbrock = lambda x: numpy.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)])
168-
hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock)
169-
hh_opt()
170-
171-
172-
def test_psto_perf_test_multiple_short(self):
173-
# Define our experiment
174-
n_trials = 20 # Times we repeat the set of experiments
175-
n_exp = 150 # Number of experiments in the set
176-
n_particles = 4000 # Number of points we track during the experiment
177-
178-
# Model for the experiment
179-
model = rb.RandomizedBenchmarkingModel()
180-
181-
#Ordering of RB is 'p', 'A', 'B'
182-
# A + B < 1, 0 < p < 1
183-
#Prior distribution of the experiment
184-
prior = dist.PostselectedDistribution(
185-
dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])),
186-
model
187-
)
188-
189-
#Heuristic used in the experiment
190-
heuristic_class = qi.expdesign.ExpSparseHeuristic
191-
192-
#Heuristic Parameters
193-
params = ['base', 'scale']
194-
195-
#Fitness function to evaluate the performance of the experiment
196-
EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0)
197-
198-
hh_opt = ParticleSwarmTemperingOptimizer(params,
199-
n_trials = n_trials,
200-
n_particles = n_particles,
201-
prior = prior,
202-
model = model,
203-
n_exp = n_exp,
204-
heuristic_class = heuristic_class
205-
)
206-
hh_opt(n_pso_iterations=5,
207-
n_pso_particles=6)
158+
def test_psto_quad(self):
159+
f_quad = lambda x: np.sum(10 * (x-0.5)**2)
160+
hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_quad)
161+
hh_opt()
162+
163+
def test_psto_sin_sq(self):
164+
f_sin_sq = lambda x: np.sum(np.sin(x - 0.2)**2)
165+
hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_sin_sq)
166+
hh_opt()
167+
168+
def test_psto_rosenbrock(self):
169+
f_rosenbrock = lambda x: np.sum([((x[i+1] - x[i]**2)**2 + (1 - x[i])**2)/len(x) for i in range(len(x)-1)])
170+
hh_opt = ParticleSwarmTemperingOptimizer(['x','y','z','a'], fitness_function = f_rosenbrock)
171+
hh_opt()
172+
173+
174+
def test_psto_perf_test_multiple_short(self):
175+
# Define our experiment
176+
n_trials = 20 # Times we repeat the set of experiments
177+
n_exp = 150 # Number of experiments in the set
178+
n_particles = 4000 # Number of points we track during the experiment
179+
180+
# Model for the experiment
181+
model = rb.RandomizedBenchmarkingModel()
182+
183+
#Ordering of RB is 'p', 'A', 'B'
184+
# A + B < 1, 0 < p < 1
185+
#Prior distribution of the experiment
186+
prior = dist.PostselectedDistribution(
187+
dist.MultivariateNormalDistribution(mean=[0.5,0.1,0.25], cov=np.diag([0.1, 0.1, 0.1])),
188+
model
189+
)
190+
191+
#Heuristic used in the experiment
192+
heuristic_class = qi.expdesign.ExpSparseHeuristic
193+
194+
#Heuristic Parameters
195+
params = ['base', 'scale']
196+
197+
#Fitness function to evaluate the performance of the experiment
198+
EXPERIMENT_FITNESS = lambda performance: performance['loss'][:,-1].mean(axis=0)
199+
200+
hh_opt = ParticleSwarmTemperingOptimizer(params,
201+
n_trials = n_trials,
202+
n_particles = n_particles,
203+
prior = prior,
204+
model = model,
205+
n_exp = n_exp,
206+
heuristic_class = heuristic_class
207+
)
208+
hh_opt(n_pso_iterations=5,
209+
n_pso_particles=6)

0 commit comments

Comments
 (0)