
Commit 0f61c2b

Port over changes
1 parent b9e3ff5 commit 0f61c2b

File tree

.github/workflows/python-package.yml
examples/horizon.py
src/progpy/predictors/monte_carlo.py
tests/__main__.py
tests/test_horizon.py

5 files changed: +117 -25 lines

.github/workflows/python-package.yml

Lines changed: 18 additions & 0 deletions
@@ -228,6 +228,24 @@ jobs:
         run: pip install --upgrade --upgrade-strategy eager -e .
       - name: Run tests
         run: python -m tests.test_examples
+  test_horizon:
+    timeout-minutes: 5
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.7'
+      - name: Install dependencies cache
+        uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: pip-cache
+      - name: Update
+        run: pip install --upgrade --upgrade-strategy eager -e .
+      - name: Run tests
+        run: python -m tests.test_horizon
   test_linear_model:
     timeout-minutes: 5
     runs-on: ubuntu-latest

examples/horizon.py

Lines changed: 12 additions & 25 deletions
@@ -12,48 +12,35 @@
     ii) Time event is predicted to occur (with uncertainty)
 """
 
+import numpy as np
 from progpy.models.thrown_object import ThrownObject
-from progpy import *
+from progpy.predictors import MonteCarlo
+from progpy.uncertain_data import MultivariateNormalDist
 from pprint import pprint
 
 def run_example():
-    # Step 1: Setup model & future loading
+    # Step 1: Setup model, future loading, and state
     def future_loading(t, x=None):
         return {}
-    m = ThrownObject(process_noise=0.25, measurement_noise=0.2)
+    m = ThrownObject(process_noise=0.5, measurement_noise=0.15)
     initial_state = m.initialize()
 
-    # Step 2: Demonstrating state estimator
-    print("\nPerforming State Estimation Step...")
-
-    # Step 2a: Setup
     NUM_SAMPLES = 1000
-    filt = state_estimators.ParticleFilter(m, initial_state, num_particles=NUM_SAMPLES)
-    # VVV Uncomment this to use UKF State Estimator VVV
-    # filt = state_estimators.UnscentedKalmanFilter(batt, initial_state)
-
-    # Step 2b: One step of state estimator
-    u = m.InputContainer({})  # No input for ThrownObject
-    filt.estimate(0.1, u, m.output(initial_state))
-
-    # Note: in a prognostic application the above state estimation
-    # step would be repeated each time there is new data.
-    # Here we're doing one step to demonstrate how the state estimator is used
+    x = MultivariateNormalDist(initial_state.keys(), initial_state.values(), np.diag([x_i*0.01 for x_i in initial_state.values()]))
 
-    # Step 3: Demonstrating Predictor
+    # Step 2: Demonstrating Predictor
    print("\nPerforming Prediction Step...")
 
-    # Step 3a: Setup Predictor
-    mc = predictors.MonteCarlo(m)
+    # Step 2a: Setup Predictor
+    mc = MonteCarlo(m)
 
-    # Step 3b: Perform a prediction
+    # Step 2b: Perform a prediction
     # THIS IS WHERE WE DIVERGE FROM THE THROWN_OBJECT_EXAMPLE
     # Here we set a prediction horizon
     # We're saying we are not interested in any events that occur after this time
-    PREDICTION_HORIZON = 7.75
-    samples = filt.x  # Since we're using a particle filter, which is also sample-based, we can directly use the samples, without changes
+    PREDICTION_HORIZON = 7.7
     STEP_SIZE = 0.01
-    mc_results = mc.predict(samples, future_loading, dt=STEP_SIZE, horizon=PREDICTION_HORIZON)
+    mc_results = mc.predict(x, future_loading, n_samples=NUM_SAMPLES, dt=STEP_SIZE, horizon=PREDICTION_HORIZON)
     print("\nPredicted Time of Event:")
     metrics = mc_results.time_of_event.metrics()
     pprint(metrics)  # Note this takes some time
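
A note on the new initial-state line: instead of running a particle filter first, the example now wraps the initial state in a MultivariateNormalDist whose covariance is diagonal, with each variance set to 1% of the corresponding state mean. A minimal standalone sketch of that construction, using made-up state values for illustration (the real keys and means come from m.initialize()):

import numpy as np

# Hypothetical state for illustration: position x and velocity v.
# In the example, these come from initial_state.keys()/values().
keys = ['x', 'v']
means = [1.83, 40.0]

# Diagonal covariance with each variance at 1% of the corresponding mean,
# mirroring the np.diag([x_i*0.01 ...]) expression in the diff
cov = np.diag([x_i * 0.01 for x_i in means])  # -> diag(0.0183, 0.4)

Since there are no longer pre-existing particle-filter samples to reuse, n_samples=NUM_SAMPLES is now passed to predict so the Monte Carlo predictor draws its own samples from the distribution.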

src/progpy/predictors/monte_carlo.py

Lines changed: 6 additions & 0 deletions
@@ -74,6 +74,7 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)
 
         # Perform prediction
         t0 = params.get('t0', 0)
+        HORIZON = params.get('horizon', float('inf'))  # Save the horizon to be used later
         for x in state:
             first_output = self.model.output(x)
 
@@ -82,6 +83,7 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)
 
             params['t0'] = t0
             params['x'] = x
+            params['horizon'] = HORIZON  # Reset to the initial horizon
 
             if 'save_freq' in params and not isinstance(params['save_freq'], tuple):
                 params['save_freq'] = (params['t0'], params['save_freq'])
@@ -103,6 +105,10 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)
 
             # Non-vectorized prediction
             while len(events_remaining) > 0:  # Still events to predict
+                # The horizon is relative to t0 (the prediction start time), so for
+                # each subsequent simulation we subtract the time already elapsed
+                # (the current t0 minus the initial t0) from the initial horizon
+                params['horizon'] = HORIZON - (params['t0'] - t0)
                 (t, u, xi, z, es) = simulate_to_threshold(future_loading_eqn,
                     first_output,
                     threshold_keys=events_remaining,
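
The rebasing line added in the last hunk is the heart of the fix: the caller's horizon is relative to the overall prediction start time, but each pass through the while loop restarts the simulation from the time the previous event was reached, so the remaining horizon must shrink by the time already simulated. A standalone sketch of the arithmetic, with made-up times:

# Standalone illustration of the horizon rebasing above (made-up times)
HORIZON = 7.7          # horizon relative to the prediction start time
t0 = 0.0               # prediction start time
params = {'t0': 3.2}   # suppose the first event was reached at t = 3.2

# Remaining horizon for the next simulation: 7.7 - (3.2 - 0.0) = 4.5
params['horizon'] = HORIZON - (params['t0'] - t0)
print(params['horizon'])  # ~4.5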

tests/__main__.py

Lines changed: 6 additions & 0 deletions
@@ -11,6 +11,7 @@
 from tests.test_ensemble import main as ensemble_main
 from tests.test_estimate_params import main as estimate_params_main
 from tests.test_examples import main as examples_main
+from tests.test_horizon import main as horizon_main
 from tests.test_linear_model import main as linear_main
 from tests.test_metrics import main as metrics_main
 from tests.test_pneumatic_valve import main as pneumatic_valve_main
@@ -77,6 +78,11 @@
     except Exception:
         was_successful = False
 
+    try:
+        horizon_main()
+    except Exception:
+        was_successful = False
+
     try:
         linear_main()
     except Exception:

tests/test_horizon.py

Lines changed: 75 additions & 0 deletions
@@ -0,0 +1,75 @@
+from io import StringIO
+import sys
+import unittest
+
+from progpy import predictors
+from progpy.models import ThrownObject
+
+class TestHorizon(unittest.TestCase):
+    def setUp(self):
+        # Redirect stdout (so the test won't print)
+        sys.stdout = StringIO()
+
+    def tearDown(self):
+        sys.stdout = sys.__stdout__
+
+    def test_horizon_ex(self):
+        # Setup model
+        m = ThrownObject(process_noise=0.25, measurement_noise=0.2)
+        # Change parameters (to make simulation faster)
+        m.parameters['thrower_height'] = 1.0
+        m.parameters['throwing_speed'] = 10.0
+        initial_state = m.initialize()
+
+        # Define future loading (necessary for the prediction call)
+        def future_loading(t, x=None):
+            return {}
+
+        # Setup predictor (smaller sample size for efficiency)
+        mc = predictors.MonteCarlo(m)
+        mc.parameters['n_samples'] = 50
+
+        # Perform a prediction
+        # With this horizon, all samples will reach 'falling', but only some will reach 'impact'
+        PREDICTION_HORIZON = 2.127
+        STEP_SIZE = 0.001
+        mc_results = mc.predict(initial_state, future_loading, dt=STEP_SIZE, horizon=PREDICTION_HORIZON)
+
+        # 'falling' happens before the horizon is met
+        falling_res = [mc_results.time_of_event[i]['falling'] for i in range(mc.parameters['n_samples']) if mc_results.time_of_event[i]['falling'] is not None]
+        self.assertEqual(len(falling_res), mc.parameters['n_samples'])
+
+        # 'impact' happens around the horizon, so some samples have reached this event and others haven't
+        impact_res = [mc_results.time_of_event[i]['impact'] for i in range(mc.parameters['n_samples']) if mc_results.time_of_event[i]['impact'] is not None]
+        self.assertLess(len(impact_res), mc.parameters['n_samples'])
+
+        # Try again with a very low prediction horizon, where no events are reached
+        # Note: here we count how many None values there are for each event (above and below, we count values that are NOT None)
+        mc_results_no_event = mc.predict(initial_state, future_loading, dt=STEP_SIZE, horizon=0.3)
+        falling_res_no_event = [mc_results_no_event.time_of_event[i]['falling'] for i in range(mc.parameters['n_samples']) if mc_results_no_event.time_of_event[i]['falling'] is None]
+        impact_res_no_event = [mc_results_no_event.time_of_event[i]['impact'] for i in range(mc.parameters['n_samples']) if mc_results_no_event.time_of_event[i]['impact'] is None]
+        self.assertEqual(len(falling_res_no_event), mc.parameters['n_samples'])
+        self.assertEqual(len(impact_res_no_event), mc.parameters['n_samples'])
+
+        # Finally, try without a horizon; all events should be reached for all samples
+        mc_results_all_event = mc.predict(initial_state, future_loading, dt=STEP_SIZE)
+        falling_res_all_event = [mc_results_all_event.time_of_event[i]['falling'] for i in range(mc.parameters['n_samples']) if mc_results_all_event.time_of_event[i]['falling'] is not None]
+        impact_res_all_event = [mc_results_all_event.time_of_event[i]['impact'] for i in range(mc.parameters['n_samples']) if mc_results_all_event.time_of_event[i]['impact'] is not None]
+        self.assertEqual(len(falling_res_all_event), mc.parameters['n_samples'])
+        self.assertEqual(len(impact_res_all_event), mc.parameters['n_samples'])
+
+# This allows the module to be executed directly
+def run_tests():
+    unittest.main()
+
+def main():
+    load_test = unittest.TestLoader()
+    runner = unittest.TextTestRunner()
+    print("\n\nTesting Horizon functionality")
+    result = runner.run(load_test.loadTestsFromTestCase(TestHorizon)).wasSuccessful()
+
+    if not result:
+        raise Exception("Failed test")
+
+if __name__ == '__main__':
+    main()
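
The new CI job runs this file as a module (python -m tests.test_horizon), which reaches the __main__ guard above; the suite aggregator in tests/__main__.py instead imports main and calls it, treating the raised exception as a failure. A minimal sketch of that second invocation path:

# Programmatic invocation, mirroring the tests/__main__.py hunk above
from tests.test_horizon import main as horizon_main

was_successful = True
try:
    horizon_main()  # raises Exception("Failed test") if any test fails
except Exception:
    was_successful = False
print("horizon tests passed" if was_successful else "horizon tests failed")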
