Skip to content

Commit ca90cb8

Browse files
authored
Merge pull request #75 from nasa/port_prog_algs_v1_5_1
Port over changes from prog_algs hotfix v1.5.1
2 parents 81d8448 + 464bae1 commit ca90cb8

File tree

5 files changed

+120
-24
lines changed

5 files changed

+120
-24
lines changed

.github/workflows/python-package.yml

Lines changed: 18 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -228,6 +228,24 @@ jobs:
228228
run: pip install --upgrade --upgrade-strategy eager -e .
229229
- name: Run tests
230230
run: python -m tests.test_examples
231+
test_horizon:
232+
timeout-minutes: 5
233+
runs-on: ubuntu-latest
234+
steps:
235+
- uses: actions/checkout@v3
236+
- name: Set up Python
237+
uses: actions/setup-python@v4
238+
with:
239+
python-version: '3.7'
240+
- name: Install dependencies cache
241+
uses: actions/cache@v2
242+
with:
243+
path: ~/.cache/pip
244+
key: pip-cache
245+
- name: Update
246+
run: pip install --upgrade --upgrade-strategy eager -e .
247+
- name: Run tests
248+
run: python -m tests.test_horizon
231249
test_linear_model:
232250
timeout-minutes: 5
233251
runs-on: ubuntu-latest

examples/horizon.py

Lines changed: 12 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -12,46 +12,34 @@
1212
ii) Time event is predicted to occur (with uncertainty)
1313
"""
1414

15+
import numpy as np
1516
from progpy.models.thrown_object import ThrownObject
16-
from progpy import *
17+
from progpy.predictors import MonteCarlo
18+
from progpy.uncertain_data import MultivariateNormalDist
1719
from pprint import pprint
1820

1921
def run_example():
2022
# Step 1: Setup model & future loading
21-
m = ThrownObject(process_noise=0.25, measurement_noise=0.2)
23+
m = ThrownObject(process_noise=0.5, measurement_noise=0.15)
2224
initial_state = m.initialize()
2325

24-
# Step 2: Demonstrating state estimator
25-
print("\nPerforming State Estimation Step...")
26-
27-
# Step 2a: Setup
2826
NUM_SAMPLES = 1000
29-
filt = state_estimators.ParticleFilter(m, initial_state, num_particles = NUM_SAMPLES)
30-
# VVV Uncomment this to use UKF State Estimator VVV
31-
# filt = state_estimators.UnscentedKalmanFilter(batt, initial_state)
32-
33-
# Step 2b: One step of state estimator
34-
u = m.InputContainer({}) # No input for ThrownObject
35-
filt.estimate(0.1, u, m.output(initial_state))
27+
x = MultivariateNormalDist(initial_state.keys(), initial_state.values(), np.diag([x_i*0.01 for x_i in initial_state.values()]))
3628

37-
# Note: in a prognostic application the above state estimation
38-
# step would be repeated each time there is new data.
39-
# Here we're doing one step to demonstrate how the state estimator is used
40-
41-
# Step 3: Demonstrating Predictor
29+
# Step 2: Demonstrating Predictor
4230
print("\nPerforming Prediction Step...")
4331

44-
# Step 3a: Setup Predictor
45-
mc = predictors.MonteCarlo(m)
32+
# Step 2a: Setup Predictor
33+
mc = MonteCarlo(m)
4634

47-
# Step 3b: Perform a prediction
35+
# Step 2b: Perform a prediction
4836
# THIS IS WHERE WE DIVERGE FROM THE THROWN_OBJECT_EXAMPLE
4937
# Here we set a prediction horizon
5038
# We're saying we are not interested in any events that occur after this time
51-
PREDICTION_HORIZON = 7.75
52-
samples = filt.x # Since we're using a particle filter, which is also sample-based, we can directly use the samples, without changes
39+
PREDICTION_HORIZON = 7.7
5340
STEP_SIZE = 0.01
54-
mc_results = mc.predict(samples, dt=STEP_SIZE, horizon=PREDICTION_HORIZON)
41+
mc_results = mc.predict(x, n_samples=NUM_SAMPLES,dt=STEP_SIZE, horizon = PREDICTION_HORIZON)
42+
5543
print("\nPredicted Time of Event:")
5644
metrics = mc_results.time_of_event.metrics()
5745
pprint(metrics) # Note this takes some time

src/progpy/predictors/monte_carlo.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable = None, **k
115115

116116
# Perform prediction
117117
t0 = params.get('t0', 0)
118+
HORIZON = params.get('horizon', float('inf')) # Save the horizon to be used later
118119
for x in state:
119120
first_output = self.model.output(x)
120121

@@ -123,6 +124,7 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable = None, **k
123124

124125
params['t0'] = t0
125126
params['x'] = x
127+
params['horizon'] = HORIZON # reset to initial horizon
126128

127129
if 'save_freq' in params and not isinstance(params['save_freq'], tuple):
128130
params['save_freq'] = (params['t0'], params['save_freq'])
@@ -144,6 +146,10 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable = None, **k
144146

145147
# Non-vectorized prediction
146148
while len(events_remaining) > 0: # Still events to predict
149+
# Since horizon is relative to t0 (the simulation starting point),
150+
# we must subtract the difference in current t0 from the initial (i.e., prediction t0)
151+
# each subsequent simulation
152+
params['horizon'] = HORIZON - (params['t0'] - t0)
147153
(t, u, xi, z, es) = simulate_to_threshold(future_loading_eqn,
148154
first_output,
149155
threshold_keys = events_remaining,

tests/__main__.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
from tests.test_ensemble import main as ensemble_main
1212
from tests.test_estimate_params import main as estimate_params_main
1313
from tests.test_examples import main as examples_main
14+
from tests.test_horizon import main as horizon_main
1415
from tests.test_linear_model import main as linear_main
1516
from tests.test_metrics import main as metrics_main
1617
from tests.test_pneumatic_valve import main as pneumatic_valve_main
@@ -77,6 +78,11 @@
7778
except Exception:
7879
was_successful = False
7980

81+
try:
82+
horizon_main()
83+
except Exception:
84+
was_successful = False
85+
8086
try:
8187
linear_main()
8288
except Exception:

tests/test_horizon.py

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
# Copyright © 2021 United States Government as represented by the Administrator of the
2+
# National Aeronautics and Space Administration. All Rights Reserved.
3+
4+
from io import StringIO
5+
import sys
6+
import unittest
7+
8+
from progpy import predictors
9+
from progpy.models import ThrownObject
10+
11+
class TestHorizon(unittest.TestCase):
    """Tests for the ``horizon`` keyword argument of ``MonteCarlo.predict``.

    Verifies that events occurring after the prediction horizon are not
    predicted (their time of event is ``None``), while events occurring
    before the horizon are predicted for every sample.
    """

    def setUp(self):
        # Redirect stdout so prediction progress output is not printed
        # while the tests run.
        sys.stdout = StringIO()

    def tearDown(self):
        # Restore the real stdout.
        sys.stdout = sys.__stdout__

    def _count_reached(self, results, event, n_samples):
        """Return how many of the *n_samples* samples reached *event*.

        A sample has reached the event when its predicted time of event
        is not ``None`` in *results*.
        """
        return sum(
            1 for i in range(n_samples)
            if results.time_of_event[i][event] is not None
        )

    def test_horizon_ex(self):
        """End-to-end check of horizon behavior on the ThrownObject model."""
        # Setup model
        m = ThrownObject(process_noise=0.25, measurement_noise=0.2)
        # Change parameters (to make simulation faster)
        m.parameters['thrower_height'] = 1.0
        m.parameters['throwing_speed'] = 10.0
        initial_state = m.initialize()

        # Define future loading (necessary for prediction call);
        # ThrownObject takes no input, so it is always empty.
        def future_loading(t, x=None):
            return {}

        # Setup Predictor (smaller sample size for efficiency)
        mc = predictors.MonteCarlo(m)
        mc.parameters['n_samples'] = 50
        n_samples = mc.parameters['n_samples']

        # Perform a prediction.
        # With this horizon, all samples will reach 'falling', but only
        # some will reach 'impact'.
        PREDICTION_HORIZON = 2.127
        STEP_SIZE = 0.001
        mc_results = mc.predict(initial_state, future_loading, dt=STEP_SIZE, horizon=PREDICTION_HORIZON)

        # 'falling' happens before the horizon is met
        self.assertEqual(self._count_reached(mc_results, 'falling', n_samples), n_samples)
        # 'impact' happens around the horizon, so some samples have
        # reached this event and others haven't
        self.assertLess(self._count_reached(mc_results, 'impact', n_samples), n_samples)

        # Try again with a very low prediction horizon, where no events
        # are reached by any sample.
        mc_results_no_event = mc.predict(initial_state, future_loading, dt=STEP_SIZE, horizon=0.3)
        self.assertEqual(self._count_reached(mc_results_no_event, 'falling', n_samples), 0)
        self.assertEqual(self._count_reached(mc_results_no_event, 'impact', n_samples), 0)

        # Finally, try without a horizon: all events should be reached
        # for all samples.
        mc_results_all_event = mc.predict(initial_state, future_loading, dt=STEP_SIZE)
        self.assertEqual(self._count_reached(mc_results_all_event, 'falling', n_samples), n_samples)
        self.assertEqual(self._count_reached(mc_results_all_event, 'impact', n_samples), n_samples)
63+
64+
# This allows the module to be executed directly
65+
def run_tests():
    """Entry point allowing the module to be executed directly via unittest."""
    unittest.main()
67+
68+
def main():
    """Run the TestHorizon suite; raise an Exception if any test fails."""
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner()
    print("\n\nTesting Horizon functionality")
    was_successful = runner.run(loader.loadTestsFromTestCase(TestHorizon)).wasSuccessful()

    if not was_successful:
        raise Exception("Failed test")
76+
77+
# Run the full suite when this file is executed as a script.
if __name__ == '__main__':
    main()

0 commit comments

Comments
 (0)