
Commit 53dce78

Minor refactor to allow running with 'python -m ...'

1 parent 65008de

4 files changed (+164, -122 lines)

switch_model/__main__.py

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ def runner():
         "solve-scenarios": get_module_runner("switch_model.solve_scenarios"),
         "test": get_module_runner("switch_model.test"),
         "upgrade": get_module_runner("switch_model.upgrade"),
-        "get_inputs": get_module_runner("switch_model.wecc.get_inputs"),
+        "get_inputs": get_module_runner("switch_model.wecc.get_inputs.cli"),
         "drop": get_module_runner("switch_model.tools.drop"),
         "new": get_module_runner("switch_model.tools.new"),
         "graph": get_module_runner("switch_model.tools.graph.cli_graph"),
Lines changed: 0 additions & 71 deletions
@@ -1,71 +0,0 @@
-""" Script to retrieve the input data from the switch-wecc database and apply post-processing steps.
-"""
-import argparse
-import os
-
-from switch_model.utilities import query_yes_no, StepTimer
-from switch_model.wecc.get_inputs.get_inputs import query_db
-from switch_model.wecc.get_inputs.register_post_process import run_post_process
-from switch_model.wecc.utilities import load_config
-from switch_model.wecc.get_inputs.post_process_steps import *
-
-
-def main():
-    timer = StepTimer()
-
-    # Create command line tool, just provides help information
-    parser = argparse.ArgumentParser(
-        description="Write SWITCH input files from database tables.",
-        epilog="""
-        This tool will populate the inputs folder with the data from the PostgreSQL database.
-        config.yaml specifies the scenario parameters.
-        The environment variable DB_URL specifies the url to connect to the database. """,
-    )
-    parser.add_argument(
-        "--skip-cf",
-        default=False,
-        action="store_true",
-        help="Skip creation of variable_capacity_factors.csv. Useful when debugging and one doesn't "
-        "want to wait for the command.",
-    )
-    parser.add_argument(
-        "--post-process", default=None, help="Run only this post process step."
-    )
-    parser.add_argument(
-        "--overwrite",
-        default=False,
-        action="store_true",
-        help="Overwrite previous input files without prompting to confirm.",
-    )
-    args = parser.parse_args()  # Makes `switch get_inputs --help` work
-
-    # Load values from config.yaml
-    full_config = load_config()
-    switch_to_input_dir(full_config, overwrite=args.overwrite)
-
-    if args.post_process is None:
-        query_db(full_config, skip_cf=args.skip_cf)
-    print("Post-processing...")
-    run_post_process(full_config, step_name=args.post_process)
-    print(f"\nScript took {timer.step_time_as_str()} seconds to build input tables.")
-
-
-def switch_to_input_dir(config, overwrite):
-    inputs_dir = config["inputs_dir"]
-
-    # Create inputs_dir if it doesn't exist
-    if not os.path.exists(inputs_dir):
-        os.makedirs(inputs_dir)
-        print("Inputs directory created.")
-    else:
-        if not overwrite and not query_yes_no(
-            "Inputs directory already exists. Allow contents to be overwritten?"
-        ):
-            raise SystemExit("User cancelled run.")
-
-    os.chdir(inputs_dir)
-    return inputs_dir
-
-
-if __name__ == "__main__":
-    main()
Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
+""" Script to retrieve the input data from the switch-wecc database and apply post-processing steps.
+"""
+import argparse
+import os
+
+from switch_model.utilities import query_yes_no, StepTimer
+from switch_model.wecc.get_inputs.get_inputs import query_db
+from switch_model.wecc.get_inputs.register_post_process import run_post_process
+from switch_model.wecc.utilities import load_config
+from switch_model.wecc.get_inputs.post_process_steps import *
+
+
+def main():
+    timer = StepTimer()
+
+    # Create command line tool, just provides help information
+    parser = argparse.ArgumentParser(
+        description="Write SWITCH input files from database tables.",
+        epilog="""
+        This tool will populate the inputs folder with the data from the PostgreSQL database.
+        config.yaml specifies the scenario parameters.
+        The environment variable DB_URL specifies the url to connect to the database. """,
+    )
+    parser.add_argument(
+        "--skip-cf",
+        default=False,
+        action="store_true",
+        help="Skip creation of variable_capacity_factors.csv. Useful when debugging and one doesn't "
+        "want to wait for the command.",
+    )
+    parser.add_argument(
+        "--post-process", default=None, help="Run only this post process step."
+    )
+    parser.add_argument(
+        "--overwrite",
+        default=False,
+        action="store_true",
+        help="Overwrite previous input files without prompting to confirm.",
+    )
+    args = parser.parse_args()  # Makes `switch get_inputs --help` work
+
+    # Load values from config.yaml
+    full_config = load_config()
+    switch_to_input_dir(full_config, overwrite=args.overwrite)
+
+    if args.post_process is None:
+        query_db(full_config, skip_cf=args.skip_cf)
+    print("Post-processing...")
+    run_post_process(full_config, step_name=args.post_process)
+    print(f"\nScript took {timer.step_time_as_str()} seconds to build input tables.")
+
+
+def switch_to_input_dir(config, overwrite):
+    inputs_dir = config["inputs_dir"]
+
+    # Create inputs_dir if it doesn't exist
+    if not os.path.exists(inputs_dir):
+        os.makedirs(inputs_dir)
+        print("Inputs directory created.")
+    else:
+        if not overwrite and not query_yes_no(
+            "Inputs directory already exists. Allow contents to be overwritten?"
+        ):
+            raise SystemExit("User cancelled run.")
+
+    os.chdir(inputs_dir)
+    return inputs_dir
+
+
+if __name__ == "__main__":
+    main()
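The 71 lines above are re-added verbatim under a new path, i.e. a file move; the filename headers for these two files were not captured on this page, but the `__main__.py` change suggests the destination is `switch_model/wecc/get_inputs/cli.py`. Under that assumption, the same entry point can be reached either through the `switch` wrapper or directly with `python -m`; a hedged invocation sketch:

# Illustrative only; assumes the script above landed in
# switch_model/wecc/get_inputs/cli.py as suggested by the __main__.py change.
import sys
from switch_model.wecc.get_inputs.cli import main

# Same effect as running either of these from a shell:
#   switch get_inputs --overwrite
#   python -m switch_model.wecc.get_inputs.cli --overwrite
sys.argv = ["get_inputs", "--overwrite"]  # argparse reads flags from sys.argv
main()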

switch_model/wecc/get_inputs/post_process_steps/aggregate_candidate_projects.py

Lines changed: 92 additions & 50 deletions
@@ -36,7 +36,8 @@ def post_process(config):
     assert "Hydro_Pumped" not in agg_techs

     print(
-        f"\t\tAggregating on projects where gen_tech in {agg_techs} with capacity factors from the {cf_quantile * 100}th percentile")
+        f"\t\tAggregating on projects where gen_tech in {agg_techs} with capacity factors from the {cf_quantile * 100}th percentile"
+    )
     key = "GENERATION_PROJECT"

     #################
@@ -177,25 +178,39 @@ def create_capacity_factors():
     outputs/dispatch.csv (to know the DispatchGen and Curtailment)
     """
     # Read the projects
-    projects = pd.read_csv("inputs/generation_projects_info.csv",
-                           usecols=["GENERATION_PROJECT", "gen_tech", "gen_is_variable", "gen_is_baseload",
-                                    "gen_forced_outage_rate"],
-                           dtype={"GENERATION_PROJECT": str},
-                           index_col=False)
+    projects = pd.read_csv(
+        "inputs/generation_projects_info.csv",
+        usecols=[
+            "GENERATION_PROJECT",
+            "gen_tech",
+            "gen_is_variable",
+            "gen_is_baseload",
+            "gen_forced_outage_rate",
+        ],
+        dtype={"GENERATION_PROJECT": str},
+        index_col=False,
+    )
     # Filter out predetermined plants
-    predetermined = pd.read_csv("inputs/gen_build_predetermined.csv", usecols=["GENERATION_PROJECT"],
-                                dtype={"GENERATION_PROJECT": str},
-                                index_col=False)["GENERATION_PROJECT"]
+    predetermined = pd.read_csv(
+        "inputs/gen_build_predetermined.csv",
+        usecols=["GENERATION_PROJECT"],
+        dtype={"GENERATION_PROJECT": str},
+        index_col=False,
+    )["GENERATION_PROJECT"]
     n = len(projects)
     projects = projects[~projects["GENERATION_PROJECT"].isin(predetermined)]
     print(f"Removed {n - len(projects)} projects that were predetermined plants.")
     del predetermined
     # Determine the gen_techs where gen_is_variable is always True and gen_is_baseload is always False.
     # Grouping and summing works since summing Falses gives 0 but summing Trues gives >0.
     projects["gen_is_not_variable"] = ~projects["gen_is_variable"]
-    grouped_projects = projects.groupby("gen_tech", as_index=False)[["gen_is_not_variable", "gen_is_baseload"]].sum()
+    grouped_projects = projects.groupby("gen_tech", as_index=False)[
+        ["gen_is_not_variable", "gen_is_baseload"]
+    ].sum()
     grouped_projects = grouped_projects[
-        (grouped_projects["gen_is_not_variable"] == 0) & (grouped_projects["gen_is_baseload"] == 0)]
+        (grouped_projects["gen_is_not_variable"] == 0)
+        & (grouped_projects["gen_is_baseload"] == 0)
+    ]
     gen_tech = grouped_projects["gen_tech"]
     del grouped_projects
     print(f"Aggregating for gen_tech: {gen_tech.values}")
@@ -207,86 +222,113 @@ def create_capacity_factors():
     print(f"Removed {n - len(projects)} projects that aren't of allowed gen_tech.")

     # Calculate the gen_forced_outage_rate and verify it is identical for all the projects within the same group
-    outage_rates = projects.groupby("gen_tech", as_index=False)["gen_forced_outage_rate"]
+    outage_rates = projects.groupby("gen_tech", as_index=False)[
+        "gen_forced_outage_rate"
+    ]
     if (outage_rates.nunique()["gen_forced_outage_rate"] - 1).sum() != 0:
-        outage_rates = outage_rates.nunique().set_index("gen_tech")["gen_forced_outage_rate"] - 1
+        outage_rates = (
+            outage_rates.nunique().set_index("gen_tech")["gen_forced_outage_rate"] - 1
+        )
         outage_rates = outage_rates[outage_rates != 0]
         raise Exception(
-            f"These generation technologies have different forced outage rates: {outage_rates.index.values}")
-    outage_rates = outage_rates.mean()  # They're all the same so mean returns the proper value
+            f"These generation technologies have different forced outage rates: {outage_rates.index.values}"
+        )
+    outage_rates = (
+        outage_rates.mean()
+    )  # They're all the same so mean returns the proper value
     del projects
     print("Check passed: gen_forced_outage_rate is identical.")

     # Read the dispatch instructions
-    dispatch = pd.read_csv("outputs/dispatch.csv",
-                           usecols=["generation_project", "timestamp", "gen_tech", "gen_load_zone", "DispatchGen_MW",
-                                    "Curtailment_MW"],
-                           index_col=False,
-                           dtype={"generation_project": str})
+    dispatch = pd.read_csv(
+        "outputs/dispatch.csv",
+        usecols=[
+            "generation_project",
+            "timestamp",
+            "gen_tech",
+            "gen_load_zone",
+            "DispatchGen_MW",
+            "Curtailment_MW",
+        ],
+        index_col=False,
+        dtype={"generation_project": str},
+    )
     # Keep only valid projects
     dispatch = dispatch[dispatch["generation_project"].isin(valid_gens)]
     # Group by timestamp, gen_tech and load_zone
-    dispatch = dispatch.groupby(["timestamp", "gen_tech", "gen_load_zone"], as_index=False).sum()
+    dispatch = dispatch.groupby(
+        ["timestamp", "gen_tech", "gen_load_zone"], as_index=False
+    ).sum()
     # Get the DispatchUpperLimit from DispatchGen + Curtailment
-    dispatch["DispatchUpperLimit"] = dispatch["DispatchGen_MW"] + dispatch["Curtailment_MW"]
+    dispatch["DispatchUpperLimit"] = (
+        dispatch["DispatchGen_MW"] + dispatch["Curtailment_MW"]
+    )
     dispatch = dispatch.drop(["DispatchGen_MW", "Curtailment_MW"], axis=1)

     # Add the period to each row by merging with outputs/timestamps.csv
-    timestamps = pd.read_csv("outputs/timestamps.csv",
-                             usecols=["timestamp", "period"],
-                             index_col=False)
+    timestamps = pd.read_csv(
+        "outputs/timestamps.csv", usecols=["timestamp", "period"], index_col=False
+    )
     dispatch = dispatch.merge(
-        timestamps,
-        on="timestamp",
-        how='left',
-        validate="many_to_one"
+        timestamps, on="timestamp", how="left", validate="many_to_one"
     )
     del timestamps

     # Read the gen_cap.csv
-    cap = pd.read_csv("outputs/gen_cap.csv",
-                      usecols=["GENERATION_PROJECT", "PERIOD", "gen_tech", "gen_load_zone", "GenCapacity"],
-                      index_col=False,
-                      dtype={"GENERATION_PROJECT": str}).rename({"PERIOD": "period"}, axis=1)
+    cap = pd.read_csv(
+        "outputs/gen_cap.csv",
+        usecols=[
+            "GENERATION_PROJECT",
+            "PERIOD",
+            "gen_tech",
+            "gen_load_zone",
+            "GenCapacity",
+        ],
+        index_col=False,
+        dtype={"GENERATION_PROJECT": str},
+    ).rename({"PERIOD": "period"}, axis=1)
     # Keep only valid projects
-    cap = cap[cap["GENERATION_PROJECT"].isin(valid_gens)].drop("GENERATION_PROJECT", axis=1)
+    cap = cap[cap["GENERATION_PROJECT"].isin(valid_gens)].drop(
+        "GENERATION_PROJECT", axis=1
+    )
     # Sum for the tech, period and load zone
     cap = cap.groupby(["period", "gen_tech", "gen_load_zone"], as_index=False).sum()
     # Merge onto dispatch
     dispatch = dispatch.merge(
         cap,
         on=["period", "gen_tech", "gen_load_zone"],
         how="left",
-        validate="many_to_one"
+        validate="many_to_one",
     )
     del cap

     # Filter out zones with no buildout
     is_no_buildout = dispatch["GenCapacity"] == 0
-    missing_data = dispatch\
-        [is_no_buildout]\
-        [["period", "gen_tech", "gen_load_zone"]]\
-        .drop_duplicates()\
-        .groupby(["period", "gen_tech"], as_index=False)["gen_load_zone"]\
-        .nunique()\
+    missing_data = (
+        dispatch[is_no_buildout][["period", "gen_tech", "gen_load_zone"]]
+        .drop_duplicates()
+        .groupby(["period", "gen_tech"], as_index=False)["gen_load_zone"]
+        .nunique()
         .rename({"gen_load_zone": "Number of Load Zones"}, axis=1)
+    )
     if missing_data["Number of Load Zones"].sum() > 0:
         warnings.warn(
-            f"Unable to make capacity factors for the following categories since total capacity in those zones is 0.\n{missing_data}")
+            f"Unable to make capacity factors for the following categories since total capacity in those zones is 0.\n{missing_data}"
+        )
     dispatch = dispatch[~is_no_buildout]

     # Merge outage rates onto dispatch
-    dispatch = dispatch.merge(
-        outage_rates,
-        on="gen_tech"
-    )
+    dispatch = dispatch.merge(outage_rates, on="gen_tech")
     del outage_rates

     dispatch["gen_max_capacity_factor"] = dispatch["DispatchUpperLimit"] / (
-        dispatch["GenCapacity"] * (1 - dispatch["gen_forced_outage_rate"]))
-    dispatch = dispatch[["gen_tech", "gen_load_zone", "timestamp", "gen_max_capacity_factor"]]
+        dispatch["GenCapacity"] * (1 - dispatch["gen_forced_outage_rate"])
+    )
+    dispatch = dispatch[
+        ["gen_tech", "gen_load_zone", "timestamp", "gen_max_capacity_factor"]
+    ]
     dispatch.to_csv("zonal_capacity_factors.csv", index=False)


-if __name__=="__main__":
-    create_capacity_factors()
+if __name__ == "__main__":
+    create_capacity_factors()
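The reflowed formula near the end of the hunk above derates installed capacity by the forced outage rate before dividing dispatchable output by it. A worked toy example with made-up numbers:

# Worked toy example of the capacity-factor formula in create_capacity_factors():
# available output divided by outage-derated capacity.
import pandas as pd

dispatch = pd.DataFrame(
    {
        "DispatchUpperLimit": [45.0],    # DispatchGen_MW + Curtailment_MW
        "GenCapacity": [100.0],          # installed MW in the zone
        "gen_forced_outage_rate": [0.1], # fraction of time forced offline
    }
)
dispatch["gen_max_capacity_factor"] = dispatch["DispatchUpperLimit"] / (
    dispatch["GenCapacity"] * (1 - dispatch["gen_forced_outage_rate"])
)
print(dispatch["gen_max_capacity_factor"].iloc[0])  # 45 / (100 * 0.9) = 0.5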
