Skip to content

Commit 4c70d28

Browse files
committed
Refactor add_storage package into post process step
1 parent 8e1898f commit 4c70d28

File tree

2 files changed

+125
-150
lines changed

2 files changed

+125
-150
lines changed

switch_model/tools/add_storage/__init__.py

Lines changed: 0 additions & 139 deletions
This file was deleted.
Lines changed: 125 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,135 @@
11
"""
2-
This post-process steps was used by Martin when studying LDES.
3-
4-
It adds the storage data to the input files.
2+
This post-process step was created by Martin Staadecker
3+
when studying long duration energy storage. It
4+
allows adding storage technologies from a Google Sheet to
5+
the csvs in the inputs folder.
56
"""
7+
import pandas as pd
8+
69
from switch_model.wecc.get_inputs.register_post_process import register_post_process
710

811

12+
def fetch_df(tab_name, key, config):
    """
    Download one tab of the storage Google Sheet as a DataFrame.

    tab_name selects the tab ("constants", "plants" or "costs"); when key
    is not None the frame is further narrowed to a single scenario via
    filer_by_scenario (the key is the scenario column name).
    """
    # Mapping from human-readable tab name to the numeric gid Google
    # Sheets uses in its CSV-export URL.
    tab_gids = {
        "constants": 0,
        "plants": 889129113,
        "costs": 1401952285,
    }
    sheet_id = "1SJrj039T1T95NLTs964VQnsfZgo2QWCo29x2ireVYcU"
    gid = tab_gids[tab_name]

    url = f"https://docs.google.com/spreadsheet/ccc?key={sheet_id}&output=csv&gid={gid}"

    # The sheet stores booleans as the strings "TRUE"/"FALSE"; normalize
    # them to 1/0 so downstream csvs get numeric flags.
    frame = pd.read_csv(url, index_col=False).replace("FALSE", 0).replace("TRUE", 1)

    if key is not None:
        frame = filer_by_scenario(frame, key, config)
    return frame
33+
34+
35+
def filer_by_scenario(df, scenario_column, config):
    """
    Restrict df to a single scenario and drop the scenario column.

    The scenario value comes from config[scenario_column] when present;
    otherwise the user is prompted interactively (an empty answer means
    scenario 0).

    NOTE(review): the name looks like a typo for "filter_by_scenario",
    but it is kept since callers use it under this name.
    """
    if scenario_column in config:
        chosen = config[scenario_column]
    else:
        answer = input(f"Which scenario do you want for '{scenario_column}' (default 0) : ")
        chosen = int(answer) if answer != "" else 0
    matching = df[df[scenario_column] == chosen]
    return matching.drop(scenario_column, axis=1)
46+
47+
48+
def cross_join(df1, df2):
    """
    Return the Cartesian product of the rows of df1 and df2.

    Columns are df1's followed by df2's, rows ordered df1-major with a
    fresh RangeIndex. Uses merge(how="cross") (pandas >= 1.2) rather
    than the classic assign-a-temporary-"key"-column trick, which
    silently produced wrong results whenever either frame already had a
    column named "key".
    """
    return df1.merge(df2, how="cross")
53+
54+
55+
def append_to_csv(filename, to_add, primary_key=None):
    """
    Append the rows of to_add to the existing input csv `filename`.

    Only columns already present in the csv are kept (extra columns in
    to_add are discarded, missing ones become NaN); the file is then
    rewritten in place.

    Parameters
    ----------
    filename : str
        Path of the csv to extend.
    to_add : pandas.DataFrame
        Rows to append.
    primary_key : str | list[str] | None
        Column name(s) that must stay unique across the combined table.

    Raises
    ------
    ValueError
        If the append would create duplicate primary keys. (Previously
        a bare `assert`, which is stripped under `python -O`.)
    """
    df = pd.read_csv(filename, index_col=False)
    # Reindex to the csv's original column set/order.
    df = pd.concat([df, to_add], ignore_index=True)[df.columns]
    if primary_key is not None and df[primary_key].duplicated().any():
        raise ValueError(
            f"Appending to {filename} produced duplicate values for primary key {primary_key}"
        )
    df.to_csv(filename, index=False)
65+
66+
67+
def get_gen_constants(config):
    """
    Fetch the "constants" tab of the Google Sheet and reshape it into a
    single-row frame whose columns are the parameter names, suitable
    for cross-joining onto every storage plant row.
    """
    constants = fetch_df("constants", "constant_scenario", config)
    return constants.set_index("param_name").transpose()
71+
72+
73+
def drop_previous_candidate_storage(storage_tech="Battery_Storage", expected_count=50):
    """
    Remove all pre-existing *candidate* storage projects from the inputs.

    A project counts as candidate storage when its gen_tech equals
    storage_tech and it does not appear in gen_build_predetermined.csv
    (i.e. it is not a predetermined build). Matching projects are removed
    from generation_projects_info.csv and gen_build_costs.csv, both of
    which are rewritten in place in the current working directory.

    Parameters
    ----------
    storage_tech : str
        gen_tech value that identifies storage projects.
    expected_count : int | None
        Sanity check on how many projects should be dropped
        (historically 50 — one candidate per load zone). Pass None to
        skip the check.

    Raises
    ------
    ValueError
        If the number of candidate projects found differs from
        expected_count. (Previously a bare `assert`, stripped under
        `python -O`.)
    """
    gen = pd.read_csv("generation_projects_info.csv", index_col=False)
    predetermined = pd.read_csv("gen_build_predetermined.csv", index_col=False)["GENERATION_PROJECT"]
    # Candidate storage = storage tech that is not predetermined.
    should_drop = (gen["gen_tech"] == storage_tech) & ~gen["GENERATION_PROJECT"].isin(predetermined)
    gen_to_drop = gen[should_drop]["GENERATION_PROJECT"]

    if expected_count is not None and len(gen_to_drop) != expected_count:
        raise ValueError(
            f"Expected to drop {expected_count} candidate storage projects, "
            f"found {len(gen_to_drop)}"
        )

    # Rewrite the project list without the candidates.
    gen[~should_drop].to_csv("generation_projects_info.csv", index=False)

    # Drop the same projects from the build-costs table.
    costs = pd.read_csv("gen_build_costs.csv", index_col=False)
    costs = costs[~costs["GENERATION_PROJECT"].isin(gen_to_drop)]
    costs.to_csv("gen_build_costs.csv", index=False)
97+
98+
999
@register_post_process(
    name="add_storage",
    msg="Adding storage from Google Sheets",
    only_with_config=True,
    priority=1  # Increased priority (default is 2) so that it always runs before replace_plants_in_zone_all.py
)
def main(config):
    """
    Post-process step: replace the previous candidate storage in the
    inputs folder with storage technologies pulled from the Google Sheet.
    """
    # Remove the old candidate storage projects before adding new ones.
    drop_previous_candidate_storage()

    # Build the storage plant rows: per-plant data cross-joined with the
    # shared per-generator constants, both filtered by scenario.
    constants = get_gen_constants(config)
    plants = fetch_df("plants", "plants_scenario", config)
    plants = cross_join(plants, constants)

    # Register the new plants in the projects table.
    append_to_csv("generation_projects_info.csv", plants, primary_key="GENERATION_PROJECT")

    # Pull the matching cost rows and append them to the costs table.
    costs = fetch_df("costs", "costs_scenario", config)
    append_to_csv("gen_build_costs.csv", costs, primary_key=["GENERATION_PROJECT", "build_year"])

    # Record the configuration that produced these inputs for traceability.
    pd.DataFrame([config]).transpose().to_csv("add_storage_info.csv", header=False)

    # Register the new tech/energy-source pairs with the graphing tools.
    tech_types = plants[["gen_tech", "gen_energy_source"]].drop_duplicates()
    tech_types.columns = ["gen_tech", "energy_source"]
    tech_types["map_name"] = "default"
    tech_types["gen_type"] = "Storage"
    existing = pd.read_csv("graph_tech_types.csv", index_col=False)
    pd.concat([existing, tech_types]).to_csv("graph_tech_types.csv", index=False)
132+
133+
134+
# Allow running this step directly as a script; with an empty config,
# every scenario choice is prompted for interactively.
if __name__ == "__main__":
    main({})

0 commit comments

Comments
 (0)