Commit 77dfb5d

Create script that generates capacity factors from past runs
1 parent dab5ab2 commit 77dfb5d

1 file changed: +144 -2 lines changed

switch_model/wecc/get_inputs/post_process_steps/aggregate_candidate_projects.py

Lines changed: 144 additions & 2 deletions
@@ -12,6 +12,8 @@

 3. We aggregate the variable_capacity_factors.csv by averaging the values for each timepoint
 """
+import warnings
+
 import numpy as np
 import pandas as pd

@@ -24,7 +26,7 @@
     only_with_config=True,
     priority=4,
 )
-def main(config):
+def post_process(config):
     agg_techs = config["agg_techs"]
     cf_quantile = config["cf_quantile"]
     assert type(agg_techs) == list
@@ -33,7 +35,8 @@ def main(config):
     assert "Hydro_NonPumped" not in agg_techs
     assert "Hydro_Pumped" not in agg_techs

-    print(f"\t\tAggregating on projects where gen_tech in {agg_techs} with capacity factors from the {cf_quantile*100}th percentile")
+    print(
+        f"\t\tAggregating on projects where gen_tech in {agg_techs} with capacity factors from the {cf_quantile * 100}th percentile")
     key = "GENERATION_PROJECT"

     #################
@@ -148,3 +151,142 @@ def agg_costs(x):
     # .rename({None: "gen_max_capacity_factor"}, axis=1)
     df = pd.concat([df, df_keep])
     df[columns].to_csv(filename, index=False)
+
+
+def create_capacity_factors():
+    """
+    This function creates a zonal_capacity_factors.csv file
+    that contains capacity factors aggregated by load_zone, timepoint and technology based on the dispatch
+    instructions for *candidate* renewable plants from the results of a previous run. Capacity
+    factors are calculated by aggregating all the candidate plants of the same gen_tech within a load
+    zone and using the following equation
+
+    capacity factor = (DispatchGen + Curtailment) / (GenCapacity * (1 - gen_forced_outage_rate))
+
+    This equation is essentially backtracking how DispatchUpperLimit is calculated in the SWITCH model.
+    See switch_model.generators.core.no_commit.py
+
+    Note that capacity factors are only calculated for technologies where all the candidate
+    plants are variable and not baseload (baseload plants have a different way of calculating the outage rate).
+
+    This function requires the following files
+    inputs/generation_projects_info.csv (to get gen_forced_outage_rate)
+    inputs/gen_build_predetermined.csv (to know which projects are candidate projects)
+    outputs/timestamps.csv (to find which timepoint matches which period)
+    outputs/gen_cap.csv (to find the GenCapacity during any period)
+    outputs/dispatch.csv (to know the DispatchGen and Curtailment)
+    """
+    # Read the projects
+    projects = pd.read_csv("inputs/generation_projects_info.csv",
+                           usecols=["GENERATION_PROJECT", "gen_tech", "gen_is_variable", "gen_is_baseload",
+                                    "gen_forced_outage_rate"],
+                           dtype={"GENERATION_PROJECT": str},
+                           index_col=False)
+    # Filter out predetermined plants
+    predetermined = pd.read_csv("inputs/gen_build_predetermined.csv", usecols=["GENERATION_PROJECT"],
+                                dtype={"GENERATION_PROJECT": str},
+                                index_col=False)["GENERATION_PROJECT"]
+    n = len(projects)
+    projects = projects[~projects["GENERATION_PROJECT"].isin(predetermined)]
+    print(f"Removed {n - len(projects)} projects that were predetermined plants.")
+    del predetermined
+    # Determine the gen_techs where gen_is_variable is always True and gen_is_baseload is always False.
+    # Grouping and summing works since summing Falses gives 0 but summing Trues gives >0.
+    projects["gen_is_not_variable"] = ~projects["gen_is_variable"]
+    grouped_projects = projects.groupby("gen_tech", as_index=False)[["gen_is_not_variable", "gen_is_baseload"]].sum()
+    grouped_projects = grouped_projects[
+        (grouped_projects["gen_is_not_variable"] == 0) & (grouped_projects["gen_is_baseload"] == 0)]
+    gen_tech = grouped_projects["gen_tech"]
+    del grouped_projects
+    print(f"Aggregating for gen_tech: {gen_tech.values}")
+
+    # Filter out projects that aren't variable or are baseload
+    n = len(projects)
+    projects = projects[projects["gen_tech"].isin(gen_tech)]
+    valid_gens = projects["GENERATION_PROJECT"]
+    print(f"Removed {n - len(projects)} projects that aren't of allowed gen_tech.")
+
+    # Calculate the gen_forced_outage_rate and verify it is identical for all the projects within the same group
+    outage_rates = projects.groupby("gen_tech", as_index=False)["gen_forced_outage_rate"]
+    if (outage_rates.nunique()["gen_forced_outage_rate"] - 1).sum() != 0:
+        outage_rates = outage_rates.nunique().set_index("gen_tech")["gen_forced_outage_rate"] - 1
+        outage_rates = outage_rates[outage_rates != 0]
+        raise Exception(
+            f"These generation technologies have different forced outage rates: {outage_rates.index.values}")
+    outage_rates = outage_rates.mean()  # They're all the same so mean returns the proper value
+    del projects
+    print("Check passed: gen_forced_outage_rate is identical.")
+
+    # Read the dispatch instructions
+    dispatch = pd.read_csv("outputs/dispatch.csv",
+                           usecols=["generation_project", "timestamp", "gen_tech", "gen_load_zone", "DispatchGen_MW",
+                                    "Curtailment_MW"],
+                           index_col=False,
+                           dtype={"generation_project": str})
+    # Keep only valid projects
+    dispatch = dispatch[dispatch["generation_project"].isin(valid_gens)]
+    # Group by timestamp, gen_tech and load_zone
+    dispatch = dispatch.groupby(["timestamp", "gen_tech", "gen_load_zone"], as_index=False).sum()
+    # Get the DispatchUpperLimit from DispatchGen + Curtailment
+    dispatch["DispatchUpperLimit"] = dispatch["DispatchGen_MW"] + dispatch["Curtailment_MW"]
+    dispatch = dispatch.drop(["DispatchGen_MW", "Curtailment_MW"], axis=1)
+
+    # Add the period to each row by merging with outputs/timestamps.csv
+    timestamps = pd.read_csv("outputs/timestamps.csv",
+                             usecols=["timestamp", "period"],
+                             index_col=False)
+    dispatch = dispatch.merge(
+        timestamps,
+        on="timestamp",
+        how='left',
+        validate="many_to_one"
+    )
+    del timestamps
+
+    # Read the gen_cap.csv
+    cap = pd.read_csv("outputs/gen_cap.csv",
+                      usecols=["GENERATION_PROJECT", "PERIOD", "gen_tech", "gen_load_zone", "GenCapacity"],
+                      index_col=False,
+                      dtype={"GENERATION_PROJECT": str}).rename({"PERIOD": "period"}, axis=1)
+    # Keep only valid projects
+    cap = cap[cap["GENERATION_PROJECT"].isin(valid_gens)].drop("GENERATION_PROJECT", axis=1)
+    # Sum for the tech, period and load zone
+    cap = cap.groupby(["period", "gen_tech", "gen_load_zone"], as_index=False).sum()
+    # Merge onto dispatch
+    dispatch = dispatch.merge(
+        cap,
+        on=["period", "gen_tech", "gen_load_zone"],
+        how="left",
+        validate="many_to_one"
+    )
+    del cap
+
+    # Filter out zones with no buildout
+    is_no_buildout = dispatch["GenCapacity"] == 0
+    missing_data = dispatch \
+        [is_no_buildout] \
+        [["period", "gen_tech", "gen_load_zone"]] \
+        .drop_duplicates() \
+        .groupby(["period", "gen_tech"], as_index=False)["gen_load_zone"] \
+        .nunique() \
+        .rename({"gen_load_zone": "Number of Load Zones"}, axis=1)
+    if missing_data["Number of Load Zones"].sum() > 0:
+        warnings.warn(
+            f"Unable to make capacity factors for the following categories since total capacity in those zones is 0.\n{missing_data}")
+    dispatch = dispatch[~is_no_buildout]
+
+    # Merge outage rates onto dispatch
+    dispatch = dispatch.merge(
+        outage_rates,
+        on="gen_tech"
+    )
+    del outage_rates
+
+    dispatch["gen_max_capacity_factor"] = dispatch["DispatchUpperLimit"] / (
+        dispatch["GenCapacity"] * (1 - dispatch["gen_forced_outage_rate"]))
+    dispatch = dispatch[["gen_tech", "gen_load_zone", "timestamp", "gen_max_capacity_factor"]]
+    dispatch.to_csv("zonal_capacity_factors.csv", index=False)
+
+
+if __name__ == "__main__":
+    create_capacity_factors()
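
For intuition, the capacity-factor equation in the docstring above can be worked through on made-up numbers. The values below are hypothetical and only illustrate the arithmetic; in the script they come from the aggregated dispatch.csv and gen_cap.csv rows for one gen_tech, load zone and timepoint.

# Hypothetical totals for the candidate projects of one gen_tech in one load zone and timepoint
dispatch_gen_mw = 40.0     # summed DispatchGen_MW
curtailment_mw = 10.0      # summed Curtailment_MW
gen_capacity_mw = 100.0    # summed GenCapacity for the matching period
forced_outage_rate = 0.05  # gen_forced_outage_rate (verified identical within the tech)

# DispatchUpperLimit = 40 + 10 = 50 MW, so the factor is 50 / (100 * 0.95) ≈ 0.526
capacity_factor = (dispatch_gen_mw + curtailment_mw) / (
    gen_capacity_mw * (1 - forced_outage_rate)
)
print(round(capacity_factor, 3))  # 0.526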

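The tech-screening step relies on the fact that summing a boolean column counts its True values, so a gen_tech qualifies only when both group sums are zero (every project variable, none baseload). A minimal sketch of that trick, using a hypothetical miniature of generation_projects_info.csv:

import pandas as pd

toy = pd.DataFrame({
    "gen_tech": ["Wind", "Wind", "Geothermal"],  # hypothetical techs
    "gen_is_variable": [True, True, False],
    "gen_is_baseload": [False, False, True],
})
toy["gen_is_not_variable"] = ~toy["gen_is_variable"]
sums = toy.groupby("gen_tech", as_index=False)[["gen_is_not_variable", "gen_is_baseload"]].sum()
# Keep only techs where both counts are zero: all projects variable, none baseload.
keep = sums[(sums["gen_is_not_variable"] == 0) & (sums["gen_is_baseload"] == 0)]["gen_tech"]
print(keep.tolist())  # ['Wind']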
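A minimal usage sketch, assuming the module path shown in the file header above and that the call is made from inside a completed run directory, since the relative inputs/ and outputs/ paths in create_capacity_factors() resolve against the current working directory:

# Run from the root of a finished SWITCH run, i.e. the folder containing inputs/ and outputs/.
from switch_model.wecc.get_inputs.post_process_steps.aggregate_candidate_projects import (
    create_capacity_factors,
)

create_capacity_factors()  # writes zonal_capacity_factors.csv into the current directory

The output holds one row per gen_tech, gen_load_zone and timestamp with the computed gen_max_capacity_factor.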