Skip to content

Commit 717cc97

Browse files
committed
Output timepoint and start working on post-process step
1 parent 53dce78 commit 717cc97

File tree

2 files changed

+29
-14
lines changed

2 files changed

+29
-14
lines changed

switch_model/tools/templates/config.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,4 +56,4 @@ get_inputs:
5656
# of all the available candidate plants, 0.5 will use the median plant and 0 will use the worst plant.
5757
# aggregate_projects_by_zone:
5858
# agg_techs: ["Central_PV"]
59-
# cf_quantile: 0.95
59+
# cf_method: "file" # Other options are "weighted_mean" and "95_quantile"

switch_model/wecc/get_inputs/post_process_steps/aggregate_candidate_projects.py

Lines changed: 28 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828
)
2929
def post_process(config):
3030
agg_techs = config["agg_techs"]
31-
cf_quantile = config["cf_quantile"]
31+
cf_method = config["cf_method"]
3232
assert type(agg_techs) == list
3333
# Don't allow hydro to be aggregated since we haven't implemented how to handle
3434
# hydro_timeseries.csv
@@ -60,7 +60,7 @@ def post_process(config):
6060

6161
# Specify the new project id (e.g. agg_Wind_CA_SGE) and save a mapping of keys to aggregate keys for later
6262
df["agg_key"] = "agg_" + df["gen_tech"] + "_" + df["gen_load_zone"]
63-
keys_to_agg = df[[key, "agg_key"]]
63+
keys_to_agg = df[[key, "agg_key", "gen_tech", "gen_load_zone"]]
6464
df = df.astype({"gen_capacity_limit_mw": float})
6565
keys_to_agg["weight"] = df["gen_capacity_limit_mw"]
6666
df[key] = df.pop("agg_key")
@@ -103,8 +103,10 @@ def agg_projects(x):
103103
df = df[should_agg]
104104
# Replace the plant id with the aggregated plant id
105105
df = (
106-
df.merge(keys_to_agg, on=key, how="left", validate="many_to_one")
107-
.drop([key, "weight"], axis=1)
106+
df.merge(
107+
keys_to_agg[[key, "agg_key"]], on=key, how="left", validate="many_to_one"
108+
)
109+
.drop(key, axis=1)
108110
.rename({"agg_key": key}, axis=1)
109111
)
110112

@@ -139,17 +141,22 @@ def agg_costs(x):
139141
# Replace the plant id with the aggregated plant id
140142
df = (
141143
df.merge(keys_to_agg, on=key, how="left", validate="many_to_one")
142-
.drop([key, "weight"], axis=1)
144+
.drop(key, axis=1)
143145
.rename({"agg_key": key}, axis=1)
144146
)
145147

146-
# Aggregate by group and key
148+
# Aggregate by group and timepoint
147149
dfgroup = df.groupby([key, "timepoint"], as_index=False, dropna=False, sort=False)
148-
df = dfgroup.quantile(cf_quantile)
149-
# Code to take the weighted average
150-
# df = dfgroup \
151-
# .quantile(lambda x: np.average(x["gen_max_capacity_factor"], weights=x["weight"])) \
152-
# .rename({None: "gen_max_capacity_factor"}, axis=1)
150+
if cf_method == "95_quantile":
151+
df = dfgroup.quantile(0.95)
152+
elif cf_method == "weighted_mean":
153+
# Code to take the weighted average
154+
df = dfgroup.quantile(
155+
lambda x: np.average(x["gen_max_capacity_factor"], weights=x["weight"])
156+
).rename({None: "gen_max_capacity_factor"}, axis=1)
157+
else:
158+
zonal_cf = pd.read_csv("zonal_capacity_factors.csv", index_col=False)
159+
# TODO
153160
df = pd.concat([df, df_keep])
154161
df[columns].to_csv(filename, index=False)
155162

@@ -267,7 +274,9 @@ def create_capacity_factors():
267274

268275
# Add the period to each row by merging with outputs/timestamps.csv
269276
timestamps = pd.read_csv(
270-
"outputs/timestamps.csv", usecols=["timestamp", "period"], index_col=False
277+
"outputs/timestamps.csv",
278+
usecols=["timestamp", "timepoint", "period"],
279+
index_col=False,
271280
)
272281
dispatch = dispatch.merge(
273282
timestamps, on="timestamp", how="left", validate="many_to_one"
@@ -325,7 +334,13 @@ def create_capacity_factors():
325334
dispatch["GenCapacity"] * (1 - dispatch["gen_forced_outage_rate"])
326335
)
327336
dispatch = dispatch[
328-
["gen_tech", "gen_load_zone", "timestamp", "gen_max_capacity_factor"]
337+
[
338+
"gen_tech",
339+
"gen_load_zone",
340+
"timestamp",
341+
"timepoint",
342+
"gen_max_capacity_factor",
343+
]
329344
]
330345
dispatch.to_csv("zonal_capacity_factors.csv", index=False)
331346

0 commit comments

Comments (0)