 )
 def post_process(config):
     agg_techs = config["agg_techs"]
-    cf_quantile = config["cf_quantile"]
+    cf_method = config["cf_method"]
     assert type(agg_techs) == list
     # Don't allow hydro to be aggregated since we haven't implemented how to handle
     # hydro_timeseries.csv
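
For context, a minimal sketch of the config this function now reads. The key names and the two method strings come from the code in this commit; the concrete values, and the idea that any other value falls through to the zonal branch, are assumptions on my part:

# Hypothetical config for post_process(); values are illustrative only.
config = {
    "agg_techs": ["Wind", "Solar"],  # must be a list (asserted above)
    "cf_method": "weighted_mean",    # "95_quantile", "weighted_mean", or any
                                     # other value to reach the zonal branch
}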
@@ -60,7 +60,7 @@ def post_process(config):
 
     # Specify the new project id (e.g. agg_Wind_CA_SGE) and save a mapping of keys to aggregate keys for later
     df["agg_key"] = "agg_" + df["gen_tech"] + "_" + df["gen_load_zone"]
-    keys_to_agg = df[[key, "agg_key"]]
+    keys_to_agg = df[[key, "agg_key", "gen_tech", "gen_load_zone"]]
     df = df.astype({"gen_capacity_limit_mw": float})
     keys_to_agg["weight"] = df["gen_capacity_limit_mw"]
     df[key] = df.pop("agg_key")
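
Here keys_to_agg starts carrying gen_tech and gen_load_zone alongside each project's capacity weight, presumably so later steps can join back to zone-level data. A toy, self-contained demonstration of the mapping this hunk builds; the id column name and all values are invented:

import pandas as pd

key = "GENERATION_PROJECT"  # assumed name for the project id column
df = pd.DataFrame({
    key: ["Wind_CA_SGE_1", "Wind_CA_SGE_2"],
    "gen_tech": ["Wind", "Wind"],
    "gen_load_zone": ["CA_SGE", "CA_SGE"],
    "gen_capacity_limit_mw": ["120", "80"],
})
df["agg_key"] = "agg_" + df["gen_tech"] + "_" + df["gen_load_zone"]
keys_to_agg = df[[key, "agg_key", "gen_tech", "gen_load_zone"]].copy()
df = df.astype({"gen_capacity_limit_mw": float})
keys_to_agg["weight"] = df["gen_capacity_limit_mw"]
# Both rows map to agg_Wind_CA_SGE, with weights 120.0 and 80.0.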
@@ -103,8 +103,10 @@ def agg_projects(x):
     df = df[should_agg]
     # Replace the plant id with the aggregated plant id
     df = (
-        df.merge(keys_to_agg, on=key, how="left", validate="many_to_one")
-        .drop([key, "weight"], axis=1)
+        df.merge(
+            keys_to_agg[[key, "agg_key"]], on=key, how="left", validate="many_to_one"
+        )
+        .drop(key, axis=1)
         .rename({"agg_key": key}, axis=1)
     )
 
@@ -139,17 +141,22 @@ def agg_costs(x):
     # Replace the plant id with the aggregated plant id
     df = (
         df.merge(keys_to_agg, on=key, how="left", validate="many_to_one")
-        .drop([key, "weight"], axis=1)
+        .drop(key, axis=1)
         .rename({"agg_key": key}, axis=1)
     )
 
-    # Aggregate by group and key
+    # Aggregate by group and timepoint
     dfgroup = df.groupby([key, "timepoint"], as_index=False, dropna=False, sort=False)
-    df = dfgroup.quantile(cf_quantile)
-    # Code to take the weighted average
-    # df = dfgroup \
-    #     .quantile(lambda x: np.average(x["gen_max_capacity_factor"], weights=x["weight"])) \
-    #     .rename({None: "gen_max_capacity_factor"}, axis=1)
+    if cf_method == "95_quantile":
+        df = dfgroup.quantile(0.95)
+    elif cf_method == "weighted_mean":
+        # Take the capacity-weighted average across the member projects
+        df = dfgroup.apply(
+            lambda x: np.average(x["gen_max_capacity_factor"], weights=x["weight"])
+        ).rename({None: "gen_max_capacity_factor"}, axis=1)
+    else:
+        zonal_cf = pd.read_csv("zonal_capacity_factors.csv", index_col=False)
+        # TODO
     df = pd.concat([df, df_keep])
     df[columns].to_csv(filename, index=False)
 
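In the weighted_mean branch, np.average computes sum(w * cf) / sum(w) within each (project, timepoint) group: for example, member capacity factors 0.4 and 0.1 with weights 120 MW and 80 MW give (0.4 * 120 + 0.1 * 80) / 200 = 0.28. The .rename({None: ...}) relies on the unnamed value column that this groupby/apply combination produces.

The final branch is left as a TODO in this commit. Given that keys_to_agg now carries gen_tech and gen_load_zone, and that zonal_capacity_factors.csv (written by create_capacity_factors() below) now includes a timepoint column, one plausible completion, sketched under those assumptions rather than taken from the commit, is a lookup of the zone-level factor. The sketch continues the else branch, so df, zonal_cf, and key are the variables already in scope:

        # Hypothetical sketch of the TODO branch: take each aggregated
        # project's capacity factor from the zone-level file rather than
        # from its member projects. Assumes zonal_capacity_factors.csv has
        # one row per (gen_tech, gen_load_zone, timepoint), as written below.
        df = (
            df.drop("gen_max_capacity_factor", axis=1)
            .merge(
                zonal_cf[
                    ["gen_tech", "gen_load_zone", "timepoint", "gen_max_capacity_factor"]
                ],
                on=["gen_tech", "gen_load_zone", "timepoint"],
                how="left",
                validate="many_to_one",
            )
            .drop_duplicates(subset=[key, "timepoint"])
        )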
@@ -267,7 +274,9 @@ def create_capacity_factors():
 
     # Add the period to each row by merging with outputs/timestamps.csv
     timestamps = pd.read_csv(
-        "outputs/timestamps.csv", usecols=["timestamp", "period"], index_col=False
+        "outputs/timestamps.csv",
+        usecols=["timestamp", "timepoint", "period"],
+        index_col=False,
     )
     dispatch = dispatch.merge(
         timestamps, on="timestamp", how="left", validate="many_to_one"
@@ -325,7 +334,13 @@ def create_capacity_factors():
         dispatch["GenCapacity"] * (1 - dispatch["gen_forced_outage_rate"])
     )
     dispatch = dispatch[
-        ["gen_tech", "gen_load_zone", "timestamp", "gen_max_capacity_factor"]
+        [
+            "gen_tech",
+            "gen_load_zone",
+            "timestamp",
+            "timepoint",
+            "gen_max_capacity_factor",
+        ]
     ]
     dispatch.to_csv("zonal_capacity_factors.csv", index=False)
 
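The last hunk shows the denominator of the capacity factor, GenCapacity derated by the forced outage rate; assuming the numerator is dispatched energy (the line above the hunk is not shown), a tiny self-contained check with invented numbers:

# Illustrative only: gen_max_capacity_factor per the formula above,
# dispatch / (GenCapacity * (1 - gen_forced_outage_rate)).
gen_capacity_mw = 100.0
forced_outage_rate = 0.05
dispatch_mwh = 38.0  # energy dispatched in one hourly timepoint
cf = dispatch_mwh / (gen_capacity_mw * (1 - forced_outage_rate))
assert abs(cf - 0.4) < 1e-9  # 38 / 95 = 0.4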