Skip to content

Commit 82058ff

Browse files
committed
Improve error message when loading data fails
1 parent c4ab3d8 commit 82058ff

File tree

2 files changed

+107
-73
lines changed

2 files changed

+107
-73
lines changed

switch_model/generators/core/dispatch.py

Lines changed: 81 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -729,16 +729,24 @@ def graph_total_dispatch(tools):
729729

730730
tools.bar_label()
731731

732+
732733
@graph(
733734
"energy_balance",
734735
title="Energy Balance For Every Month",
735736
supports_multi_scenario=True,
736-
is_long=True
737+
is_long=True,
737738
)
738739
def energy_balance(tools):
739740
# Get dispatch dataframe
740-
cols = ["timestamp", "gen_tech", "gen_energy_source", "DispatchGen_MW", "scenario_name", "scenario_index",
741-
"Curtailment_MW"]
741+
cols = [
742+
"timestamp",
743+
"gen_tech",
744+
"gen_energy_source",
745+
"DispatchGen_MW",
746+
"scenario_name",
747+
"scenario_index",
748+
"Curtailment_MW",
749+
]
742750
df = tools.get_dataframe("dispatch.csv", drop_scenario_info=False)[cols]
743751
df = tools.transform.gen_type(df)
744752

@@ -749,11 +757,16 @@ def energy_balance(tools):
749757
# Sum dispatch across all the projects of the same type and timepoint
750758
key_columns = ["timestamp", "gen_type", "scenario_name", "scenario_index"]
751759
df = df.groupby(key_columns, as_index=False).sum()
752-
df = df.melt(id_vars=key_columns, value_vars=["Dispatch", "Dispatch Limit"], var_name="Type")
760+
df = df.melt(
761+
id_vars=key_columns, value_vars=["Dispatch", "Dispatch Limit"], var_name="Type"
762+
)
753763
df = df.rename({"gen_type": "Source"}, axis=1)
754764

755-
discharge = df[(df["Source"] == "Storage") & (df["Type"] == "Dispatch")].drop(["Source", "Type"], axis=1).rename(
756-
{"value": "discharge"}, axis=1)
765+
discharge = (
766+
df[(df["Source"] == "Storage") & (df["Type"] == "Dispatch")]
767+
.drop(["Source", "Type"], axis=1)
768+
.rename({"value": "discharge"}, axis=1)
769+
)
757770

758771
# Get load dataframe
759772
load = tools.get_dataframe("load_balance.csv", drop_scenario_info=False)
@@ -764,23 +777,21 @@ def energy_balance(tools):
764777
load = load.groupby(key_columns, as_index=False).sum()
765778

766779
# Subtract storage dispatch from generation and add it to the storage charge to get net flow
767-
load = load.merge(
768-
discharge,
769-
how="left",
770-
on=key_columns,
771-
validate="one_to_one"
772-
)
780+
load = load.merge(discharge, how="left", on=key_columns, validate="one_to_one")
773781
load["ZoneTotalCentralDispatch"] -= load["discharge"]
774782
load["StorageNetCharge"] += load["discharge"]
775783
load = load.drop("discharge", axis=1)
776784

777785
# Rename and convert from wide to long format
778-
load = load.rename({
779-
"ZoneTotalCentralDispatch": "Total Generation (excl. storage discharge)",
780-
"TXPowerNet": "Transmission Losses",
781-
"StorageNetCharge": "Storage Net Flow",
782-
"zone_demand_mw": "Demand",
783-
}, axis=1).sort_index(axis=1)
786+
load = load.rename(
787+
{
788+
"ZoneTotalCentralDispatch": "Total Generation (excl. storage discharge)",
789+
"TXPowerNet": "Transmission Losses",
790+
"StorageNetCharge": "Storage Net Flow",
791+
"zone_demand_mw": "Demand",
792+
},
793+
axis=1,
794+
).sort_index(axis=1)
784795
load = load.melt(id_vars=key_columns, var_name="Source")
785796
load["Type"] = "Dispatch"
786797

@@ -796,26 +807,34 @@ def energy_balance(tools):
796807
FREQUENCY = "1W"
797808

798809
def groupby_time(df):
799-
return df.groupby([
800-
"scenario_name",
801-
"period",
802-
"Source",
803-
"Type",
804-
tools.pd.Grouper(key="datetime", freq=FREQUENCY, origin="start")
805-
])["value"]
810+
return df.groupby(
811+
[
812+
"scenario_name",
813+
"period",
814+
"Source",
815+
"Type",
816+
tools.pd.Grouper(key="datetime", freq=FREQUENCY, origin="start"),
817+
]
818+
)["value"]
806819

807820
df = groupby_time(df).sum().reset_index()
808821

809822
# Get the state of charge data
810-
soc = tools.get_dataframe("StateOfCharge.csv", dtype={"STORAGE_GEN_TPS_1": str}, drop_scenario_info=False)
811-
soc = soc.rename({"STORAGE_GEN_TPS_2": "timepoint", "StateOfCharge": "value"}, axis=1)
823+
soc = tools.get_dataframe(
824+
"StateOfCharge.csv", dtype={"STORAGE_GEN_TPS_1": str}, drop_scenario_info=False
825+
)
826+
soc = soc.rename(
827+
{"STORAGE_GEN_TPS_2": "timepoint", "StateOfCharge": "value"}, axis=1
828+
)
812829
# Sum over all the projects that are in the same scenario with the same timepoint
813830
soc = soc.groupby(["timepoint", "scenario_name"], as_index=False).sum()
814831
soc["Source"] = "State Of Charge"
815832
soc["value"] /= 1e6 # Convert to TWh
816833

817834
# Group by time
818-
soc = tools.transform.timestamp(soc, use_timepoint=True, key_col="timepoint").astype({"period": str})
835+
soc = tools.transform.timestamp(
836+
soc, use_timepoint=True, key_col="timepoint"
837+
).astype({"period": str})
819838
soc["Type"] = "Dispatch"
820839
soc = groupby_time(soc).mean().reset_index()
821840

@@ -827,35 +846,47 @@ def groupby_time(df):
827846
# Plot
828847
# Get the colors for the lines
829848
colors = tools.get_colors()
830-
colors.update({
831-
"Transmission Losses": "brown",
832-
"Storage Net Flow": "cadetblue",
833-
"Demand": "black",
834-
"Total Generation (excl. storage discharge)": "black",
835-
"State Of Charge": "green"
836-
})
849+
colors.update(
850+
{
851+
"Transmission Losses": "brown",
852+
"Storage Net Flow": "cadetblue",
853+
"Demand": "black",
854+
"Total Generation (excl. storage discharge)": "black",
855+
"State Of Charge": "green",
856+
}
857+
)
837858

838859
# plot
839860
num_periods = df["period"].nunique()
840861
pn = tools.pn
841-
plot = pn.ggplot(df) + \
842-
pn.geom_line(pn.aes(x="day", y="value", color="Source", linetype="Type")) + \
843-
pn.facet_grid("period ~ scenario_name") + \
844-
pn.labs(y="Contribution to Energy Balance (TWh)") + \
845-
pn.scales.scale_color_manual(values=colors, aesthetics="color", na_value=colors["Other"]) + \
846-
pn.scales.scale_x_continuous(
847-
name="Month",
848-
labels=["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"],
849-
breaks=(15, 46, 76, 106, 137, 167, 198, 228, 259, 289, 319, 350),
850-
limits=(0, 366)) + \
851-
pn.scales.scale_linetype_manual(
852-
values={"Dispatch Limit": "dotted", "Dispatch": "solid"}
853-
) + \
854-
pn.theme(
855-
figure_size=(pn.options.figure_size[0] * tools.num_scenarios, pn.options.figure_size[1] * num_periods))
862+
plot = (
863+
pn.ggplot(df)
864+
+ pn.geom_line(pn.aes(x="day", y="value", color="Source", linetype="Type"))
865+
+ pn.facet_grid("period ~ scenario_name")
866+
+ pn.labs(y="Contribution to Energy Balance (TWh)")
867+
+ pn.scales.scale_color_manual(
868+
values=colors, aesthetics="color", na_value=colors["Other"]
869+
)
870+
+ pn.scales.scale_x_continuous(
871+
name="Month",
872+
labels=["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"],
873+
breaks=(15, 46, 76, 106, 137, 167, 198, 228, 259, 289, 319, 350),
874+
limits=(0, 366),
875+
)
876+
+ pn.scales.scale_linetype_manual(
877+
values={"Dispatch Limit": "dotted", "Dispatch": "solid"}
878+
)
879+
+ pn.theme(
880+
figure_size=(
881+
pn.options.figure_size[0] * tools.num_scenarios,
882+
pn.options.figure_size[1] * num_periods,
883+
)
884+
)
885+
)
856886

857887
tools.save_figure(plot.draw())
858888

889+
859890
@graph(
860891
"curtailment_per_period",
861892
title="Percent of total dispatchable capacity curtailed",

switch_model/utilities/load_data.py

Lines changed: 26 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -99,16 +99,16 @@ def __str__(self):
9999
return repr(self.value)
100100

101101

102-
def load_data(switch_data, optional, auto_select, optional_params, **kwds):
103-
path = kwds["filename"]
102+
def load_data(switch_data, optional, auto_select, optional_params, **kwargs):
103+
path = kwargs["filename"]
104104
# Skip if the file is missing
105105
if optional and not os.path.isfile(path):
106106
return
107107
# If this is a .dat file, then skip the rest of this fancy business; we'll
108108
# only check if the file is missing and optional for .csv files.
109109
filename, extension = os.path.splitext(path)
110110
if extension == ".dat":
111-
switch_data.load(**kwds)
111+
switch_data.load(**kwargs)
112112
return
113113

114114
# copy the optional_params to avoid side-effects when the list is altered below
@@ -134,14 +134,14 @@ def load_data(switch_data, optional, auto_select, optional_params, **kwds):
134134
# Try to get a list of parameters. If param was given as a
135135
# singleton or a tuple, make it into a list that can be edited.
136136
params = []
137-
if "param" in kwds:
137+
if "param" in kwargs:
138138
# Tuple -> list
139-
if isinstance(kwds["param"], tuple):
140-
kwds["param"] = list(kwds["param"])
139+
if isinstance(kwargs["param"], tuple):
140+
kwargs["param"] = list(kwargs["param"])
141141
# Singleton -> list
142-
elif not isinstance(kwds["param"], list):
143-
kwds["param"] = [kwds["param"]]
144-
params = kwds["param"]
142+
elif not isinstance(kwargs["param"], list):
143+
kwargs["param"] = [kwargs["param"]]
144+
params = kwargs["param"]
145145
# optional_params may include Param objects instead of names. In
146146
# those cases, convert objects to names.
147147
for (i, p) in enumerate(optional_params):
@@ -157,11 +157,11 @@ def load_data(switch_data, optional, auto_select, optional_params, **kwds):
157157
optional_params.append(p.name)
158158
# How many index columns do we expect?
159159
# Grab the dimensionality of the index param if it was provided.
160-
if "index" in kwds:
161-
num_indexes = kwds["index"].dimen
160+
if "index" in kwargs:
161+
num_indexes = kwargs["index"].dimen
162162
if num_indexes == UnknownSetDimen:
163163
raise Exception(
164-
f"Index {kwds['index'].name} has unknown dimension. Specify dimen= during its creation."
164+
f"Index {kwargs['index'].name} has unknown dimension. Specify dimen= during its creation."
165165
)
166166
# Next try the first parameter's index.
167167
elif len(params) > 0:
@@ -185,7 +185,7 @@ def load_data(switch_data, optional, auto_select, optional_params, **kwds):
185185
# within the file (e.g., "cost" and "limit"). We could also require the data file
186186
# to be called "rfm_supply_tier.csv" for greater consistency/predictability.
187187
if auto_select:
188-
if "select" in kwds:
188+
if "select" in kwargs:
189189
raise InputError(
190190
"You may not specify a select parameter if "
191191
+ "auto_select is set to True."
@@ -197,15 +197,15 @@ def get_column_name(p):
197197
else:
198198
return p.name
199199

200-
kwds["select"] = headers[0:num_indexes] + [get_column_name(p) for p in params]
200+
kwargs["select"] = headers[0:num_indexes] + [get_column_name(p) for p in params]
201201
# Check to see if expected column names are in the file. If a column
202202
# name is missing and its parameter is optional, then drop it from
203203
# the select & param lists.
204-
if "select" in kwds:
205-
if isinstance(kwds["select"], tuple):
206-
kwds["select"] = list(kwds["select"])
204+
if "select" in kwargs:
205+
if isinstance(kwargs["select"], tuple):
206+
kwargs["select"] = list(kwargs["select"])
207207
del_items = []
208-
for (i, col) in enumerate(kwds["select"]):
208+
for (i, col) in enumerate(kwargs["select"]):
209209
p_i = i - num_indexes
210210
if col not in headers:
211211
if len(params) > p_i >= 0 and params[p_i].name in optional_params:
@@ -218,17 +218,20 @@ def get_column_name(p):
218218
# to first so that the indexes won't get messed up as we go.
219219
del_items.sort(reverse=True)
220220
for (i, p_i) in del_items:
221-
del kwds["select"][i]
222-
del kwds["param"][p_i]
221+
del kwargs["select"][i]
222+
del kwargs["param"][p_i]
223223

224224
if optional and file_has_no_data_rows:
225225
# Skip the file. Note that we are only doing this after having
226226
# validated the file's column headings.
227227
return
228228

229229
# Use our custom DataManager to allow 'inf' in csvs.
230-
if kwds["filename"][-4:] == ".csv":
231-
kwds["using"] = "switch_csv"
230+
if kwargs["filename"][-4:] == ".csv":
231+
kwargs["using"] = "switch_csv"
232232
# All done with cleaning optional bits. Pass the updated arguments
233233
# into the DataPortal.load() function.
234-
switch_data.load(**kwds)
234+
try:
235+
switch_data.load(**kwargs)
236+
except:
237+
raise Exception(f"Failed to load data from file {path}.")

0 commit comments

Comments (0)