 the csvs in the inputs folder.
 """
 import pandas as pd
-
 from switch_model.wecc.get_inputs.register_post_process import register_post_process
 
 
@@ -17,16 +16,16 @@ def fetch_df(tab_name, key, config):
1716 "constants" : 0 ,
1817 "plants" : 889129113 ,
1918 "costs" : 1401952285 ,
20- "minimums" : 1049456965
19+ "minimums" : 1049456965 ,
2120 }
2221 SHEET_ID = "1SJrj039T1T95NLTs964VQnsfZgo2QWCo29x2ireVYcU"
2322
2423 gid = TAB_NAME_GID [tab_name ]
2524 url = f"https://docs.google.com/spreadsheet/ccc?key={ SHEET_ID } &output=csv&gid={ gid } "
2625
27- df : pd .DataFrame = pd . read_csv ( url , index_col = False ) \
28- . replace ("FALSE" , False ) \
29- . replace ( "TRUE" , True )
26+ df : pd .DataFrame = (
27+ pd . read_csv ( url , index_col = False ). replace ("FALSE" , False ). replace ( "TRUE" , True )
28+ )
3029
3130 if "description" in df .columns :
3231 df = df .drop ("description" , axis = 1 )
@@ -43,17 +42,16 @@ def filer_by_scenario(df, scenario_column, config):
     if scenario_column in config:
         scenario = config[scenario_column]
     else:
-        scenario = input(f"Which scenario do you want for '{scenario_column}' (default 0) : ")
+        scenario = input(
+            f"Which scenario do you want for '{scenario_column}' (default 0) : "
+        )
         scenario = int(scenario) if scenario != "" else 0
     df = df[df[scenario_column] == scenario]
     return df.drop(scenario_column, axis=1)
 
 
 def cross_join(df1, df2):
-    return df1.assign(key=1).merge(
-        df2.assign(key=1),
-        on="key"
-    ).drop("key", axis=1)
+    return df1.assign(key=1).merge(df2.assign(key=1), on="key").drop("key", axis=1)
 
 
 def add_to_csv(filename, to_add, primary_key=None, append=True):
@@ -83,8 +81,12 @@ def drop_previous_candidate_storage():
 
     gen = pd.read_csv("generation_projects_info.csv", index_col=False)
     # Find generation projects that are both storage and not predetermined (i.e. candidate)
-    predetermined_gen = pd.read_csv("gen_build_predetermined.csv", index_col=False)["GENERATION_PROJECT"]
-    should_drop = (gen["gen_tech"] == STORAGE_TECH) & ~gen["GENERATION_PROJECT"].isin(predetermined_gen)
+    predetermined_gen = pd.read_csv("gen_build_predetermined.csv", index_col=False)[
+        "GENERATION_PROJECT"
+    ]
+    should_drop = (gen["gen_tech"] == STORAGE_TECH) & ~gen["GENERATION_PROJECT"].isin(
+        predetermined_gen
+    )
     # Find projects that we should drop (candidate storage)
     gen_to_drop = gen[should_drop]["GENERATION_PROJECT"]
 
@@ -99,30 +101,46 @@ def drop_previous_candidate_storage():
 
 
 @register_post_process(
-    name="add_storage",
     msg="Adding storage from Google Sheets",
-    only_with_config=True,
-    priority=1  # Increased priority (default is 2) so that it always runs before replace_plants_in_zone_all.py
 )
-def main(config):
+def post_process(config):
     # Drop previous candidate storage from inputs
     drop_previous_candidate_storage()
 
     # Get the generation storage plants from Google Sheet
-    gen_projects = fetch_df("constants", "constant_scenario", config).set_index("param_name").transpose()
-    gen_projects = cross_join(gen_projects, fetch_df("plants", "plants_scenario", config))
+    gen_projects = (
+        fetch_df("constants", "constant_scenario", config)
+        .set_index("param_name")
+        .transpose()
+    )
+    gen_projects = cross_join(
+        gen_projects, fetch_df("plants", "plants_scenario", config)
+    )
 
     # Append the storage plants to the inputs
-    add_to_csv("generation_projects_info.csv", gen_projects, primary_key="GENERATION_PROJECT")
+    add_to_csv(
+        "generation_projects_info.csv", gen_projects, primary_key="GENERATION_PROJECT"
+    )
 
     # Create min_per_tech.csv
     min_projects = fetch_df("minimums", "minimums_scenario", config)
-    add_to_csv("min_per_tech.csv", min_projects, primary_key=["gen_tech", "period"], append=False)
+    add_to_csv(
+        "min_per_tech.csv",
+        min_projects,
+        primary_key=["gen_tech", "period"],
+        append=False,
+    )
 
     # Get the plant costs from GSheets and append to costs
     storage_costs = fetch_df("costs", "costs_scenario", config)
-    storage_costs = storage_costs[storage_costs["GENERATION_PROJECT"].isin(gen_projects["GENERATION_PROJECT"])]
-    add_to_csv("gen_build_costs.csv", storage_costs, primary_key=["GENERATION_PROJECT", "build_year"])
+    storage_costs = storage_costs[
+        storage_costs["GENERATION_PROJECT"].isin(gen_projects["GENERATION_PROJECT"])
+    ]
+    add_to_csv(
+        "gen_build_costs.csv",
+        storage_costs,
+        primary_key=["GENERATION_PROJECT", "build_year"],
+    )
 
     # Create add_storage_info.csv
     pd.DataFrame([config]).transpose().to_csv("add_storage_info.csv", header=False)
@@ -132,9 +150,9 @@ def main(config):
     gen_type.columns = ["gen_tech", "energy_source"]
     gen_type["map_name"] = "default"
     gen_type["gen_type"] = "Storage"
-    pd.concat([
-        pd.read_csv("graph_tech_types.csv", index_col=False), gen_type
-    ]).to_csv("graph_tech_types.csv", index=False)
+    pd.concat([pd.read_csv("graph_tech_types.csv", index_col=False), gen_type]).to_csv(
+        "graph_tech_types.csv", index=False
+    )
 
 
 if __name__ == "__main__":