@@ -969,6 +969,8 @@ def read_sql_table(
969969 table : str ,
970970 database : str ,
971971 ctas_approach : bool = True ,
972+ unload_approach : bool = False ,
973+ unload_parameters : Optional [Dict [str , Any ]] = None ,
972974 categories : Optional [List [str ]] = None ,
973975 chunksize : Optional [Union [int , bool ]] = None ,
974976 s3_output : Optional [str ] = None ,
@@ -1090,6 +1092,11 @@ def read_sql_table(
10901092 ctas_approach: bool
10911093 Wraps the query using a CTAS, and read the resulted parquet data on S3.
10921094 If false, read the regular CSV on S3.
1095+ unload_approach: bool
1096+ Wraps the query using UNLOAD, and read the results from S3.
1097+ Only PARQUET format is supported.
1098+ unload_parameters: Optional[Dict[str, Any]]
1099+ Parameters of the UNLOAD statement such as format, compression, field_delimiter, and partitioned_by.
10931100 categories: List[str], optional
10941101 List of columns names that should be returned as pandas.Categorical.
10951102 Recommended for memory restricted environments.
@@ -1177,6 +1184,8 @@ def read_sql_table(
11771184 database = database ,
11781185 data_source = data_source ,
11791186 ctas_approach = ctas_approach ,
1187+ unload_approach = unload_approach ,
1188+ unload_parameters = unload_parameters ,
11801189 categories = categories ,
11811190 chunksize = chunksize ,
11821191 s3_output = s3_output ,
0 commit comments