diff --git a/pyiceberg/avro/resolver.py b/pyiceberg/avro/resolver.py
index c11d2878aa..84805640eb 100644
--- a/pyiceberg/avro/resolver.py
+++ b/pyiceberg/avro/resolver.py
@@ -281,7 +281,7 @@ def struct(self, file_schema: StructType, record_struct: IcebergType | None, fil
         record_struct_positions: Dict[int, int] = {field.field_id: pos for pos, field in enumerate(record_struct.fields)}
         results: List[Tuple[int | None, Writer]] = []
 
-        for writer, file_field in zip(file_writers, file_schema.fields):
+        for writer, file_field in zip(file_writers, file_schema.fields, strict=True):
             if file_field.field_id in record_struct_positions:
                 results.append((record_struct_positions[file_field.field_id], writer))
             elif file_field.required:
@@ -408,7 +408,7 @@ def struct(self, struct: StructType, expected_struct: IcebergType | None, field_
                 # Check if we need to convert it to an Enum
                 result_reader if not (enum_type := self.read_enums.get(field.field_id)) else EnumReader(enum_type, result_reader),
             )
-            for field, result_reader in zip(struct.fields, field_readers)
+            for field, result_reader in zip(struct.fields, field_readers, strict=True)
         ]
 
         file_fields = {field.field_id for field in struct.fields}
diff --git a/pyiceberg/io/pyarrow.py b/pyiceberg/io/pyarrow.py
index 5b4c041ff5..46d7fe6b8b 100644
--- a/pyiceberg/io/pyarrow.py
+++ b/pyiceberg/io/pyarrow.py
@@ -1898,7 +1898,7 @@ def struct(self, struct: StructType, struct_array: pa.Array | None, field_result
             return None
         field_arrays: List[pa.Array] = []
         fields: List[pa.Field] = []
-        for field, field_array in zip(struct.fields, field_results):
+        for field, field_array in zip(struct.fields, field_results, strict=True):
             if field_array is not None:
                 array = self._cast_if_needed(field, field_array)
                 field_arrays.append(array)
@@ -2840,7 +2840,7 @@ def _determine_partitions(spec: PartitionSpec, schema: Schema, arrow_table: pa.T
     # to avoid conflicts
     partition_fields = [f"_partition_{field.name}" for field in spec.fields]
 
-    for partition, name in zip(spec.fields, partition_fields):
+    for partition, name in zip(spec.fields, partition_fields, strict=True):
         source_field = schema.find_field(partition.source_id)
         full_field_name = schema.find_column_name(partition.source_id)
         if full_field_name is None:
@@ -2854,7 +2854,7 @@ def _determine_partitions(spec: PartitionSpec, schema: Schema, arrow_table: pa.T
         partition_key = PartitionKey(
             field_values=[
                 PartitionFieldValue(field=field, value=unique_partition[name])
-                for field, name in zip(spec.fields, partition_fields)
+                for field, name in zip(spec.fields, partition_fields, strict=True)
             ],
             partition_spec=spec,
             schema=schema,
@@ -2868,7 +2868,7 @@ def _determine_partitions(spec: PartitionSpec, schema: Schema, arrow_table: pa.T
                     if unique_partition[partition_field_name] is not None
                     else pc.field(partition_field_name).is_null()
                 )
-                for field, partition_field_name in zip(spec.fields, partition_fields)
+                for field, partition_field_name in zip(spec.fields, partition_fields, strict=True)
             ],
         )
     )
diff --git a/pyiceberg/partitioning.py b/pyiceberg/partitioning.py
index 046782c0dc..bf8e4081fe 100644
--- a/pyiceberg/partitioning.py
+++ b/pyiceberg/partitioning.py
@@ -202,7 +202,7 @@ def compatible_with(self, other: PartitionSpec) -> bool:
             this_field.source_id == that_field.source_id
             and this_field.transform == that_field.transform
             and this_field.name == that_field.name
-            for this_field, that_field in zip(self.fields, other.fields)
+            for this_field, that_field in zip(self.fields, other.fields, strict=True)
         )
 
     def partition_type(self, schema: Schema) -> StructType:
@@ -242,7 +242,7 @@ def partition_to_path(self, data: Record, schema: Schema) -> str:
             value_strs.append(quote_plus(value_str, safe=""))
             field_strs.append(quote_plus(partition_field.name, safe=""))
 
-        path = "/".join([field_str + "=" + value_str for field_str, value_str in zip(field_strs, value_strs)])
+        path = "/".join([field_str + "=" + value_str for field_str, value_str in zip(field_strs, value_strs, strict=True)])
         return path
 
diff --git a/pyiceberg/schema.py b/pyiceberg/schema.py
index c8c73eded8..f6e4684b91 100644
--- a/pyiceberg/schema.py
+++ b/pyiceberg/schema.py
@@ -125,7 +125,7 @@ def __eq__(self, other: Any) -> bool:
             return False
 
         identifier_field_ids_is_equal = self.identifier_field_ids == other.identifier_field_ids
-        schema_is_equal = all(lhs == rhs for lhs, rhs in zip(self.columns, other.columns))
+        schema_is_equal = all(lhs == rhs for lhs, rhs in zip(self.columns, other.columns, strict=True))
 
         return identifier_field_ids_is_equal and schema_is_equal
 
@@ -1317,7 +1317,7 @@ def schema(self, schema: Schema, struct_result: Callable[[], StructType]) -> Sch
     def struct(self, struct: StructType, field_results: List[Callable[[], IcebergType]]) -> StructType:
         new_ids = [self._get_and_increment(field.field_id) for field in struct.fields]
         new_fields = []
-        for field_id, field, field_type in zip(new_ids, struct.fields, field_results):
+        for field_id, field, field_type in zip(new_ids, struct.fields, field_results, strict=True):
             new_fields.append(
                 NestedField(
                     field_id=field_id,
diff --git a/pyiceberg/table/metadata.py b/pyiceberg/table/metadata.py
index 3582a9be8c..201aaee511 100644
--- a/pyiceberg/table/metadata.py
+++ b/pyiceberg/table/metadata.py
@@ -337,7 +337,9 @@ def _generate_snapshot_id() -> int:
     """
     rnd_uuid = uuid.uuid4()
     snapshot_id = int.from_bytes(
-        bytes(lhs ^ rhs for lhs, rhs in zip(rnd_uuid.bytes[0:8], rnd_uuid.bytes[8:16])), byteorder="little", signed=True
+        bytes(lhs ^ rhs for lhs, rhs in zip(rnd_uuid.bytes[0:8], rnd_uuid.bytes[8:16], strict=True)),
+        byteorder="little",
+        signed=True,
     )
     snapshot_id = snapshot_id if snapshot_id >= 0 else snapshot_id * -1
 
diff --git a/pyiceberg/table/name_mapping.py b/pyiceberg/table/name_mapping.py
index 8ba9fd2554..1216daa2a4 100644
--- a/pyiceberg/table/name_mapping.py
+++ b/pyiceberg/table/name_mapping.py
@@ -155,7 +155,7 @@ def schema(self, schema: Schema, struct_result: List[MappedField]) -> List[Mappe
     def struct(self, struct: StructType, field_results: List[List[MappedField]]) -> List[MappedField]:
         return [
             MappedField(field_id=field.field_id, names=[field.name], fields=result)
-            for field, result in zip(struct.fields, field_results)
+            for field, result in zip(struct.fields, field_results, strict=True)
         ]
 
     def field(self, field: NestedField, field_result: List[MappedField]) -> List[MappedField]:
diff --git a/pyiceberg/table/upsert_util.py b/pyiceberg/table/upsert_util.py
index cefdd101a0..6f32826eb0 100644
--- a/pyiceberg/table/upsert_util.py
+++ b/pyiceberg/table/upsert_util.py
@@ -103,7 +103,9 @@ def get_rows_to_update(source_table: pa.Table, target_table: pa.Table, join_cols
     # Step 4: Compare all rows using Python
     to_update_indices = []
     for source_idx, target_idx in zip(
-        matching_indices[SOURCE_INDEX_COLUMN_NAME].to_pylist(), matching_indices[TARGET_INDEX_COLUMN_NAME].to_pylist()
+        matching_indices[SOURCE_INDEX_COLUMN_NAME].to_pylist(),
+        matching_indices[TARGET_INDEX_COLUMN_NAME].to_pylist(),
+        strict=True,
     ):
         source_row = source_table.slice(source_idx, 1)
         target_row = target_table.slice(target_idx, 1)
diff --git a/pyiceberg/utils/lazydict.py b/pyiceberg/utils/lazydict.py
index db5c1f82c5..4b616c5c27 100644
--- a/pyiceberg/utils/lazydict.py
+++ b/pyiceberg/utils/lazydict.py
@@ -46,7 +46,7 @@ def __init__(self, contents: Sequence[Sequence[K | V]]):
     def _build_dict(self) -> Dict[K, V]:
         self._dict = {}
         for item in self._contents:
-            self._dict.update(dict(zip(cast(Sequence[K], item[::2]), cast(Sequence[V], item[1::2]))))
+            self._dict.update(dict(zip(cast(Sequence[K], item[::2]), cast(Sequence[V], item[1::2]), strict=True)))
         return self._dict
 
diff --git a/ruff.toml b/ruff.toml
index 7fb76404c7..ca9d1b15ff 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -57,7 +57,15 @@ select = [
   "I", # isort
   "UP", # pyupgrade
 ]
-ignore = ["E501","E203","B024","B028","UP037", "UP035", "UP006", "B905"]
+ignore = [
+  "E501",
+  "E203",
+  "B024",
+  "B028",
+  "UP037",
+  "UP035",
+  "UP006"
+]
 
 # Allow autofix for all enabled rules (when `--fix`) is provided.
 fixable = ["ALL"]
diff --git a/tests/integration/test_add_files.py b/tests/integration/test_add_files.py
index 84a30ab371..653549ebb6 100644
--- a/tests/integration/test_add_files.py
+++ b/tests/integration/test_add_files.py
@@ -713,7 +713,7 @@ def test_add_file_with_valid_nullability_diff(spark: SparkSession, session_catal
     rhs = written_arrow_table.to_pandas()
 
     for column in written_arrow_table.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             assert left == right
 
 
@@ -755,7 +755,7 @@ def test_add_files_with_valid_upcast(
     rhs = written_arrow_table.to_pandas()
 
     for column in written_arrow_table.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             if column == "map":
                 # Arrow returns a list of tuples, instead of a dict
                 right = dict(right)
@@ -802,7 +802,7 @@ def test_add_files_subset_of_schema(spark: SparkSession, session_catalog: Catalo
     rhs = written_arrow_table.to_pandas()
 
     for column in written_arrow_table.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             assert left == right
 
 
diff --git a/tests/integration/test_inspect_table.py b/tests/integration/test_inspect_table.py
index 7a9617a995..4add18cf3f 100644
--- a/tests/integration/test_inspect_table.py
+++ b/tests/integration/test_inspect_table.py
@@ -152,7 +152,7 @@ def _inspect_files_asserts(df: pa.Table, spark_df: DataFrame) -> None:
         if column == "partition":
             # Spark leaves out the partition if the table is unpartitioned
            continue
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             if isinstance(left, float) and math.isnan(left) and isinstance(right, float) and math.isnan(right):
                 # NaN != NaN in Python
                 continue
@@ -209,7 +209,7 @@ def _check_pyiceberg_df_equals_spark_df(df: pa.Table, spark_df: DataFrame) -> No
     lhs = df.to_pandas().sort_values("last_updated_at")
     rhs = spark_df.toPandas().sort_values("last_updated_at")
     for column in df.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             assert left == right, f"Difference in column {column}: {left} != {right}"
 
 
@@ -284,7 +284,7 @@ def test_inspect_snapshots(
     lhs = spark.table(f"{identifier}.snapshots").toPandas()
     rhs = df.to_pandas()
     for column in df.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             if column == "summary":
                 # Arrow returns a list of tuples, instead of a dict
                 right = dict(right)
@@ -332,7 +332,7 @@ def check_pyiceberg_df_equals_spark_df(df: pa.Table, spark_df: DataFrame) -> Non
     assert len(lhs) == len(rhs)
 
     for column in df.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             if column == "data_file":
                 for df_column in left.keys():
                     if df_column == "partition":
@@ -485,7 +485,7 @@ def test_inspect_refs(
     lhs = spark.table(f"{identifier}.refs").toPandas()
     rhs = df.to_pandas()
     for column in df.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             if isinstance(left, float) and math.isnan(left) and isinstance(right, float) and math.isnan(right):
                 # NaN != NaN in Python
                 continue
@@ -535,7 +535,7 @@ def test_inspect_partitions_unpartitioned(
     lhs = df.to_pandas()
     rhs = spark.table(f"{identifier}.partitions").toPandas()
     for column in df.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             assert left == right, f"Difference in column {column}: {left} != {right}"
 
 
@@ -755,7 +755,7 @@ def test_inspect_manifests(spark: SparkSession, session_catalog: Catalog, format
     lhs = spark.table(f"{identifier}.manifests").toPandas()
     rhs = df.to_pandas()
     for column in df.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             assert left == right, f"Difference in column {column}: {left} != {right}"
 
 
@@ -793,7 +793,7 @@ def test_inspect_metadata_log_entries(
     assert_frame_equal(left_before_last, right_before_last, check_dtype=False)
     # compare the last row, except for the timestamp
     for column in df.column_names:
-        for left, right in zip(left_last[column], right_last[column]):
+        for left, right in zip(left_last[column], right_last[column], strict=True):
             if column == "timestamp":
                 continue
             assert left == right, f"Difference in column {column}: {left} != {right}"
@@ -861,7 +861,7 @@ def test_inspect_history(spark: SparkSession, session_catalog: Catalog, format_v
     lhs = spark.table(f"{identifier}.history").toPandas()
     rhs = df.to_pandas()
    for column in df.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             if isinstance(left, float) and math.isnan(left) and isinstance(right, float) and math.isnan(right):
                 # NaN != NaN in Python
                 continue
diff --git a/tests/integration/test_partitioning_key.py b/tests/integration/test_partitioning_key.py
index 1908ec16f3..fcc5dc0e35 100644
--- a/tests/integration/test_partitioning_key.py
+++ b/tests/integration/test_partitioning_key.py
@@ -737,7 +737,7 @@ def test_partition_key(
 ) -> None:
     field_values = [
         PartitionFieldValue(field, field.transform.transform(TABLE_SCHEMA.find_field(field.source_id).field_type)(value))
-        for field, value in zip(partition_fields, partition_values)
+        for field, value in zip(partition_fields, partition_values, strict=True)
     ]
 
     spec = PartitionSpec(*partition_fields)
diff --git a/tests/integration/test_rest_manifest.py b/tests/integration/test_rest_manifest.py
index 8dd9510ac8..5d7a3d9441 100644
--- a/tests/integration/test_rest_manifest.py
+++ b/tests/integration/test_rest_manifest.py
@@ -38,7 +38,7 @@
 # direct comparison with the dicts returned by fastavro
 def todict(obj: Any, spec_keys: List[str]) -> Any:
     if type(obj) is Record:
-        return {key: obj[pos] for key, pos in zip(spec_keys, range(len(obj)))}
+        return {key: obj[pos] for key, pos in zip(spec_keys, range(len(obj)), strict=True)}
     if isinstance(obj, dict) or isinstance(obj, LazyDict):
         data = []
         for k, v in obj.items():
diff --git a/tests/integration/test_writes/test_writes.py b/tests/integration/test_writes/test_writes.py
index dcd465a7ca..e7bac5e3b8 100644
--- a/tests/integration/test_writes/test_writes.py
+++ b/tests/integration/test_writes/test_writes.py
@@ -759,7 +759,9 @@ def test_spark_writes_orc_pyiceberg_reads(spark: SparkSession, session_catalog:
     ]
 
     # Verify PyIceberg results contain the expected data (appears twice due to create + append)
-    pyiceberg_data = list(zip(pyiceberg_df["id"], pyiceberg_df["name"], pyiceberg_df["age"], pyiceberg_df["is_active"]))
+    pyiceberg_data = list(
+        zip(pyiceberg_df["id"], pyiceberg_df["name"], pyiceberg_df["age"], pyiceberg_df["is_active"], strict=True)
+    )
     assert pyiceberg_data == expected_data + expected_data  # Data should appear twice
 
     # Verify PyIceberg data types are correct
@@ -1170,7 +1172,7 @@ def test_inspect_snapshots(
     lhs = spark.table(f"{identifier}.snapshots").toPandas()
     rhs = df.to_pandas()
     for column in df.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             if column == "summary":
                 # Arrow returns a list of tuples, instead of a dict
                 right = dict(right)
@@ -1466,7 +1468,7 @@ def test_table_write_schema_with_valid_nullability_diff(
     rhs = written_arrow_table.to_pandas()
 
     for column in written_arrow_table.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             assert left == right
 
 
@@ -1506,7 +1508,7 @@ def test_table_write_schema_with_valid_upcast(
     rhs = written_arrow_table.to_pandas()
 
     for column in written_arrow_table.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             if column == "map":
                 # Arrow returns a list of tuples, instead of a dict
                 right = dict(right)
@@ -1552,7 +1554,7 @@ def test_write_all_timestamp_precision(
     rhs = written_arrow_table.to_pandas()
 
     for column in written_arrow_table.column_names:
-        for left, right in zip(lhs[column].to_list(), rhs[column].to_list()):
+        for left, right in zip(lhs[column].to_list(), rhs[column].to_list(), strict=True):
             if pd.isnull(left):
                 assert pd.isnull(right)
             else:
diff --git a/tests/io/test_pyarrow.py b/tests/io/test_pyarrow.py
index dbd88a77c8..3765ea6de6 100644
--- a/tests/io/test_pyarrow.py
+++ b/tests/io/test_pyarrow.py
@@ -1076,16 +1076,16 @@ def test_projection_add_column(file_int: str) -> None:
     for col in result_table.columns:
         assert len(col) == 3
 
-    for actual, expected in zip(result_table.columns[0], [None, None, None]):
+    for actual, expected in zip(result_table.columns[0], [None, None, None], strict=True):
         assert actual.as_py() == expected
 
-    for actual, expected in zip(result_table.columns[1], [None, None, None]):
+    for actual, expected in zip(result_table.columns[1], [None, None, None], strict=True):
         assert actual.as_py() == expected
 
-    for actual, expected in zip(result_table.columns[2], [None, None, None]):
+    for actual, expected in zip(result_table.columns[2], [None, None, None], strict=True):
         assert actual.as_py() == expected
 
-    for actual, expected in zip(result_table.columns[3], [None, None, None]):
+    for actual, expected in zip(result_table.columns[3], [None, None, None], strict=True):
         assert actual.as_py() == expected
     assert (
         repr(result_table.schema)
@@ -1106,7 +1106,9 @@ def test_read_list(schema_list: Schema, file_list: str) -> None:
     result_table = project(schema_list, [file_list])
 
     assert len(result_table.columns[0]) == 3
-    for actual, expected in zip(result_table.columns[0], [list(range(1, 10)), list(range(2, 20)), list(range(3, 30))]):
+    for actual, expected in zip(
+        result_table.columns[0], [list(range(1, 10)), list(range(2, 20)), list(range(3, 30))], strict=True
+    ):
         assert actual.as_py() == expected
 
     assert (
@@ -1120,7 +1122,7 @@ def test_read_map(schema_map: Schema, file_map: str) -> None:
     result_table = project(schema_map, [file_map])
 
     assert len(result_table.columns[0]) == 3
-    for actual, expected in zip(result_table.columns[0], [[("a", "b")], [("c", "d")], [("e", "f"), ("g", "h")]]):
+    for actual, expected in zip(result_table.columns[0], [[("a", "b")], [("c", "d")], [("e", "f"), ("g", "h")]], strict=True):
         assert actual.as_py() == expected
 
     assert (
@@ -1177,7 +1179,7 @@ def test_projection_rename_column(schema_int: Schema, file_int: str) -> None:
     )
     result_table = project(schema, [file_int])
     assert len(result_table.columns[0]) == 3
-    for actual, expected in zip(result_table.columns[0], [0, 1, 2]):
+    for actual, expected in zip(result_table.columns[0], [0, 1, 2], strict=True):
         assert actual.as_py() == expected
 
     assert repr(result_table.schema) == "other_name: int32 not null"
@@ -1186,7 +1188,7 @@ def test_projection_rename_column(schema_int: Schema, file_int: str) -> None:
 
 def test_projection_concat_files(schema_int: Schema, file_int: str) -> None:
     result_table = project(schema_int, [file_int, file_int])
-    for actual, expected in zip(result_table.columns[0], [0, 1, 2, 0, 1, 2]):
+    for actual, expected in zip(result_table.columns[0], [0, 1, 2, 0, 1, 2], strict=True):
         assert actual.as_py() == expected
     assert len(result_table.columns[0]) == 6
     assert repr(result_table.schema) == "id: int32"
@@ -1350,7 +1352,7 @@ def test_projection_filter_add_column(schema_int: Schema, file_int: str, file_st
     """We have one file that has the column, and the other one doesn't"""
     result_table = project(schema_int, [file_int, file_string])
 
-    for actual, expected in zip(result_table.columns[0], [0, 1, 2, None, None, None]):
+    for actual, expected in zip(result_table.columns[0], [0, 1, 2, None, None, None], strict=True):
         assert actual.as_py() == expected
     assert len(result_table.columns[0]) == 6
     assert repr(result_table.schema) == "id: int32"
@@ -1360,7 +1362,7 @@ def test_projection_filter_add_column_promote(file_int: str) -> None:
     schema_long = Schema(NestedField(1, "id", LongType(), required=True))
     result_table = project(schema_long, [file_int])
 
-    for actual, expected in zip(result_table.columns[0], [0, 1, 2]):
+    for actual, expected in zip(result_table.columns[0], [0, 1, 2], strict=True):
         assert actual.as_py() == expected
     assert len(result_table.columns[0]) == 3
     assert repr(result_table.schema) == "id: int64 not null"
@@ -1388,7 +1390,7 @@ def test_projection_nested_struct_subset(file_struct: str) -> None:
 
     result_table = project(schema, [file_struct])
 
-    for actual, expected in zip(result_table.columns[0], [52.371807, 52.387386, 52.078663]):
+    for actual, expected in zip(result_table.columns[0], [52.371807, 52.387386, 52.078663], strict=True):
         assert actual.as_py() == {"lat": expected}
 
     assert len(result_table.columns[0]) == 3
@@ -1413,7 +1415,7 @@ def test_projection_nested_new_field(file_struct: str) -> None:
 
     result_table = project(schema, [file_struct])
 
-    for actual, expected in zip(result_table.columns[0], [None, None, None]):
+    for actual, expected in zip(result_table.columns[0], [None, None, None], strict=True):
         assert actual.as_py() == {"null": expected}
     assert len(result_table.columns[0]) == 3
     assert (
@@ -1445,6 +1447,7 @@ def test_projection_nested_struct(schema_struct: Schema, file_struct: str) -> No
             {"lat": 52.387386, "long": 4.646219, "null": None},
             {"lat": 52.078663, "long": 4.288788, "null": None},
         ],
+        strict=True,
     ):
         assert actual.as_py() == expected
     assert len(result_table.columns[0]) == 3
@@ -1536,6 +1539,7 @@ def test_projection_maps_of_structs(schema_map_of_structs: Schema, file_map_of_s
                 ("4", {"latitude": 52.387386, "longitude": 4.646219, "altitude": None}),
             ],
         ],
+        strict=True,
     ):
         assert actual.as_py() == expected
     assert (
@@ -1563,7 +1567,7 @@ def test_projection_nested_struct_different_parent_id(file_struct: str) -> None:
     )
 
     result_table = project(schema, [file_struct])
-    for actual, expected in zip(result_table.columns[0], [None, None, None]):
+    for actual, expected in zip(result_table.columns[0], [None, None, None], strict=True):
         assert actual.as_py() == expected
     assert len(result_table.columns[0]) == 3
     assert (
@@ -1579,10 +1583,7 @@ def test_projection_filter_on_unprojected_field(schema_int_str: Schema, file_int
 
     result_table = project(schema, [file_int_str], GreaterThan("data", "1"), schema_int_str)
 
-    for actual, expected in zip(
-        result_table.columns[0],
-        [2],
-    ):
+    for actual, expected in zip(result_table.columns[0], [2], strict=True):
         assert actual.as_py() == expected
     assert len(result_table.columns[0]) == 1
     assert repr(result_table.schema) == "id: int32 not null"
diff --git a/tests/test_types.py b/tests/test_types.py
index 18eb909d09..6d671e951f 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -698,7 +698,7 @@ def test_deepcopy_of_singleton_fixed_type() -> None:
     list_of_fixed_types = [FixedType(22), FixedType(19)]
     copied_list = deepcopy(list_of_fixed_types)
 
-    for lhs, rhs in zip(list_of_fixed_types, copied_list):
+    for lhs, rhs in zip(list_of_fixed_types, copied_list, strict=True):
        assert id(lhs) == id(rhs)
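Note on the behavior change this patch relies on: dropping "B905" from the ruff ignore list enables flake8-bugbear's "zip() without an explicit strict=" check, and zip(..., strict=True) (available since Python 3.10) raises ValueError when the iterables have different lengths instead of silently truncating to the shortest. A minimal, self-contained illustration (not part of the patch):

    # zip(strict=True) fails loudly on a length mismatch instead of truncating.
    pairs = list(zip([1, 2, 3], ["a", "b", "c"], strict=True))  # [(1, 'a'), (2, 'b'), (3, 'c')]
    try:
        list(zip([1, 2, 3], ["a", "b"], strict=True))
    except ValueError as exc:
        print(exc)  # zip() argument 2 is shorter than argument 1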