Skip to content

Commit d282e4f

Browse files
committed
chore(ruff): apply ruff format
See https://astral.sh/blog/ruff-v0.9.0 for the list of changes
1 parent aae2833 commit d282e4f

File tree

6 files changed

+13
-14
lines changed

6 files changed

+13
-14
lines changed

tests/test_cli.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -377,9 +377,9 @@ def test_skip_extraction(
377377

378378
assert result.exit_code == 0
379379
process_file_mock.assert_called_once()
380-
assert (
381-
process_file_mock.call_args.args[0].skip_extraction == skip_extraction
382-
), fail_message
380+
assert process_file_mock.call_args.args[0].skip_extraction == skip_extraction, (
381+
fail_message
382+
)
383383

384384

385385
@pytest.mark.parametrize(

unblob/cli.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -177,7 +177,7 @@ def __init__(
177177
help=f"""Skip processing files with given magic prefix.
178178
The provided values are appended to unblob's own skip magic list unless
179179
--clear-skip-magic is provided.
180-
[default: {', '.join(DEFAULT_SKIP_MAGIC)}]
180+
[default: {", ".join(DEFAULT_SKIP_MAGIC)}]
181181
""",
182182
multiple=True,
183183
)
@@ -481,12 +481,12 @@ def print_report(reports: ProcessResult):
481481
chunks_distribution.items(), key=lambda item: item[1], reverse=True
482482
):
483483
chunks_table.add_row(
484-
handler.upper(), human_size(size), f"{(size/total_size) * 100:0.2f}%"
484+
handler.upper(), human_size(size), f"{(size / total_size) * 100:0.2f}%"
485485
)
486486

487487
console.print(chunks_table)
488488
console.print(
489-
f"Chunk identification ratio: [#00FFC8]{(valid_size/total_size) * 100:0.2f}%[/#00FFC8]"
489+
f"Chunk identification ratio: [#00FFC8]{(valid_size / total_size) * 100:0.2f}%[/#00FFC8]"
490490
)
491491

492492
if len(reports.errors):

unblob/finder.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,7 @@
66
from functools import lru_cache
77
from typing import Optional
88

9-
import attr
9+
import attrs
1010
from pyperscan import Flag, Pattern, Scan, StreamDatabase
1111
from structlog import get_logger
1212

@@ -19,7 +19,7 @@
1919
logger = get_logger()
2020

2121

22-
@attr.define
22+
@attrs.define
2323
class HyperscanMatchContext:
2424
file: File
2525
file_size: int

unblob/handlers/archive/tar.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -206,7 +206,7 @@ def _padded_field(re_content_char, size, leftpad_re=" ", rightpad_re=r"[ \0x00]"
206206
field_regexes = []
207207

208208
for padsize in range(size):
209-
content_re = f"{re_content_char}{{{size-padsize}}}"
209+
content_re = f"{re_content_char}{{{size - padsize}}}"
210210

211211
for leftpadsize in range(padsize + 1):
212212
rightpadsize = padsize - leftpadsize

unblob/handlers/compression/_gzip_reader.py

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -30,8 +30,7 @@ def read(self):
3030
break
3131
if buf == b"":
3232
raise EOFError(
33-
"Compressed file ended before the "
34-
"end-of-stream marker was reached"
33+
"Compressed file ended before the end-of-stream marker was reached"
3534
)
3635

3736
self._add_read_data(uncompress)

unblob/testing.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -48,9 +48,9 @@ def gather_integration_tests(test_data_path: Path):
4848
for input_dir, output_dir, test_id in zip(
4949
test_input_dirs, test_output_dirs, test_ids
5050
):
51-
assert (
52-
list(input_dir.iterdir()) != []
53-
), f"Integration test input dir should contain at least 1 file: {input_dir}"
51+
assert list(input_dir.iterdir()) != [], (
52+
f"Integration test input dir should contain at least 1 file: {input_dir}"
53+
)
5454

5555
yield pytest.param(input_dir, output_dir, id=test_id)
5656

0 commit comments

Comments (0)