Skip to content

Commit 0c94038

Browse files
committed
Merge: dm-crypt: limit the size of encryption requests
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4779 JIRA: https://issues.redhat.com/browse/RHEL-49548 Tested: cryptsetup tests and the bug reproducer Upstream Status: kernel/git/device-mapper/linux-dm.git commit 0d815e3 Author: Mikulas Patocka <mpatocka@redhat.com> Date: Wed Jul 3 15:00:29 2024 +0200 dm-crypt: limit the size of encryption requests There was a performance regression reported where dm-crypt would perform worse on new kernels than on old kernels. The reason is that the old kernels split the bios to NVMe request size (that is usually 65536 or 131072 bytes) and the new kernels pass the big bios through dm-crypt and split them underneath. If a big 1MiB bio is passed to dm-crypt, dm-crypt processes it on a single core without parallelization and this is what causes the performance degradation. This commit introduces new tunable variables /sys/module/dm_crypt/parameters/max_read_size and /sys/module/dm_crypt/parameters/max_write_size that specify the maximum bio size for dm-crypt. Bios larger than this value are split, so that they can be encrypted in parallel by multiple cores. If these variables are '0', a default 131072 is used. Splitting bios may cause performance regressions in other workloads - if this happens, the user should increase the value in max_read_size and max_write_size variables. max_read_size: 128k 2399MiB/s 256k 2368MiB/s 512k 1986MiB/s 1024k 1790MiB/s max_write_size: 128k 1712MiB/s 256k 1651MiB/s 512k 1537MiB/s 1024k 1332MiB/s Note that if you run dm-crypt inside a virtual machine, you may need to do "echo numa >/sys/module/workqueue/parameters/default_affinity_scope" to improve performance. Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Tested-by: Laurence Oberman <loberman@redhat.com> Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com> Approved-by: Mikuláš Patočka <mpatocka@redhat.com> Approved-by: John B.
Wyatt IV <jwyatt@redhat.com> Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> Merged-by: Lucas Zampieri <lzampier@redhat.com>
2 parents 2e7049b + 71f6daa commit 0c94038

File tree

2 files changed

+40
-3
lines changed

2 files changed

+40
-3
lines changed

Documentation/admin-guide/device-mapper/dm-crypt.rst

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -155,6 +155,17 @@ iv_large_sectors
155155
The <iv_offset> must be multiple of <sector_size> (in 512 bytes units)
156156
if this flag is specified.
157157

158+
159+
Module parameters::
160+
max_read_size
161+
max_write_size
162+
Maximum size of read or write requests. When a request larger than this size
163+
is received, dm-crypt will split the request. The splitting improves
164+
concurrency (the split requests could be encrypted in parallel by multiple
165+
cores), but it also causes overhead. The user should tune these parameters to
166+
fit the actual workload.
167+
168+
158169
Example scripts
159170
===============
160171
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk

drivers/md/dm-crypt.c

Lines changed: 29 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -238,6 +238,31 @@ static unsigned int dm_crypt_clients_n;
238238
static volatile unsigned long dm_crypt_pages_per_client;
239239
#define DM_CRYPT_MEMORY_PERCENT 2
240240
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
241+
#define DM_CRYPT_DEFAULT_MAX_READ_SIZE 131072
242+
#define DM_CRYPT_DEFAULT_MAX_WRITE_SIZE 131072
243+
244+
static unsigned int max_read_size = 0;
245+
module_param(max_read_size, uint, 0644);
246+
MODULE_PARM_DESC(max_read_size, "Maximum size of a read request");
247+
static unsigned int max_write_size = 0;
248+
module_param(max_write_size, uint, 0644);
249+
MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
250+
static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
251+
{
252+
unsigned val, sector_align;
253+
val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size);
254+
if (likely(!val))
255+
val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE;
256+
if (wrt || cc->on_disk_tag_size) {
257+
if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT))
258+
val = BIO_MAX_VECS << PAGE_SHIFT;
259+
}
260+
sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned)cc->sector_size);
261+
val = round_down(val, sector_align);
262+
if (unlikely(!val))
263+
val = sector_align;
264+
return val >> SECTOR_SHIFT;
265+
}
241266

242267
static void crypt_endio(struct bio *clone);
243268
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -3435,6 +3460,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
34353460
{
34363461
struct dm_crypt_io *io;
34373462
struct crypt_config *cc = ti->private;
3463+
unsigned max_sectors;
34383464

34393465
/*
34403466
* If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
@@ -3453,9 +3479,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
34533479
/*
34543480
* Check if bio is too large, split as needed.
34553481
*/
3456-
if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
3457-
(bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3458-
dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));
3482+
max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE);
3483+
if (unlikely(bio_sectors(bio) > max_sectors))
3484+
dm_accept_partial_bio(bio, max_sectors);
34593485

34603486
/*
34613487
* Ensure that bio is a multiple of internal sector encryption size

0 commit comments

Comments
 (0)