Commit 12fa57b

blk-mq: Move flush queue allocation into blk_mq_init_hctx()

Author: Ming Lei (committed)
1 parent 9e4302f

JIRA: https://issues.redhat.com/browse/RHEL-120078

commit aba19ee
Author: Ming Lei <ming.lei@redhat.com>
Date:   Sat Aug 30 10:18:19 2025 +0800

    blk-mq: Move flush queue allocation into blk_mq_init_hctx()

    Move flush queue allocation into blk_mq_init_hctx() and its release
    into blk_mq_exit_hctx(), preparing to replace tags->lock with SRCU
    for draining the inflight request walk.

    blk_mq_exit_hctx() is the last chance for us to get a valid `tag_set`
    reference, and we need to add one SRCU to `tag_set` for freeing the
    flush request via call_srcu().

    It is safe to move flush queue & request release into
    blk_mq_exit_hctx(), because blk_mq_clear_flush_rq_mapping() clears
    the flush request reference in the driver tags inflight request
    table, and meanwhile the inflight request walk is drained.

    Reviewed-by: Hannes Reinecke <hare@suse.de>
    Reviewed-by: Yu Kuai <yukuai3@huawei.com>
    Signed-off-by: Ming Lei <ming.lei@redhat.com>
    Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
    Signed-off-by: Jens Axboe <axboe@kernel.dk>

Signed-off-by: Ming Lei <ming.lei@redhat.com>

2 files changed: +13 / -8 lines

block/blk-mq-sysfs.c

Lines changed: 0 additions & 1 deletion

@@ -34,7 +34,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
 	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
 						  kobj);
 
-	blk_free_flush_queue(hctx->fq);
 	sbitmap_free(&hctx->ctx_map);
 	free_cpumask_var(hctx->cpumask);
 	kfree(hctx->ctxs);

block/blk-mq.c

Lines changed: 13 additions & 7 deletions

@@ -3935,6 +3935,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
+	blk_free_flush_queue(hctx->fq);
+	hctx->fq = NULL;
+
 	xa_erase(&q->hctx_table, hctx_idx);
 
 	spin_lock(&q->unused_hctx_lock);
@@ -3960,13 +3963,19 @@ static int blk_mq_init_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
 {
+	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
+
+	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
+	if (!hctx->fq)
+		goto fail;
+
 	hctx->queue_num = hctx_idx;
 
 	hctx->tags = set->tags[hctx_idx];
 
 	if (set->ops->init_hctx &&
 	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
-		goto fail;
+		goto fail_free_fq;
 
 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
 				hctx->numa_node))
@@ -3983,6 +3992,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
+ fail_free_fq:
+	blk_free_flush_queue(hctx->fq);
+	hctx->fq = NULL;
  fail:
 	return -1;
 }
@@ -4034,16 +4046,10 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
 	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
 
-	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
-	if (!hctx->fq)
-		goto free_bitmap;
-
 	blk_mq_hctx_kobj_init(hctx);
 
 	return hctx;
 
- free_bitmap:
-	sbitmap_free(&hctx->ctx_map);
  free_ctxs:
 	kfree(hctx->ctxs);
  free_cpumask: