Commit 93221f3

Author: Ming Lei
blk-mq: Replace tags->lock with SRCU for tag iterators
JIRA: https://issues.redhat.com/browse/RHEL-120078

commit 995412e
Author: Ming Lei <ming.lei@redhat.com>
Date:   Sat Aug 30 10:18:23 2025 +0800

    blk-mq: Replace tags->lock with SRCU for tag iterators

    Replace the spinlock in blk_mq_find_and_get_req() with an SRCU read
    lock around the tag iterators. This is done by:

    - holding the SRCU read lock in blk_mq_queue_tag_busy_iter(),
      blk_mq_tagset_busy_iter(), and blk_mq_hctx_has_requests();

    - removing the now-redundant tags->lock from blk_mq_find_and_get_req().

    This change fixes a lockup in scsi_host_busy() in the
    shost->host_blocked case, and avoids heavy contention on tags->lock
    when reading the disk sysfs attribute `inflight`.

    Reviewed-by: Hannes Reinecke <hare@suse.de>
    Signed-off-by: Ming Lei <ming.lei@redhat.com>
    Reviewed-by: Yu Kuai <yukuai3@huawei.com>
    Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
    Signed-off-by: Jens Axboe <axboe@kernel.dk>

Signed-off-by: Ming Lei <ming.lei@redhat.com>
1 parent b531899 commit 93221f3
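
The pattern behind the change, in brief: the tag iterators become SRCU read-side critical sections, so the side that frees request memory must wait for an SRCU grace period instead of bouncing a spinlock as a barrier. The following is a minimal, self-contained kernel-style sketch of that pattern, not code from this patch; `demo_srcu`, `struct item`, and both function names are made up for illustration.

```c
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

DEFINE_STATIC_SRCU(demo_srcu);		/* hypothetical SRCU domain */

struct item {
	int payload;
};

static struct item __rcu *slot;

/* Reader: what the tag iterators now do around tags->rqs[] accesses. */
static int demo_read(void)
{
	struct item *p;
	int idx, val = -1;

	idx = srcu_read_lock(&demo_srcu);
	p = srcu_dereference(slot, &demo_srcu);
	if (p)
		val = p->payload;
	srcu_read_unlock(&demo_srcu, idx);
	return val;
}

/* Updater: clear the pointer, then wait out every reader before freeing. */
static void demo_clear_and_free(void)
{
	struct item *old = rcu_dereference_protected(slot, true);

	rcu_assign_pointer(slot, NULL);
	synchronize_srcu(&demo_srcu);	/* no reader can still see 'old' */
	kfree(old);
}
```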

2 files changed: +12 −24 lines

block/blk-mq-tag.c

Lines changed: 8 additions & 4 deletions
```diff
@@ -256,13 +256,10 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 					       unsigned int bitnr)
 {
 	struct request *rq;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tags->lock, flags);
 	rq = tags->rqs[bitnr];
 	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
 		rq = NULL;
-	spin_unlock_irqrestore(&tags->lock, flags);
 	return rq;
 }
 
```
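With the spinlock gone, the lookup is protected by two things: the caller's SRCU read lock (taken in the iterators below) and the request reference count, where req_ref_inc_not_zero() rejects a request whose refcount has already dropped to zero. The resulting function, reassembled from the hunk above (the comment is ours, not from the patch):

```c
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
					       unsigned int bitnr)
{
	struct request *rq;

	/* Callers hold srcu_read_lock(&tag_set->tags_srcu); see below. */
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	return rq;
}
```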
```diff
@@ -440,7 +437,9 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv)
 {
 	unsigned int flags = tagset->flags;
-	int i, nr_tags;
+	int i, nr_tags, srcu_idx;
+
+	srcu_idx = srcu_read_lock(&tagset->tags_srcu);
 
 	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;
 
@@ -449,6 +448,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
 					      BT_TAG_ITER_STARTED);
 	}
+	srcu_read_unlock(&tagset->tags_srcu, srcu_idx);
 }
 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
 
```
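Reassembling the two hunks (the loop in the middle is unchanged context, restored here from the upstream file), the whole tagset walk now sits inside one SRCU read-side critical section:

```c
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags, srcu_idx;

	srcu_idx = srcu_read_lock(&tagset->tags_srcu);

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
	srcu_read_unlock(&tagset->tags_srcu, srcu_idx);
}
```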
```diff
@@ -499,6 +499,8 @@ EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 		void *priv)
 {
+	int srcu_idx;
+
 	/*
 	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
 	 * while the queue is frozen. So we can use q_usage_counter to avoid
@@ -507,6 +509,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
 
+	srcu_idx = srcu_read_lock(&q->tag_set->tags_srcu);
 	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
 		struct blk_mq_tags *tags = q->tag_set->shared_tags;
 		struct sbitmap_queue *bresv = &tags->breserved_tags;
@@ -536,6 +539,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
 			bt_for_each(hctx, q, btags, fn, priv, false);
 		}
 	}
+	srcu_read_unlock(&q->tag_set->tags_srcu, srcu_idx);
 	blk_queue_exit(q);
 }
 
```
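For context on the `inflight` claim in the commit message: the sysfs attribute reaches this iterator through blk_mq_in_flight_rw(), shown roughly as in the upstream tree (the exact shape may differ slightly by kernel version). Previously every such read serialized on tags->lock inside blk_mq_find_and_get_req(); now it only enters an SRCU read section.

```c
struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_count_in_flight_rw, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}
```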
block/blk-mq.c

Lines changed: 4 additions & 20 deletions
```diff
@@ -3411,7 +3411,6 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
 		struct blk_mq_tags *tags)
 {
 	struct page *page;
-	unsigned long flags;
 
 	/*
 	 * There is no need to clear mapping if driver tags is not initialized
@@ -3435,15 +3434,6 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
 			}
 		}
 	}
-
-	/*
-	 * Wait until all pending iteration is done.
-	 *
-	 * Request reference is cleared and it is guaranteed to be observed
-	 * after the ->lock is released.
-	 */
-	spin_lock_irqsave(&drv_tags->lock, flags);
-	spin_unlock_irqrestore(&drv_tags->lock, flags);
 }
 
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
```
36663656
struct rq_iter_data data = {
36673657
.hctx = hctx,
36683658
};
3659+
int srcu_idx;
36693660

3661+
srcu_idx = srcu_read_lock(&hctx->queue->tag_set->tags_srcu);
36703662
blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3663+
srcu_read_unlock(&hctx->queue->tag_set->tags_srcu, srcu_idx);
3664+
36713665
return data.has_rq;
36723666
}
36733667

@@ -3887,7 +3881,6 @@ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
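The resulting helper (the tags selection at the top is unchanged context, restored from the upstream file):

```c
static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->sched_tags ?
			hctx->sched_tags : hctx->tags;
	struct rq_iter_data data = {
		.hctx = hctx,
	};
	int srcu_idx;

	srcu_idx = srcu_read_lock(&hctx->queue->tag_set->tags_srcu);
	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
	srcu_read_unlock(&hctx->queue->tag_set->tags_srcu, srcu_idx);

	return data.has_rq;
}
```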
38873881
unsigned int queue_depth, struct request *flush_rq)
38883882
{
38893883
int i;
3890-
unsigned long flags;
38913884

38923885
/* The hw queue may not be mapped yet */
38933886
if (!tags)
@@ -3897,15 +3890,6 @@ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
38973890

38983891
for (i = 0; i < queue_depth; i++)
38993892
cmpxchg(&tags->rqs[i], flush_rq, NULL);
3900-
3901-
/*
3902-
* Wait until all pending iteration is done.
3903-
*
3904-
* Request reference is cleared and it is guaranteed to be observed
3905-
* after the ->lock is released.
3906-
*/
3907-
spin_lock_irqsave(&tags->lock, flags);
3908-
spin_unlock_irqrestore(&tags->lock, flags);
39093893
}
39103894

39113895
static void blk_free_flush_queue_callback(struct rcu_head *head)
