
Commit ceab9de

Merge: block: fix sysfs store hang
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6164
JIRA: https://issues.redhat.com/browse/RHEL-71345
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Approved-by: Ewan D. Milne <emilne@redhat.com>
Approved-by: Jeff Moyer <jmoyer@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Patrick Talbert <ptalbert@redhat.com>
2 parents: 3554d4b + a4d85bf

File tree

17 files changed: +191 additions, -179 deletions


block/blk-core.c

Lines changed: 11 additions & 10 deletions

@@ -613,8 +613,14 @@ static void __submit_bio(struct bio *bio)
 		blk_mq_submit_bio(bio);
 	} else if (likely(bio_queue_enter(bio) == 0)) {
 		struct gendisk *disk = bio->bi_bdev->bd_disk;
-
-		disk->fops->submit_bio(bio);
+
+		if ((bio->bi_opf & REQ_POLLED) &&
+		    !(disk->queue->limits.features & BLK_FEAT_POLL)) {
+			bio->bi_status = BLK_STS_NOTSUPP;
+			bio_endio(bio);
+		} else {
+			disk->fops->submit_bio(bio);
+		}
 		blk_queue_exit(disk->queue);
 	}

@@ -788,12 +794,6 @@ void submit_bio_noacct(struct bio *bio)
 		}
 	}

-	if (!(q->limits.features & BLK_FEAT_POLL) &&
-	    (bio->bi_opf & REQ_POLLED)) {
-		bio_clear_polled(bio);
-		goto not_supported;
-	}
-
 	switch (bio_op(bio)) {
 	case REQ_OP_READ:
 		break;

@@ -918,7 +918,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 		return 0;

 	q = bdev_get_queue(bdev);
-	if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL))
+	if (cookie == BLK_QC_T_NONE)
 		return 0;

 	blk_flush_plug(current->plug, false);

@@ -939,7 +939,8 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 	} else {
 		struct gendisk *disk = q->disk;

-		if (disk && disk->fops->poll_bio)
+		if ((q->limits.features & BLK_FEAT_POLL) && disk &&
+		    disk->fops->poll_bio)
 			ret = disk->fops->poll_bio(bio, iob, flags);
 	}
 	blk_queue_exit(q);
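
For bio-based drivers the change above means a REQ_POLLED bio aimed at a queue without BLK_FEAT_POLL is now completed from __submit_bio() with BLK_STS_NOTSUPP instead of being downgraded via bio_clear_polled() in submit_bio_noacct(). Below is a minimal sketch (not part of this commit) of how a submitter might observe that; example_polled_endio() and example_submit_polled() are hypothetical names, and reaping successful polled completions via bio_poll() is out of scope here.

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_polled_endio(struct bio *bio)
{
	/* BLK_STS_NOTSUPP here means the queue cannot service polled I/O. */
	if (bio->bi_status == BLK_STS_NOTSUPP)
		pr_debug("polled I/O not supported on %pg, retry without REQ_POLLED\n",
			 bio->bi_bdev);
	bio_put(bio);
}

static void example_submit_polled(struct block_device *bdev, struct page *page)
{
	/* Hypothetical helper: issue one polled read and let the endio sort it out. */
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ | REQ_POLLED, GFP_KERNEL);

	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = example_polled_endio;
	/* The NOTSUPP completion, if any, happens synchronously at submit time. */
	submit_bio(bio);
}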

block/blk-integrity.c

Lines changed: 1 addition & 3 deletions

@@ -218,9 +218,7 @@ static ssize_t flag_store(struct device *dev, const char *page, size_t count,
 	else
 		lim.integrity.flags |= flag;

-	blk_mq_freeze_queue(q);
-	err = queue_limits_commit_update(q, &lim);
-	blk_mq_unfreeze_queue(q);
+	err = queue_limits_commit_update_frozen(q, &lim);
 	if (err)
 		return err;
 	return count;
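
The conversion pattern shown here is the same in any limits-updating store handler: the open-coded freeze/commit/unfreeze triplet collapses into one call. A sketch under that assumption, with a hypothetical example_flag_store() helper; only queue_limits_start_update() and queue_limits_commit_update_frozen() come from the code in this series.

#include <linux/blkdev.h>

static ssize_t example_flag_store(struct request_queue *q, bool set,
				  unsigned int flag, size_t count)
{
	struct queue_limits lim = queue_limits_start_update(q);
	int err;

	if (set)
		lim.integrity.flags |= flag;
	else
		lim.integrity.flags &= ~flag;

	/* Freezes the queue, commits the limits, then unfreezes. */
	err = queue_limits_commit_update_frozen(q, &lim);
	if (err)
		return err;
	return count;
}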

block/blk-mq.c

Lines changed: 13 additions & 21 deletions

@@ -2976,14 +2976,21 @@ void blk_mq_submit_bio(struct bio *bio)
 	}

 	/*
-	 * Device reconfiguration may change logical block size, so alignment
-	 * check has to be done with queue usage counter held
+	 * Device reconfiguration may change logical block size or reduce the
+	 * number of poll queues, so the checks for alignment and poll support
+	 * have to be done with queue usage counter held.
 	 */
 	if (unlikely(bio_unaligned(bio, q))) {
 		bio_io_error(bio);
 		goto queue_exit;
 	}

+	if ((bio->bi_opf & REQ_POLLED) && !blk_mq_can_poll(q)) {
+		bio->bi_status = BLK_STS_NOTSUPP;
+		bio_endio(bio);
+		goto queue_exit;
+	}
+
 	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 	if (!bio)
 		goto queue_exit;

@@ -4130,12 +4137,6 @@ void blk_mq_release(struct request_queue *q)
 	blk_mq_sysfs_deinit(q);
 }

-static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
-{
-	return set->nr_maps > HCTX_TYPE_POLL &&
-		set->map[HCTX_TYPE_POLL].nr_queues;
-}
-
 struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
 		struct queue_limits *lim, void *queuedata)
 {

@@ -4146,7 +4147,7 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
 	if (!lim)
 		lim = &default_lim;
 	lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
-	if (blk_mq_can_poll(set))
+	if (set->nr_maps > HCTX_TYPE_POLL)
 		lim->features |= BLK_FEAT_POLL;

 	q = blk_alloc_queue(lim, set->numa_node);

@@ -4819,8 +4820,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 fallback:
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
-		struct queue_limits lim;
-
 		blk_mq_realloc_hw_ctxs(set, q);

 		if (q->nr_hw_queues != set->nr_hw_queues) {

@@ -4834,13 +4833,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 			set->nr_hw_queues = prev_nr_hw_queues;
 			goto fallback;
 		}
-		lim = queue_limits_start_update(q);
-		if (blk_mq_can_poll(set))
-			lim.features |= BLK_FEAT_POLL;
-		else
-			lim.features &= ~BLK_FEAT_POLL;
-		if (queue_limits_commit_update(q, &lim) < 0)
-			pr_warn("updating the poll flag failed\n");
 		blk_mq_map_swqueue(q);
 	}

@@ -4900,9 +4892,9 @@ static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
 		struct io_comp_batch *iob, unsigned int flags)
 {
-	struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
-
-	return blk_hctx_poll(q, hctx, iob, flags);
+	if (!blk_mq_can_poll(q))
+		return 0;
+	return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags);
 }

 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
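
With the per-queue BLK_FEAT_POLL toggling removed from __blk_mq_update_nr_hw_queues(), the feature bit is set once at allocation time (nr_maps > HCTX_TYPE_POLL) and actual pollability is re-checked per bio by blk_mq_can_poll() while q_usage_counter is held. A hypothetical driver-side sketch of a tag set that would get BLK_FEAT_POLL this way; the function name and values are illustrative only.

#include <linux/blk-mq.h>
#include <linux/string.h>

static int example_init_tag_set(struct blk_mq_tag_set *set,
				const struct blk_mq_ops *ops,
				unsigned int nr_io_queues,
				unsigned int nr_poll_queues)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = 128;
	set->nr_hw_queues = nr_io_queues + nr_poll_queues;
	/*
	 * nr_maps > HCTX_TYPE_POLL makes blk_mq_alloc_queue() set
	 * BLK_FEAT_POLL; the POLL map's nr_queues (filled in by the
	 * driver's ->map_queues()) is what blk_mq_can_poll() checks.
	 */
	set->nr_maps = nr_poll_queues ? HCTX_MAX_TYPES : 1;
	return blk_mq_alloc_tag_set(set);
}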

block/blk-mq.h

Lines changed: 6 additions & 0 deletions

@@ -438,4 +438,10 @@ do { \
 #define blk_mq_run_dispatch_ops(q, dispatch_ops)	\
 	__blk_mq_run_dispatch_ops(q, true, dispatch_ops) \

+static inline bool blk_mq_can_poll(struct request_queue *q)
+{
+	return (q->limits.features & BLK_FEAT_POLL) &&
+		q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
+}
+
 #endif
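
blk_mq_can_poll() folds the static feature flag together with the live count of HCTX_TYPE_POLL queues, so "can this queue poll right now" has a single answer. A hypothetical read-only attribute built on it, illustrative only and not part of the shown hunks:

#include <linux/sysfs.h>
#include <linux/blkdev.h>
#include "blk-mq.h"

static ssize_t example_poll_show(struct gendisk *disk, char *page)
{
	/* Reports whether polled I/O can currently be completed on this queue. */
	return sysfs_emit(page, "%d\n", blk_mq_can_poll(disk->queue));
}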

block/blk-settings.c

Lines changed: 26 additions & 1 deletion

@@ -389,7 +389,8 @@ int blk_set_default_limits(struct queue_limits *lim)
  * @lim:	limits to apply
  *
  * Apply the limits in @lim that were obtained from queue_limits_start_update()
- * and updated by the caller to @q.
+ * and updated by the caller to @q. The caller must have frozen the queue or
+ * ensure that there are no outstanding I/Os by other means.
  *
  * Returns 0 if successful, else a negative error code.
  */

@@ -419,6 +420,30 @@ int queue_limits_commit_update(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(queue_limits_commit_update);

+/**
+ * queue_limits_commit_update_frozen - commit an atomic update of queue limits
+ * @q:		queue to update
+ * @lim:	limits to apply
+ *
+ * Apply the limits in @lim that were obtained from queue_limits_start_update()
+ * and updated with the new values by the caller to @q. Freezes the queue
+ * before the update and unfreezes it after.
+ *
+ * Returns 0 if successful, else a negative error code.
+ */
+int queue_limits_commit_update_frozen(struct request_queue *q,
+		struct queue_limits *lim)
+{
+	int ret;
+
+	blk_mq_freeze_queue(q);
+	ret = queue_limits_commit_update(q, lim);
+	blk_mq_unfreeze_queue(q);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);
+
 /**
  * queue_limits_commit_set - apply queue limits to queue
  * @q:	queue to update
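
The updated kerneldoc spells out the contract: plain queue_limits_commit_update() is only safe when the caller has frozen the queue or no I/O can be outstanding, while the new _frozen variant brackets the commit itself. A hedged sketch contrasting the two in a hypothetical driver; logical_block_size is just a stand-in limit and both function names are made up.

#include <linux/blkdev.h>

/* Probe path: the disk is not added yet, so no I/O can be in flight. */
static int example_probe_set_limits(struct request_queue *q, unsigned int lbs)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.logical_block_size = lbs;
	return queue_limits_commit_update(q, &lim);
}

/* Runtime reconfiguration: let the helper freeze the queue around the commit. */
static int example_runtime_set_limits(struct request_queue *q, unsigned int lbs)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.logical_block_size = lbs;
	return queue_limits_commit_update_frozen(q, &lim);
}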
