@@ -2976,14 +2976,21 @@ void blk_mq_submit_bio(struct bio *bio)
 	}
 
 	/*
-	 * Device reconfiguration may change logical block size, so alignment
-	 * check has to be done with queue usage counter held
+	 * Device reconfiguration may change logical block size or reduce the
+	 * number of poll queues, so the checks for alignment and poll support
+	 * have to be done with queue usage counter held.
 	 */
 	if (unlikely(bio_unaligned(bio, q))) {
 		bio_io_error(bio);
 		goto queue_exit;
 	}
 
+	if ((bio->bi_opf & REQ_POLLED) && !blk_mq_can_poll(q)) {
+		bio->bi_status = BLK_STS_NOTSUPP;
+		bio_endio(bio);
+		goto queue_exit;
+	}
+
 	bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 	if (!bio)
 		goto queue_exit;
@@ -4130,12 +4137,6 @@ void blk_mq_release(struct request_queue *q)
 	blk_mq_sysfs_deinit(q);
 }
 
-static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
-{
-	return set->nr_maps > HCTX_TYPE_POLL &&
-		set->map[HCTX_TYPE_POLL].nr_queues;
-}
-
 struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
 		struct queue_limits *lim, void *queuedata)
 {
@@ -4146,7 +4147,7 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
 	if (!lim)
 		lim = &default_lim;
 	lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
-	if (blk_mq_can_poll(set))
+	if (set->nr_maps > HCTX_TYPE_POLL)
 		lim->features |= BLK_FEAT_POLL;
 
 	q = blk_alloc_queue(lim, set->numa_node);
@@ -4819,8 +4820,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 fallback:
 	blk_mq_update_queue_map(set);
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
-		struct queue_limits lim;
-
 		blk_mq_realloc_hw_ctxs(set, q);
 
 		if (q->nr_hw_queues != set->nr_hw_queues) {
@@ -4834,13 +4833,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 			set->nr_hw_queues = prev_nr_hw_queues;
 			goto fallback;
 		}
-		lim = queue_limits_start_update(q);
-		if (blk_mq_can_poll(set))
-			lim.features |= BLK_FEAT_POLL;
-		else
-			lim.features &= ~BLK_FEAT_POLL;
-		if (queue_limits_commit_update(q, &lim) < 0)
-			pr_warn("updating the poll flag failed\n");
 		blk_mq_map_swqueue(q);
 	}
 
@@ -4900,9 +4892,9 @@ static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
 		struct io_comp_batch *iob, unsigned int flags)
{
-	struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie);
-
-	return blk_hctx_poll(q, hctx, iob, flags);
+	if (!blk_mq_can_poll(q))
+		return 0;
+	return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags);
 }
 
 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
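
Note on the helper: the hunks above remove the tag_set-based blk_mq_can_poll() from blk-mq.c, while the new callers in blk_mq_submit_bio() and blk_mq_poll() pass the request_queue, so a queue-based variant must be defined outside the lines shown here (presumably in a shared blk-mq header). A minimal sketch of what such a helper could look like, assuming it combines the queue's BLK_FEAT_POLL feature flag with the tag set's poll queue map; the exact body and placement are not part of this diff:

/* Hedged sketch only; the real helper is not shown in the hunks above. */
static inline bool blk_mq_can_poll(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_POLL) &&
		q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
}

Checking the live queue this way, under the queue usage counter taken in blk_mq_submit_bio(), is what lets __blk_mq_update_nr_hw_queues() stop rewriting BLK_FEAT_POLL through queue_limits_commit_update() when the number of poll queues changes.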