Commit 0c8df15

Merge tag 'block-6.18-20251016' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux
Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - iostats accounting fixed on multipath retries (Amit)
     - secure concatenation response fixup (Martin)
     - tls partial record fixup (Wilfred)

 - Fix for a lockdep reported issue with the elevator lock and blk group
   frozen operations

 - Fix for a regression in this merge window, where updating
   'nr_requests' would not do the right thing for queues with shared tags

* tag 'block-6.18-20251016' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  nvme/tcp: handle tls partially sent records in write_space()
  block: Remove elevator_lock usage from blkg_conf frozen operations
  blk-mq: fix stale tag depth for shared sched tags in blk_mq_update_nr_requests()
  nvme-auth: update sc_c in host response
  nvme-multipath: Skip nr_active increments in RETRY disposition
2 parents cf1ea88 + f0624c6 commit 0c8df15

File tree

8 files changed: +23 -17 lines changed

block/blk-cgroup.c

Lines changed: 4 additions & 9 deletions

@@ -812,8 +812,7 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
 }
 /*
  * Similar to blkg_conf_open_bdev, but additionally freezes the queue,
- * acquires q->elevator_lock, and ensures the correct locking order
- * between q->elevator_lock and q->rq_qos_mutex.
+ * ensures the correct locking order between freeze queue and q->rq_qos_mutex.
  *
  * This function returns negative error on failure. On success it returns
  * memflags which must be saved and later passed to blkg_conf_exit_frozen
@@ -834,13 +833,11 @@ unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
 	 * At this point, we haven't started protecting anything related to QoS,
 	 * so we release q->rq_qos_mutex here, which was first acquired in blkg_
 	 * conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing
-	 * the queue and acquiring q->elevator_lock to maintain the correct
-	 * locking order.
+	 * the queue to maintain the correct locking order.
 	 */
 	mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
 
 	memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
-	mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
 	mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);
 
 	return memflags;
@@ -995,17 +992,15 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
 EXPORT_SYMBOL_GPL(blkg_conf_exit);
 
 /*
- * Similar to blkg_conf_exit, but also unfreezes the queue and releases
- * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
- * is used to open the bdev.
+ * Similar to blkg_conf_exit, but also unfreezes the queue. Should be used
+ * when blkg_conf_open_bdev_frozen is used to open the bdev.
  */
 void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
 {
 	if (ctx->bdev) {
 		struct request_queue *q = ctx->bdev->bd_queue;
 
 		blkg_conf_exit(ctx);
-		mutex_unlock(&q->elevator_lock);
 		blk_mq_unfreeze_queue(q, memflags);
 	}
 }
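
Note on the blk-cgroup.c change: the frozen open/exit helpers now only order the queue freeze against q->rq_qos_mutex; q->elevator_lock is taken out of the chain entirely, which is the lockdep dependency the merge description refers to. A minimal caller sketch under that assumption (hypothetical code using the blkg_conf_init()/blkg_conf_open_bdev_frozen()/blkg_conf_exit_frozen() lifecycle; `input` and the error path are simplified):

	struct blkg_conf_ctx ctx;
	unsigned long memflags;

	blkg_conf_init(&ctx, input);

	/* Freezes the queue, then takes q->rq_qos_mutex (no elevator_lock). */
	memflags = blkg_conf_open_bdev_frozen(&ctx);
	if (IS_ERR_VALUE(memflags))
		return memflags;	/* negative error, error path simplified */

	/* ... look up the blkg and update QoS state while the queue is frozen ... */

	/* Runs blkg_conf_exit() under the hood, then unfreezes the queue. */
	blkg_conf_exit_frozen(&ctx, memflags);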

block/blk-mq-sched.c

Lines changed: 1 addition & 1 deletion

@@ -557,7 +557,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
 	if (blk_mq_is_shared_tags(flags)) {
 		/* Shared tags are stored at index 0 in @et->tags. */
 		q->sched_shared_tags = et->tags[0];
-		blk_mq_tag_update_sched_shared_tags(q);
+		blk_mq_tag_update_sched_shared_tags(q, et->nr_requests);
 	}
 
 	queue_for_each_hw_ctx(q, hctx, i) {

block/blk-mq-tag.c

Lines changed: 3 additions & 2 deletions

@@ -622,10 +622,11 @@ void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size
 	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
 }
 
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+					 unsigned int nr)
 {
 	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
-			     q->nr_requests - q->tag_set->reserved_tags);
+			     nr - q->tag_set->reserved_tags);
 }
 
 /**

block/blk-mq.c

Lines changed: 1 addition & 1 deletion

@@ -4941,7 +4941,7 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
 		 * tags can't grow, see blk_mq_alloc_sched_tags().
 		 */
 		if (q->elevator)
-			blk_mq_tag_update_sched_shared_tags(q);
+			blk_mq_tag_update_sched_shared_tags(q, nr);
 		else
 			blk_mq_tag_resize_shared_tags(set, nr);
 	} else if (!q->elevator) {

block/blk-mq.h

Lines changed: 2 additions & 1 deletion

@@ -186,7 +186,8 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
 void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
 		unsigned int size);
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+		unsigned int nr);
 
 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
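
The four block/ hunks above (blk-mq-sched.c, blk-mq-tag.c, blk-mq.c, blk-mq.h) are one fix: blk_mq_tag_update_sched_shared_tags() now takes the target depth as an argument instead of reading q->nr_requests, which can still hold the old value at the point the shared scheduler tags are resized. A rough sketch of the difference, assuming (as the fix implies) that q->nr_requests is stale when the resize runs; names come from the diff, the call sites are simplified:

	/* before: depth read back from the queue, which may be stale here */
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);

	/* after: the caller passes the depth it actually wants, i.e. `nr` in
	 * blk_mq_update_nr_requests() or et->nr_requests at scheduler init */
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     nr - q->tag_set->reserved_tags);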

drivers/nvme/host/auth.c

Lines changed: 5 additions & 1 deletion

@@ -36,6 +36,7 @@ struct nvme_dhchap_queue_context {
 	u8 status;
 	u8 dhgroup_id;
 	u8 hash_id;
+	u8 sc_c;
 	size_t hash_len;
 	u8 c1[64];
 	u8 c2[64];
@@ -154,6 +155,8 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
 	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
 	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
 
+	chap->sc_c = data->sc_c;
+
 	return size;
 }
 
@@ -489,7 +492,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
 	ret = crypto_shash_update(shash, buf, 2);
 	if (ret)
 		goto out;
-	memset(buf, 0, sizeof(buf));
+	*buf = chap->sc_c;
 	ret = crypto_shash_update(shash, buf, 1);
 	if (ret)
 		goto out;
@@ -500,6 +503,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
 			strlen(ctrl->opts->host->nqn));
 	if (ret)
 		goto out;
+	memset(buf, 0, sizeof(buf));
 	ret = crypto_shash_update(shash, buf, 1);
 	if (ret)
 		goto out;
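
Note on the nvme-auth change (the "secure concatenation response fixup" from the merge description): the DH-HMAC-CHAP host response must bind the SC_C value the host actually sent in its negotiate data, but the transcript previously hashed a hard-coded zero byte in that position. The flow after the fix, condensed from the hunks above:

	/* nvme_auth_set_dhchap_negotiate_data(): remember the SC_C we sent */
	chap->sc_c = data->sc_c;

	/* nvme_auth_dhchap_setup_host_response(): hash that same byte into
	 * the response digest instead of zero */
	*buf = chap->sc_c;
	ret = crypto_shash_update(shash, buf, 1);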

drivers/nvme/host/multipath.c

Lines changed: 4 additions & 2 deletions

@@ -182,12 +182,14 @@ void nvme_mpath_start_request(struct request *rq)
 	struct nvme_ns *ns = rq->q->queuedata;
 	struct gendisk *disk = ns->head->disk;
 
-	if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
+	if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) &&
+	    !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
 		atomic_inc(&ns->ctrl->nr_active);
 		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
 	}
 
-	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
+	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) ||
+	    (nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
 		return;
 
 	nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
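
Note on the nvme-multipath change: a command that completes with the RETRY disposition is requeued and passes through nvme_mpath_start_request() again for the same struct request, so without these guards the queue-depth iopolicy counter was bumped twice (and iostats restarted) for a single I/O. A hypothetical retry timeline the new flag checks protect against:

	nvme_mpath_start_request(rq);	/* first submit: nr_active++, flags set */
	/* ... command fails with the RETRY disposition, rq is requeued ... */
	nvme_mpath_start_request(rq);	/* retry: flags already set, so neither
					 * nr_active++ nor the iostats start runs again */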

drivers/nvme/host/tcp.c

Lines changed: 3 additions & 0 deletions

@@ -1081,6 +1081,9 @@ static void nvme_tcp_write_space(struct sock *sk)
 	queue = sk->sk_user_data;
 	if (likely(queue && sk_stream_is_writeable(sk))) {
 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		/* Ensure pending TLS partial records are retried */
+		if (nvme_tcp_queue_tls(queue))
+			queue->write_space(sk);
 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 	}
 	read_unlock_bh(&sk->sk_callback_lock);
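
Note on the nvme/tcp change: with TLS, the kTLS layer can leave a record partially transmitted and relies on its own sk_write_space handler to push the remainder once the socket has room again. nvme_tcp installs nvme_tcp_write_space() as the socket callback and keeps the previous handler in queue->write_space, so on TLS queues that saved handler has to be chained to, otherwise the pending record is never resent and the queue stalls. A hedged sketch of the callback save assumed at queue setup (not part of this diff, shown only to explain what queue->write_space points at in the hunk above):

	write_lock_bh(&sk->sk_callback_lock);
	queue->write_space = sk->sk_write_space;	/* the TLS layer's handler on TLS sockets */
	sk->sk_write_space = nvme_tcp_write_space;
	write_unlock_bh(&sk->sk_callback_lock);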
