Commit 2a56441

Merge: Revert nvme-tcp sendmsg MSG_SPLICE_PAGES use

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4775

# Merge Request Required Information

## Summary of Changes

I don't think the rest of the kernel is ready for this yet; kTLS for sure isn't, and that means that NVMe/TLS is not working as expected.

## Approved Development Ticket

JIRA: https://issues.redhat.com/browse/RHEL-49696

Signed-off-by: Chris Leech <cleech@redhat.com>
Approved-by: Maurizio Lombardi <mlombard@redhat.com>
Approved-by: John Meneghini <jmeneghi@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Lucas Zampieri <lzampier@redhat.com>

2 parents f79d833 + 3833a5b commit 2a56441
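
For context, the code being reverted (the "-" lines in the diff below) wraps each page in a bio_vec, points the msghdr's iterator at it, and hands it to sock_sendmsg() with MSG_SPLICE_PAGES so the socket layer can splice the page instead of copying it. A minimal sketch of that pattern, built from the same kernel-internal calls that appear in the removed lines (illustrative only, not part of the commit):

```c
/*
 * Illustrative sketch (not part of this commit) of the sendmsg()-based
 * splice pattern that the "-" lines below remove.
 */
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/bvec.h>
#include <linux/uio.h>

static int splice_send_page(struct socket *sock, struct page *page,
			    size_t offset, size_t len, bool last)
{
	struct bio_vec bvec;
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
	};

	msg.msg_flags |= last ? MSG_EOR : MSG_MORE;

	/* Slab-backed or otherwise unsuitable pages cannot be spliced;
	 * dropping MSG_SPLICE_PAGES makes sendmsg() copy instead. */
	if (!sendpage_ok(page))
		msg.msg_flags &= ~MSG_SPLICE_PAGES;

	bvec_set_page(&bvec, page, len, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
	return sock_sendmsg(sock, &msg);
}
```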

2 files changed, 39 insertions(+), 56 deletions(-)

drivers/nvme/host/tcp.c

Lines changed: 22 additions & 27 deletions
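
The hunks in this file drop that sendmsg() call and return to the older kernel_sendpage() interface, falling back to sock_no_sendpage() (a copying path) for pages that sendpage_ok() rejects. Roughly, the restored pattern looks like the sketch below; legacy_send_page() is a hypothetical helper for illustration, not a function from the driver:

```c
/*
 * Hypothetical helper mirroring the kernel_sendpage() pattern these
 * hunks restore.  MSG_SENDPAGE_NOTLAST accompanies MSG_MORE on this
 * path to signal that more pages will follow.
 */
#include <linux/net.h>
#include <linux/socket.h>

static int legacy_send_page(struct socket *sock, struct page *page,
			    size_t offset, size_t len, bool last)
{
	int flags = MSG_DONTWAIT;

	if (last)
		flags |= MSG_EOR;
	else
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

	if (sendpage_ok(page))
		return kernel_sendpage(sock, page, offset, len, flags);

	/* Slab or zero-refcount pages must not go through sendpage();
	 * sock_no_sendpage() copies the data via kernel_sendmsg() instead. */
	return sock_no_sendpage(sock, page, offset, len, flags);
}
```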
@@ -1027,28 +1027,25 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 	u32 h2cdata_left = req->h2cdata_left;
 
 	while (true) {
-		struct bio_vec bvec;
-		struct msghdr msg = {
-			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
-		};
 		struct page *page = nvme_tcp_req_cur_page(req);
 		size_t offset = nvme_tcp_req_cur_offset(req);
 		size_t len = nvme_tcp_req_cur_length(req);
 		bool last = nvme_tcp_pdu_last_send(req, len);
 		int req_data_sent = req->data_sent;
-		int ret;
+		int ret, flags = MSG_DONTWAIT;
 
 		if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
-			msg.msg_flags |= MSG_EOR;
+			flags |= MSG_EOR;
 		else
-			msg.msg_flags |= MSG_MORE;
+			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
 
-		if (!sendpage_ok(page))
-			msg.msg_flags &= ~MSG_SPLICE_PAGES;
-
-		bvec_set_page(&bvec, page, len, offset);
-		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
-		ret = sock_sendmsg(queue->sock, &msg);
+		if (sendpage_ok(page)) {
+			ret = kernel_sendpage(queue->sock, page, offset, len,
+					flags);
+		} else {
+			ret = sock_no_sendpage(queue->sock, page, offset, len,
+					flags);
+		}
 		if (ret <= 0)
 			return ret;
 
@@ -1087,24 +1084,22 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
 	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
-	struct bio_vec bvec;
-	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
 	bool inline_data = nvme_tcp_has_inline_data(req);
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 	int len = sizeof(*pdu) + hdgst - req->offset;
+	int flags = MSG_DONTWAIT;
 	int ret;
 
 	if (inline_data || nvme_tcp_queue_more(queue))
-		msg.msg_flags |= MSG_MORE;
+		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
 	else
-		msg.msg_flags |= MSG_EOR;
+		flags |= MSG_EOR;
 
 	if (queue->hdr_digest && !req->offset)
 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 
-	bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
-	ret = sock_sendmsg(queue->sock, &msg);
+	ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+			offset_in_page(pdu) + req->offset, len, flags);
 	if (unlikely(ret <= 0))
 		return ret;
 
@@ -1128,8 +1123,6 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 {
 	struct nvme_tcp_queue *queue = req->queue;
 	struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
-	struct bio_vec bvec;
-	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, };
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 	int len = sizeof(*pdu) - req->offset + hdgst;
 	int ret;
@@ -1138,11 +1131,13 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 		nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 
 	if (!req->h2cdata_left)
-		msg.msg_flags |= MSG_SPLICE_PAGES;
-
-	bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
-	ret = sock_sendmsg(queue->sock, &msg);
+		ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+				offset_in_page(pdu) + req->offset, len,
+				MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+	else
+		ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
+				offset_in_page(pdu) + req->offset, len,
+				MSG_DONTWAIT | MSG_MORE);
 	if (unlikely(ret <= 0))
 		return ret;
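
The PDU headers sent by nvme_tcp_try_send_cmd_pdu() and nvme_tcp_try_send_data_pdu() live at ordinary kernel virtual addresses, so the restored calls translate the pointer into its backing page and in-page offset with virt_to_page()/offset_in_page() before using the page-based senders. A hypothetical helper showing just that translation (send_pdu_hdr() and its parameters are stand-ins, not driver code):

```c
/*
 * Hypothetical helper, not taken from the driver: send a header that
 * lives at a kernel virtual address through the page-based interface
 * by locating its backing page and in-page offset, as the "+" lines
 * above do for the NVMe/TCP PDUs.
 */
#include <linux/net.h>
#include <linux/mm.h>

static int send_pdu_hdr(struct socket *sock, void *hdr, size_t already_sent,
			size_t remaining, int flags)
{
	return kernel_sendpage(sock, virt_to_page(hdr),
			       offset_in_page(hdr) + already_sent,
			       remaining, flags);
}
```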

drivers/nvme/target/tcp.c

Lines changed: 17 additions & 29 deletions
@@ -602,17 +602,13 @@ static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
 
 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
 {
-	struct msghdr msg = {
-		.msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
-	};
-	struct bio_vec bvec;
 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
 	int ret;
 
-	bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
-	ret = sock_sendmsg(cmd->queue->sock, &msg);
+	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
+			offset_in_page(cmd->data_pdu) + cmd->offset,
+			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
 	if (ret <= 0)
 		return ret;
 
@@ -633,21 +629,17 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 	int ret;
 
 	while (cmd->cur_sg) {
-		struct msghdr msg = {
-			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
-		};
 		struct page *page = sg_page(cmd->cur_sg);
-		struct bio_vec bvec;
 		u32 left = cmd->cur_sg->length - cmd->offset;
+		int flags = MSG_DONTWAIT;
 
 		if ((!last_in_batch && cmd->queue->send_list_len) ||
 		    cmd->wbytes_done + left < cmd->req.transfer_len ||
 		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
-			msg.msg_flags |= MSG_MORE;
+			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
 
-		bvec_set_page(&bvec, page, left, cmd->offset);
-		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
-		ret = sock_sendmsg(cmd->queue->sock, &msg);
+		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
+					left, flags);
 		if (ret <= 0)
 			return ret;
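
On a non-blocking socket, kernel_sendpage() (like sock_sendmsg()) can accept only part of the requested range, which is why these helpers return early on a short send and record their progress (cmd->offset) so the transfer can resume later. The sketch below shows the general idea of advancing past a partial send; send_page_to_completion() is a hypothetical helper that loops instead of returning, which is not what nvmet does:

```c
/*
 * Hypothetical helper, not taken from nvmet: push one page out to
 * completion, advancing past whatever each kernel_sendpage() call
 * managed to queue.  The real driver records partial progress in
 * cmd->offset and returns so its send state machine can resume later.
 */
#include <linux/net.h>

static int send_page_to_completion(struct socket *sock, struct page *page,
				   size_t offset, size_t len, int flags)
{
	while (len) {
		int ret = kernel_sendpage(sock, page, offset, len, flags);

		if (ret <= 0)
			return ret;	/* error or no progress: let the caller decide */
		offset += ret;
		len -= ret;
	}
	return 0;
}
```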

@@ -683,20 +675,18 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
 		bool last_in_batch)
 {
-	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
-	struct bio_vec bvec;
 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
+	int flags = MSG_DONTWAIT;
 	int ret;
 
 	if (!last_in_batch && cmd->queue->send_list_len)
-		msg.msg_flags |= MSG_MORE;
+		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
 	else
-		msg.msg_flags |= MSG_EOR;
+		flags |= MSG_EOR;
 
-	bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
-	ret = sock_sendmsg(cmd->queue->sock, &msg);
+	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
+		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
 	if (ret <= 0)
 		return ret;
 	cmd->offset += ret;
@@ -713,20 +703,18 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
 
 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
-	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
-	struct bio_vec bvec;
 	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
 	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
+	int flags = MSG_DONTWAIT;
 	int ret;
 
 	if (!last_in_batch && cmd->queue->send_list_len)
-		msg.msg_flags |= MSG_MORE;
+		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
 	else
-		msg.msg_flags |= MSG_EOR;
+		flags |= MSG_EOR;
 
-	bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
-	ret = sock_sendmsg(cmd->queue->sock, &msg);
+	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
+		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
 	if (ret <= 0)
 		return ret;
 	cmd->offset += ret;
