Skip to content

Commit 054776f

Browse files
committed
Merge: Rebase MANA hyperv driver to v6.14
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6508 This rebases the MANA hyperv driver to upstream kernel 6.14-rc5. JIRA: https://issues.redhat.com/browse/RHEL-80098 Tested: Smoke boot test on an Azure machine; the IB portion is not yet tested. Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> Approved-by: Kamal Heib <kheib@redhat.com> Approved-by: Vitaly Kuznetsov <vkuznets@redhat.com> Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> Merged-by: Augusto Caringi <acaringi@redhat.com>
2 parents cf0309b + f445ac5 commit 054776f

File tree

7 files changed

+390
-119
lines changed

7 files changed

+390
-119
lines changed

drivers/infiniband/hw/mana/main.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
174174

175175
req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
176176
req.num_resources = 1;
177-
req.alignment = 1;
177+
req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
178178

179179
/* Have GDMA start searching from 0 */
180180
req.allocated_resources = 0;
@@ -383,7 +383,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem
383383

384384
create_req->length = umem->length;
385385
create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
386-
create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
386+
create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
387387
create_req->page_count = num_pages_total;
388388

389389
ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
@@ -511,13 +511,13 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
511511
PAGE_SHIFT;
512512
prot = pgprot_writecombine(vma->vm_page_prot);
513513

514-
ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot,
514+
ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
515515
NULL);
516516
if (ret)
517517
ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
518518
else
519-
ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n",
520-
pfn, gc->db_page_size, ret);
519+
ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
520+
pfn, PAGE_SIZE, ret);
521521

522522
return ret;
523523
}

drivers/net/ethernet/microsoft/mana/gdma_main.c

Lines changed: 45 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
22
/* Copyright (c) 2021, Microsoft Corporation. */
33

4+
#include <linux/debugfs.h>
45
#include <linux/module.h>
56
#include <linux/pci.h>
67
#include <linux/utsname.h>
@@ -9,6 +10,7 @@
910
#include <net/mana/mana.h>
1011

1112
#include <linux/cpu.h>
13+
struct dentry *mana_debugfs_root;
1214

1315
static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
1416
{
@@ -1317,7 +1319,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
13171319
GFP_KERNEL);
13181320
if (!gc->irq_contexts) {
13191321
err = -ENOMEM;
1320-
goto free_irq_vector;
1322+
goto free_irq_array;
13211323
}
13221324

13231325
for (i = 0; i < nvec; i++) {
@@ -1374,6 +1376,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
13741376
gc->max_num_msix = nvec;
13751377
gc->num_msix_usable = nvec;
13761378
cpus_read_unlock();
1379+
kfree(irqs);
13771380
return 0;
13781381

13791382
free_irq:
@@ -1386,8 +1389,9 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
13861389
}
13871390

13881391
kfree(gc->irq_contexts);
1389-
kfree(irqs);
13901392
gc->irq_contexts = NULL;
1393+
free_irq_array:
1394+
kfree(irqs);
13911395
free_irq_vector:
13921396
cpus_read_unlock();
13931397
pci_free_irq_vectors(pdev);
@@ -1518,6 +1522,12 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15181522
gc->bar0_va = bar0_va;
15191523
gc->dev = &pdev->dev;
15201524

1525+
if (gc->is_pf)
1526+
gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
1527+
else
1528+
gc->mana_pci_debugfs = debugfs_create_dir(pci_slot_name(pdev->slot),
1529+
mana_debugfs_root);
1530+
15211531
err = mana_gd_setup(pdev);
15221532
if (err)
15231533
goto unmap_bar;
@@ -1531,6 +1541,13 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15311541
cleanup_gd:
15321542
mana_gd_cleanup(pdev);
15331543
unmap_bar:
1544+
/*
1545+
* at this point we know that the other debugfs child dir/files
1546+
* are either not yet created or are already cleaned up.
1547+
* The pci debugfs folder clean-up now, will only be cleaning up
1548+
* adapter-MTU file and apc->mana_pci_debugfs folder.
1549+
*/
1550+
debugfs_remove_recursive(gc->mana_pci_debugfs);
15341551
pci_iounmap(pdev, bar0_va);
15351552
free_gc:
15361553
pci_set_drvdata(pdev, NULL);
@@ -1551,6 +1568,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
15511568

15521569
mana_gd_cleanup(pdev);
15531570

1571+
debugfs_remove_recursive(gc->mana_pci_debugfs);
1572+
15541573
pci_iounmap(pdev, gc->bar0_va);
15551574

15561575
vfree(gc);
@@ -1602,6 +1621,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
16021621

16031622
mana_gd_cleanup(pdev);
16041623

1624+
debugfs_remove_recursive(gc->mana_pci_debugfs);
1625+
16051626
pci_disable_device(pdev);
16061627
}
16071628

@@ -1621,7 +1642,28 @@ static struct pci_driver mana_driver = {
16211642
.shutdown = mana_gd_shutdown,
16221643
};
16231644

1624-
module_pci_driver(mana_driver);
1645+
static int __init mana_driver_init(void)
1646+
{
1647+
int err;
1648+
1649+
mana_debugfs_root = debugfs_create_dir("mana", NULL);
1650+
1651+
err = pci_register_driver(&mana_driver);
1652+
if (err)
1653+
debugfs_remove(mana_debugfs_root);
1654+
1655+
return err;
1656+
}
1657+
1658+
static void __exit mana_driver_exit(void)
1659+
{
1660+
pci_unregister_driver(&mana_driver);
1661+
1662+
debugfs_remove(mana_debugfs_root);
1663+
}
1664+
1665+
module_init(mana_driver_init);
1666+
module_exit(mana_driver_exit);
16251667

16261668
MODULE_DEVICE_TABLE(pci, mana_id_table);
16271669

drivers/net/ethernet/microsoft/mana/hw_channel.c

Lines changed: 34 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -51,16 +51,41 @@ static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
5151
return 0;
5252
}
5353

54+
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
55+
struct hwc_work_request *req)
56+
{
57+
struct device *dev = hwc_rxq->hwc->dev;
58+
struct gdma_sge *sge;
59+
int err;
60+
61+
sge = &req->sge;
62+
sge->address = (u64)req->buf_sge_addr;
63+
sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
64+
sge->size = req->buf_len;
65+
66+
memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
67+
req->wqe_req.sgl = sge;
68+
req->wqe_req.num_sge = 1;
69+
req->wqe_req.client_data_unit = 0;
70+
71+
err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
72+
if (err)
73+
dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
74+
return err;
75+
}
76+
5477
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
55-
const struct gdma_resp_hdr *resp_msg)
78+
struct hwc_work_request *rx_req)
5679
{
80+
const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
5781
struct hwc_caller_ctx *ctx;
5882
int err;
5983

6084
if (!test_bit(resp_msg->response.hwc_msg_id,
6185
hwc->inflight_msg_res.map)) {
6286
dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
6387
resp_msg->response.hwc_msg_id);
88+
mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
6489
return;
6590
}
6691

@@ -74,30 +99,13 @@ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
7499
memcpy(ctx->output_buf, resp_msg, resp_len);
75100
out:
76101
ctx->error = err;
77-
complete(&ctx->comp_event);
78-
}
79-
80-
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
81-
struct hwc_work_request *req)
82-
{
83-
struct device *dev = hwc_rxq->hwc->dev;
84-
struct gdma_sge *sge;
85-
int err;
86-
87-
sge = &req->sge;
88-
sge->address = (u64)req->buf_sge_addr;
89-
sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
90-
sge->size = req->buf_len;
91102

92-
memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
93-
req->wqe_req.sgl = sge;
94-
req->wqe_req.num_sge = 1;
95-
req->wqe_req.client_data_unit = 0;
103+
/* Must post rx wqe before complete(), otherwise the next rx may
104+
* hit no_wqe error.
105+
*/
106+
mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
96107

97-
err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
98-
if (err)
99-
dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
100-
return err;
108+
complete(&ctx->comp_event);
101109
}
102110

103111
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
@@ -234,14 +242,12 @@ static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
234242
return;
235243
}
236244

237-
mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
245+
mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
238246

239-
/* Do no longer use 'resp', because the buffer is posted to the HW
240-
* in the below mana_hwc_post_rx_wqe().
247+
/* Can no longer use 'resp', because the buffer is posted to the HW
248+
* in mana_hwc_handle_resp() above.
241249
*/
242250
resp = NULL;
243-
244-
mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
245251
}
246252

247253
static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,

0 commit comments

Comments
 (0)