
Commit db2eb2e

Merge: Rebase HyperV MANA driver to upstream kernel v6.11-rc3
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/5023

This pull request rebases the MANA driver to kernel v6.11-rc3.

JIRA: https://issues.redhat.com/browse/RHEL-54330
Tested: compile tested only

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Approved-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Approved-by: Wander Lairson Costa <wander@redhat.com>
Approved-by: Chris von Recklinghausen <crecklin@redhat.com>
Approved-by: Izabela Bakollari <ibakolla@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Rado Vrbovsky <rvrbovsk@redhat.com>
2 parents: c8d98a5 + c65cfd5

21 files changed: 1455 additions, 368 deletions

drivers/infiniband/hw/mana/cq.c

Lines changed: 81 additions & 45 deletions
@@ -9,66 +9,78 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                       struct ib_udata *udata)
 {
         struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+        struct mana_ib_create_cq_resp resp = {};
+        struct mana_ib_ucontext *mana_ucontext;
         struct ib_device *ibdev = ibcq->device;
         struct mana_ib_create_cq ucmd = {};
         struct mana_ib_dev *mdev;
-        struct gdma_context *gc;
+        bool is_rnic_cq;
+        u32 doorbell;
         int err;
 
         mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-        gc = mdev->gdma_dev->gdma_context;
 
-        if (udata->inlen < sizeof(ucmd))
-                return -EINVAL;
+        cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
+        cq->cq_handle = INVALID_MANA_HANDLE;
 
-        if (attr->comp_vector > gc->max_num_queues)
+        if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
                 return -EINVAL;
 
-        cq->comp_vector = attr->comp_vector;
-
         err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
         if (err) {
                 ibdev_dbg(ibdev,
                           "Failed to copy from udata for create cq, %d\n", err);
                 return err;
         }
 
-        if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
+        is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);
+
+        if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
                 ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
                 return -EINVAL;
         }
 
         cq->cqe = attr->cqe;
-        cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
-                               IB_ACCESS_LOCAL_WRITE);
-        if (IS_ERR(cq->umem)) {
-                err = PTR_ERR(cq->umem);
-                ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
-                          err);
+        err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
+        if (err) {
+                ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
                 return err;
         }
 
-        err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
-        if (err) {
-                ibdev_dbg(ibdev,
-                          "Failed to create dma region for create cq, %d\n",
-                          err);
-                goto err_release_umem;
+        mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
+                                                  ibucontext);
+        doorbell = mana_ucontext->doorbell;
+
+        if (is_rnic_cq) {
+                err = mana_ib_gd_create_cq(mdev, cq, doorbell);
+                if (err) {
+                        ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
+                        goto err_destroy_queue;
+                }
+
+                err = mana_ib_install_cq_cb(mdev, cq);
+                if (err) {
+                        ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
+                        goto err_destroy_rnic_cq;
+                }
         }
 
-        ibdev_dbg(ibdev,
-                  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
-                  err, cq->gdma_region);
-
-        /*
-         * The CQ ID is not known at this time. The ID is generated at create_qp
-         */
-        cq->id = INVALID_QUEUE_ID;
+        resp.cqid = cq->queue.id;
+        err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+        if (err) {
+                ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
+                goto err_remove_cq_cb;
+        }
 
         return 0;
 
-err_release_umem:
-        ib_umem_release(cq->umem);
+err_remove_cq_cb:
+        mana_ib_remove_cq_cb(mdev, cq);
+err_destroy_rnic_cq:
+        mana_ib_gd_destroy_cq(mdev, cq);
+err_destroy_queue:
+        mana_ib_destroy_queue(mdev, &cq->queue);
+
         return err;
 }
 
@@ -77,33 +89,57 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
         struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
         struct ib_device *ibdev = ibcq->device;
         struct mana_ib_dev *mdev;
-        struct gdma_context *gc;
-        int err;
 
         mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-        gc = mdev->gdma_dev->gdma_context;
 
-        err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
-        if (err) {
-                ibdev_dbg(ibdev,
-                          "Failed to destroy dma region, %d\n", err);
-                return err;
-        }
+        mana_ib_remove_cq_cb(mdev, cq);
 
-        if (cq->id != INVALID_QUEUE_ID) {
-                kfree(gc->cq_table[cq->id]);
-                gc->cq_table[cq->id] = NULL;
-        }
+        /* Ignore return code as there is not much we can do about it.
+         * The error message is printed inside.
+         */
+        mana_ib_gd_destroy_cq(mdev, cq);
 
-        ib_umem_release(cq->umem);
+        mana_ib_destroy_queue(mdev, &cq->queue);
 
         return 0;
 }
 
-void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
+static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
 {
         struct mana_ib_cq *cq = ctx;
 
         if (cq->ibcq.comp_handler)
                 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+        struct gdma_context *gc = mdev_to_gc(mdev);
+        struct gdma_queue *gdma_cq;
+
+        if (cq->queue.id >= gc->max_num_cqs)
+                return -EINVAL;
+        /* Create CQ table entry */
+        WARN_ON(gc->cq_table[cq->queue.id]);
+        gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+        if (!gdma_cq)
+                return -ENOMEM;
+
+        gdma_cq->cq.context = cq;
+        gdma_cq->type = GDMA_CQ;
+        gdma_cq->cq.callback = mana_ib_cq_handler;
+        gdma_cq->id = cq->queue.id;
+        gc->cq_table[cq->queue.id] = gdma_cq;
+        return 0;
+}
+
+void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+        struct gdma_context *gc = mdev_to_gc(mdev);
+
+        if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
+                return;
+
+        kfree(gc->cq_table[cq->queue.id]);
+        gc->cq_table[cq->queue.id] = NULL;
+}
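
Note on the create-CQ change above: the driver now reads an optional flags word from the user command and returns the kernel-chosen CQ number in the response, while the offsetof() check on udata->inlen keeps older userspace (which only passes buf_addr) working. The sketch below shows the rough shape such uapi structures take; beyond buf_addr, flags, MANA_IB_CREATE_RNIC_CQ and cqid, which are referenced in the diff, the field names and reserved padding are illustrative assumptions rather than a verbatim copy of include/uapi/rdma/mana-abi.h.

/* Sketch of the user<->kernel ABI implied by the diff above.
 * Reserved/padding fields are guesses for illustration only.
 */
#include <linux/types.h>

enum {
        MANA_IB_CREATE_RNIC_CQ = 1 << 0,        /* request an RNIC-managed CQ */
};

struct mana_ib_create_cq {
        __aligned_u64 buf_addr;   /* user buffer backing the CQ ring */
        __u16 flags;              /* optional; absence detected via the offsetof() inlen check */
        __u16 reserved0;
        __u32 reserved1;
};

struct mana_ib_create_cq_resp {
        __u32 cqid;               /* CQ id allocated during create, reported back to userspace */
        __u32 reserved;
};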

drivers/infiniband/hw/mana/device.c

Lines changed: 53 additions & 4 deletions
@@ -5,6 +5,8 @@
 
 #include "mana_ib.h"
 #include <net/mana/mana_auxiliary.h>
+#include <linux/etherdevice.h>
+#include <net/addrconf.h>
 
 MODULE_DESCRIPTION("Microsoft Azure Network Adapter IB driver");
 MODULE_LICENSE("GPL");
@@ -15,6 +17,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
         .driver_id = RDMA_DRIVER_MANA,
         .uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,
 
+        .add_gid = mana_ib_gd_add_gid,
         .alloc_pd = mana_ib_alloc_pd,
         .alloc_ucontext = mana_ib_alloc_ucontext,
         .create_cq = mana_ib_create_cq,
@@ -23,18 +26,21 @@ static const struct ib_device_ops mana_ib_dev_ops = {
         .create_wq = mana_ib_create_wq,
         .dealloc_pd = mana_ib_dealloc_pd,
         .dealloc_ucontext = mana_ib_dealloc_ucontext,
+        .del_gid = mana_ib_gd_del_gid,
         .dereg_mr = mana_ib_dereg_mr,
         .destroy_cq = mana_ib_destroy_cq,
         .destroy_qp = mana_ib_destroy_qp,
         .destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table,
         .destroy_wq = mana_ib_destroy_wq,
         .disassociate_ucontext = mana_ib_disassociate_ucontext,
+        .get_link_layer = mana_ib_get_link_layer,
         .get_port_immutable = mana_ib_get_port_immutable,
         .mmap = mana_ib_mmap,
         .modify_qp = mana_ib_modify_qp,
         .modify_wq = mana_ib_modify_wq,
         .query_device = mana_ib_query_device,
         .query_gid = mana_ib_query_gid,
+        .query_pkey = mana_ib_query_pkey,
         .query_port = mana_ib_query_port,
         .reg_user_mr = mana_ib_reg_user_mr,
 
@@ -51,8 +57,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
 {
         struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
         struct gdma_dev *mdev = madev->mdev;
+        struct net_device *ndev;
         struct mana_context *mc;
         struct mana_ib_dev *dev;
+        u8 mac_addr[ETH_ALEN];
         int ret;
 
         mc = mdev->driver_data;
@@ -74,9 +82,26 @@ static int mana_ib_probe(struct auxiliary_device *adev,
          * num_comp_vectors needs to set to the max MSIX index
          * when interrupts and event queues are implemented
          */
-        dev->ib_dev.num_comp_vectors = 1;
+        dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
         dev->ib_dev.dev.parent = mdev->gdma_context->dev;
 
+        rcu_read_lock(); /* required to get primary netdev */
+        ndev = mana_get_primary_netdev_rcu(mc, 0);
+        if (!ndev) {
+                rcu_read_unlock();
+                ret = -ENODEV;
+                ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
+                goto free_ib_device;
+        }
+        ether_addr_copy(mac_addr, ndev->dev_addr);
+        addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
+        ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
+        rcu_read_unlock();
+        if (ret) {
+                ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+                goto free_ib_device;
+        }
+
         ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
         if (ret) {
                 ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
@@ -92,15 +117,38 @@ static int mana_ib_probe(struct auxiliary_device *adev,
                 goto deregister_device;
         }
 
+        ret = mana_ib_create_eqs(dev);
+        if (ret) {
+                ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+                goto deregister_device;
+        }
+
+        ret = mana_ib_gd_create_rnic_adapter(dev);
+        if (ret)
+                goto destroy_eqs;
+
+        xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);
+        ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+        if (ret) {
+                ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
+                          ret);
+                goto destroy_rnic;
+        }
+
         ret = ib_register_device(&dev->ib_dev, "mana_%d",
                                  mdev->gdma_context->dev);
         if (ret)
-                goto deregister_device;
+                goto destroy_rnic;
 
         dev_set_drvdata(&adev->dev, dev);
 
         return 0;
 
+destroy_rnic:
+        xa_destroy(&dev->qp_table_wq);
+        mana_ib_gd_destroy_rnic_adapter(dev);
+destroy_eqs:
+        mana_ib_destroy_eqs(dev);
 deregister_device:
         mana_gd_deregister_device(dev->gdma_dev);
 free_ib_device:
@@ -113,9 +161,10 @@ static void mana_ib_remove(struct auxiliary_device *adev)
         struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
 
         ib_unregister_device(&dev->ib_dev);
-
+        xa_destroy(&dev->qp_table_wq);
+        mana_ib_gd_destroy_rnic_adapter(dev);
+        mana_ib_destroy_eqs(dev);
         mana_gd_deregister_device(dev->gdma_dev);
-
         ib_dealloc_device(&dev->ib_dev);
 }
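
One detail worth calling out in the probe changes: the IB node GUID is now derived from the primary netdev's MAC address via addrconf_addr_eui48(), i.e. the standard EUI-48 to modified EUI-64 expansion. The snippet below is a self-contained, standalone illustration of that mapping (an assumption-free restatement of the rule, not the kernel helper itself): insert 0xFF/0xFE in the middle of the 6-byte MAC and flip the universal/local bit of the first octet.

/* Standalone illustration of the EUI-48 -> modified EUI-64 expansion used
 * when deriving the node GUID from the netdev MAC address.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void mac_to_eui64(uint8_t eui[8], const uint8_t mac[6])
{
        memcpy(eui, mac, 3);          /* OUI: first 3 bytes of the MAC */
        eui[3] = 0xFF;                /* fixed filler bytes in the middle */
        eui[4] = 0xFE;
        memcpy(eui + 5, mac + 3, 3);  /* NIC-specific part */
        eui[0] ^= 0x02;               /* flip the universal/local bit */
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x15, 0x5d, 0x01, 0x02, 0x03 };
        uint8_t eui[8];

        mac_to_eui64(eui, mac);
        for (int i = 0; i < 8; i++)
                printf("%02x%s", eui[i], i == 7 ? "\n" : ":");
        /* prints 02:15:5d:ff:fe:01:02:03 for the example MAC above */
        return 0;
}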
