
Commit c5de957

Maxim Levitsky authored and committed
RDMA/mana_ib: Add device statistics support
JIRA: https://issues.redhat.com/browse/RHEL-109583

commit baa640d
Author: Shiraz Saleem <shirazsaleem@microsoft.com>
Date:   Tue Jun 10 05:48:37 2025 -0700

    RDMA/mana_ib: Add device statistics support

    Add support for mana device level statistics.

    Co-developed-by: Solom Tamawy <solom.tamawy@microsoft.com>
    Signed-off-by: Solom Tamawy <solom.tamawy@microsoft.com>
    Signed-off-by: Shiraz Saleem <shirazsaleem@microsoft.com>
    Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
    Link: https://patch.msgid.link/1749559717-3424-1-git-send-email-kotaranov@linux.microsoft.com
    Reviewed-by: Long Li <longli@microsoft.com>
    Signed-off-by: Leon Romanovsky <leon@kernel.org>

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
1 parent 6ef4e0c commit c5de957

File tree

4 files changed: +93 additions, -2 deletions
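Once the driver registers the device-wide counter set, the RDMA core exposes it through sysfs under /sys/class/infiniband/<ibdev>/hw_counters/ (the existing per-port counters remain under /sys/class/infiniband/<ibdev>/ports/<port>/hw_counters/). Below is a minimal userspace sketch for reading one of the new counters; the ibdev name "mana_0" is an assumption, substitute whatever device name mana_ib registers on your system.

#include <stdio.h>

int main(void)
{
	/* Assumed ibdev name; the device-level counters added by this
	 * patch land in /sys/class/infiniband/<ibdev>/hw_counters/
	 * when the adapter advertises the capability.
	 */
	const char *path = "/sys/class/infiniband/mana_0/hw_counters/sent_cnps";
	unsigned long long val;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%llu", &val) == 1)
		printf("sent_cnps = %llu\n", val);
	fclose(f);
	return 0;
}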

drivers/infiniband/hw/mana/counters.c

Lines changed: 58 additions & 2 deletions
@@ -34,6 +34,22 @@ static const struct rdma_stat_desc mana_ib_port_stats_desc[] = {
 	[MANA_IB_CURRENT_RATE].name = "current_rate",
 };
 
+static const struct rdma_stat_desc mana_ib_device_stats_desc[] = {
+	[MANA_IB_SENT_CNPS].name = "sent_cnps",
+	[MANA_IB_RECEIVED_ECNS].name = "received_ecns",
+	[MANA_IB_RECEIVED_CNP_COUNT].name = "received_cnp_count",
+	[MANA_IB_QP_CONGESTED_EVENTS].name = "qp_congested_events",
+	[MANA_IB_QP_RECOVERED_EVENTS].name = "qp_recovered_events",
+	[MANA_IB_DEV_RATE_INC_EVENTS].name = "rate_inc_events",
+};
+
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+	return rdma_alloc_hw_stats_struct(mana_ib_device_stats_desc,
+					  ARRAY_SIZE(mana_ib_device_stats_desc),
+					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
 struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
 						  u32 port_num)
 {
@@ -42,8 +58,39 @@ struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
-int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
-			 u32 port_num, int index)
+static int mana_ib_get_hw_device_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats)
+{
+	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
+						ib_dev);
+	struct mana_rnic_query_device_cntrs_resp resp = {};
+	struct mana_rnic_query_device_cntrs_req req = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS,
+			     sizeof(req), sizeof(resp));
+	req.hdr.dev_id = mdev->gdma_dev->dev_id;
+	req.adapter = mdev->adapter_handle;
+
+	err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
+				   sizeof(resp), &resp);
+	if (err) {
+		ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d",
+			  err);
+		return err;
+	}
+
+	stats->value[MANA_IB_SENT_CNPS] = resp.sent_cnps;
+	stats->value[MANA_IB_RECEIVED_ECNS] = resp.received_ecns;
+	stats->value[MANA_IB_RECEIVED_CNP_COUNT] = resp.received_cnp_count;
+	stats->value[MANA_IB_QP_CONGESTED_EVENTS] = resp.qp_congested_events;
+	stats->value[MANA_IB_QP_RECOVERED_EVENTS] = resp.qp_recovered_events;
+	stats->value[MANA_IB_DEV_RATE_INC_EVENTS] = resp.rate_inc_events;
+
+	return ARRAY_SIZE(mana_ib_device_stats_desc);
+}
+
+static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+				     u32 port_num)
 {
 	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
 						ib_dev);
@@ -103,3 +150,12 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
 
 	return ARRAY_SIZE(mana_ib_port_stats_desc);
 }
+
+int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+			 u32 port_num, int index)
+{
+	if (!port_num)
+		return mana_ib_get_hw_device_stats(ibdev, stats);
+	else
+		return mana_ib_get_hw_port_stats(ibdev, stats, port_num);
+}
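The split above follows the rdma_hw_stats contract: mana_ib_alloc_hw_device_stats() hands the core a descriptor table for the device-wide counters, and mana_ib_get_hw_stats() is later invoked with port_num == 0 for the device-level set (non-zero for a port), returning the number of counters it filled in. A minimal illustrative sketch of that calling pattern, not part of the patch (in the tree the RDMA core drives these through the ib_device_ops table; the helper name here is hypothetical):

static void example_dump_device_stats(struct ib_device *ibdev)
{
	struct rdma_hw_stats *stats;
	int i, nr;

	/* Allocate the stats buffer described by mana_ib_device_stats_desc. */
	stats = mana_ib_alloc_hw_device_stats(ibdev);
	if (!stats)
		return;

	/* port_num == 0 selects the device-level counters added here. */
	nr = mana_ib_get_hw_stats(ibdev, stats, 0, 0);
	if (nr > 0) {
		for (i = 0; i < nr; i++)
			pr_info("%s: %llu\n", stats->descs[i].name,
				stats->value[i]);
	}

	rdma_free_hw_stats_struct(stats);
}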

drivers/infiniband/hw/mana/counters.h

Lines changed: 10 additions & 0 deletions
@@ -37,8 +37,18 @@ enum mana_ib_port_counters {
 	MANA_IB_CURRENT_RATE,
 };
 
+enum mana_ib_device_counters {
+	MANA_IB_SENT_CNPS,
+	MANA_IB_RECEIVED_ECNS,
+	MANA_IB_RECEIVED_CNP_COUNT,
+	MANA_IB_QP_CONGESTED_EVENTS,
+	MANA_IB_QP_RECOVERED_EVENTS,
+	MANA_IB_DEV_RATE_INC_EVENTS,
+};
+
 struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
 						  u32 port_num);
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev);
 int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
 			 u32 port_num, int index);
 #endif /* _COUNTERS_H_ */

drivers/infiniband/hw/mana/device.c

Lines changed: 6 additions & 0 deletions
@@ -66,6 +66,10 @@ static const struct ib_device_ops mana_ib_stats_ops = {
 	.get_hw_stats = mana_ib_get_hw_stats,
 };
 
+static const struct ib_device_ops mana_ib_device_stats_ops = {
+	.alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
+};
+
 static int mana_ib_netdev_event(struct notifier_block *this,
 				unsigned long event, void *ptr)
 {
@@ -154,6 +158,8 @@ static int mana_ib_probe(struct auxiliary_device *adev,
 	}
 
 	ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+	if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
+		ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);
 
 	ret = mana_ib_create_eqs(dev);
 	if (ret) {
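Note that .alloc_hw_device_stats is only installed when the adapter advertises MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT. On firmware without that flag the op stays unset, so the RDMA core does not create the device-wide hw_counters group and only the existing per-port counters are exposed.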

drivers/infiniband/hw/mana/mana_ib.h

Lines changed: 19 additions & 0 deletions
@@ -210,6 +210,7 @@ enum mana_ib_command_code {
 	MANA_IB_DESTROY_RC_QP = 0x3000b,
 	MANA_IB_SET_QP_STATE = 0x3000d,
 	MANA_IB_QUERY_VF_COUNTERS = 0x30022,
+	MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023,
 };
 
 struct mana_ib_query_adapter_caps_req {
@@ -218,6 +219,7 @@ struct mana_ib_query_adapter_caps_req {
 
 enum mana_ib_adapter_features {
 	MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
+	MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5),
 };
 
 struct mana_ib_query_adapter_caps_resp {
@@ -516,6 +518,23 @@ struct mana_rnic_query_vf_cntrs_resp {
 	u64 current_rate;
 }; /* HW Data */
 
+struct mana_rnic_query_device_cntrs_req {
+	struct gdma_req_hdr hdr;
+	mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_resp {
+	struct gdma_resp_hdr hdr;
+	u32 sent_cnps;
+	u32 received_ecns;
+	u32 reserved1;
+	u32 received_cnp_count;
+	u32 qp_congested_events;
+	u32 qp_recovered_events;
+	u32 rate_inc_events;
+	u32 reserved2;
+}; /* HW Data */
+
 static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
 {
 	return mdev->gdma_dev->gdma_context;
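The new request/response structs mirror the hardware message layout (hence the /* HW Data */ markers, including the reserved fields); the 32-bit counters returned by the adapter are widened into the 64-bit stats->value[] slots in mana_ib_get_hw_device_stats().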
