Commit 5f3b665

Merge: mlx5: Add driver fixes up to v6.10
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4625

Hi all,

This MR includes updates for the mlx5 drivers. It has backports of fixes up to kernel v6.10.

JIRA: https://issues.redhat.com/browse/RHEL-45365

All patches are accepted upstream in Linus' tree. Each patch commit message describes its origin. This patch set passed incremental build testing to verify that it is bisectable.

Sanity tests were run over the mlx5 drivers on x86_64 systems (using ConnectX-4/5/6), including the following:

Ethernet:
- IPv4 traffic (ICMP, TCP, UDP).
- IPv6 traffic (ICMP, TCP, UDP).

VLAN:
- IPv4 traffic (ICMP, TCP, UDP).
- IPv6 traffic (ICMP, TCP, UDP).

RoCE:
- RDMA (ibv_*_pingpong).
- RDMACM (examples that come with the librdmacm package).

InfiniBand:
- RDMA (ibv_*_pingpong).
- RDMACM (examples that come with the librdmacm package).

IPoIB:
- IPv4 traffic (ICMP, TCP, UDP).
- IPv6 traffic (ICMP, TCP, UDP).

PKey:
- IPv4 traffic (ICMP, TCP, UDP).
- IPv6 traffic (ICMP, TCP, UDP).

ASAP2/OVS:
- Various sanity tests covering OVS offloads.

NFSoRDMA:
- Discover, mount and write.

iSER:
- Discover, login and mount.

SRP:
- Verify the srp_daemon service is up and the system can discover SRP targets.

Signed-off-by: Benjamin Poirier <bpoirier@redhat.com>
Approved-by: Kamal Heib <kheib@redhat.com>
Approved-by: Michal Schmidt <mschmidt@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Lucas Zampieri <lzampier@redhat.com>
2 parents: 2119c72 + a873d28, commit 5f3b665

File tree: 21 files changed (+158 additions, -73 deletions)


drivers/infiniband/hw/mlx5/main.c

Lines changed: 2 additions & 2 deletions
@@ -3760,10 +3760,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
         spin_lock_init(&dev->dm.lock);
         dev->dm.dev = mdev;
         return 0;
-err:
-        mlx5r_macsec_dealloc_gids(dev);
 err_mp:
         mlx5_ib_cleanup_multiport_master(dev);
+err:
+        mlx5r_macsec_dealloc_gids(dev);
         return err;
 }

drivers/infiniband/hw/mlx5/mlx5_ib.h

Lines changed: 2 additions & 1 deletion
@@ -639,9 +639,10 @@ struct mlx5_ib_mkey {
         unsigned int ndescs;
         struct wait_queue_head wait;
         refcount_t usecount;
-        /* User Mkey must hold either a rb_key or a cache_ent. */
+        /* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
         struct mlx5r_cache_rb_key rb_key;
         struct mlx5_cache_ent *cache_ent;
+        u8 cacheable : 1;
 };

 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

drivers/infiniband/hw/mlx5/mr.c

Lines changed: 27 additions & 14 deletions
@@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
         MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
         MLX5_SET(mkc, mkc, access_mode_4_2,
                  (ent->rb_key.access_mode >> 2) & 0x7);
+        MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);

         MLX5_SET(mkc, mkc, translations_octword_size,
                  get_mkc_octo_size(ent->rb_key.access_mode,
@@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
                         new = &((*new)->rb_left);
                 if (cmp < 0)
                         new = &((*new)->rb_right);
-                if (cmp == 0) {
-                        mutex_unlock(&cache->rb_lock);
+                if (cmp == 0)
                         return -EEXIST;
-                }
         }

         /* Add new node and rebalance tree. */
@@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
         }
         mr->mmkey.cache_ent = ent;
         mr->mmkey.type = MLX5_MKEY_MR;
+        mr->mmkey.rb_key = ent->rb_key;
+        mr->mmkey.cacheable = true;
         init_waitqueue_head(&mr->mmkey.wait);
         return mr;
 }
@@ -1158,6 +1159,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
         if (IS_ERR(mr))
                 return mr;
         mr->mmkey.rb_key = rb_key;
+        mr->mmkey.cacheable = true;
         return mr;
 }

@@ -1570,7 +1572,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
         unsigned int diffs = current_access_flags ^ target_access_flags;

         if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
-                      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
+                      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
+                      IB_ACCESS_REMOTE_ATOMIC))
                 return false;
         return mlx5r_umr_can_reconfig(dev, current_access_flags,
                                       target_access_flags);
@@ -1835,6 +1838,23 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
         return ret;
 }

+static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+{
+        struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+        struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+
+        if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+                return 0;
+
+        if (ent) {
+                spin_lock_irq(&ent->mkeys_queue.lock);
+                ent->in_use--;
+                mr->mmkey.cache_ent = NULL;
+                spin_unlock_irq(&ent->mkeys_queue.lock);
+        }
+        return destroy_mkey(dev, mr);
+}
+
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
         struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1880,16 +1900,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
         }

         /* Stop DMA */
-        if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
-                if (mlx5r_umr_revoke_mr(mr) ||
-                    cache_ent_find_and_store(dev, mr))
-                        mr->mmkey.cache_ent = NULL;
-
-        if (!mr->mmkey.cache_ent) {
-                rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
-                if (rc)
-                        return rc;
-        }
+        rc = mlx5_revoke_mr(mr);
+        if (rc)
+                return rc;

         if (mr->umem) {
                 bool is_odp = is_odp_mr(mr);
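Aside on the can_use_umr_rereg_access() hunk above: it lets an access-flag-only re-registration that also toggles remote atomics stay on the UMR reconfiguration path. Purely as background, a minimal libibverbs sketch of such a rereg follows; the helper name, the starting access flags, and the assumption that 'mr' is already registered are all hypothetical and not part of this patch.

#include <infiniband/verbs.h>

/* Sketch: change only the access flags of an existing MR, now additionally
 * differing in IBV_ACCESS_REMOTE_ATOMIC. With the fix above, mlx5 may satisfy
 * this by reconfiguring the existing mkey instead of recreating it.
 * Returns 0 on success, as ibv_rereg_mr() does.
 */
static int enable_remote_atomic(struct ibv_mr *mr)
{
        return ibv_rereg_mr(mr, IBV_REREG_MR_CHANGE_ACCESS, NULL, NULL, 0,
                            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ |
                            IBV_ACCESS_REMOTE_ATOMIC);
}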

drivers/infiniband/hw/mlx5/srq.c

Lines changed: 8 additions & 5 deletions
@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
         int err;
         struct mlx5_srq_attr in = {};
         __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+        __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
+                           sizeof(struct mlx5_wqe_data_seg);

         if (init_attr->srq_type != IB_SRQT_BASIC &&
             init_attr->srq_type != IB_SRQT_XRC &&
             init_attr->srq_type != IB_SRQT_TM)
                 return -EOPNOTSUPP;

-        /* Sanity check SRQ size before proceeding */
-        if (init_attr->attr.max_wr >= max_srq_wqes) {
-                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
-                            init_attr->attr.max_wr,
-                            max_srq_wqes);
+        /* Sanity check SRQ and sge size before proceeding */
+        if (init_attr->attr.max_wr >= max_srq_wqes ||
+            init_attr->attr.max_sge > max_sge_sz) {
+                mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
+                            init_attr->attr.max_wr, max_srq_wqes,
+                            init_attr->attr.max_sge, max_sge_sz);
                 return -EINVAL;
         }
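The srq.c hunk adds a max_sge bound to SRQ creation. For illustration only, here is a minimal libibverbs program that exercises the new check; the max_wr/max_sge values are arbitrary, and a 64-SGE request is expected to fail with EINVAL on devices whose receive WQE cannot hold that many data segments. This example is not part of the patch.

/* srq_sge_check.c - illustrative only. Build: gcc srq_sge_check.c -libverbs */
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
        int num = 0;
        struct ibv_device **list = ibv_get_device_list(&num);

        if (!list || num == 0) {
                fprintf(stderr, "no RDMA devices found\n");
                return 1;
        }

        struct ibv_context *ctx = ibv_open_device(list[0]);
        struct ibv_pd *pd = ctx ? ibv_alloc_pd(ctx) : NULL;
        if (!pd) {
                fprintf(stderr, "failed to open device or allocate PD\n");
                return 1;
        }

        struct ibv_srq_init_attr init = {
                .attr = {
                        .max_wr  = 1024,
                        .max_sge = 64,  /* rejected with EINVAL once it exceeds
                                         * max_wqe_sz_rq / sizeof(mlx5_wqe_data_seg) */
                },
        };
        struct ibv_srq *srq = ibv_create_srq(pd, &init);

        if (!srq)
                perror("ibv_create_srq");
        else
                ibv_destroy_srq(srq);

        ibv_dealloc_pd(pd);
        ibv_close_device(ctx);
        ibv_free_device_list(list);
        return 0;
}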

drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c

Lines changed: 4 additions & 2 deletions
@@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
                               struct mlx5e_xsk_param *xsk,
                               struct mlx5_core_dev *mdev)
 {
-        /* AF_XDP doesn't support frames larger than PAGE_SIZE. */
-        if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+        /* AF_XDP doesn't support frames larger than PAGE_SIZE,
+         * and xsk->chunk_size is limited to 65535 bytes.
+         */
+        if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
                 mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
                               MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
                 return false;

drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h

Lines changed: 7 additions & 1 deletion
@@ -102,8 +102,14 @@ static inline void
 mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
 {
         int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+        struct udphdr *udphdr;

-        udp_hdr(skb)->len = htons(payload_len);
+        if (skb->encapsulation)
+                udphdr = (struct udphdr *)skb_inner_transport_header(skb);
+        else
+                udphdr = udp_hdr(skb);
+
+        udphdr->len = htons(payload_len);
 }

 struct mlx5e_accel_tx_state {

drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c

Lines changed: 1 addition & 2 deletions
@@ -750,8 +750,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 err_fs_ft:
         if (rx->allow_tunnel_mode)
                 mlx5_eswitch_unblock_encap(mdev);
-        mlx5_del_flow_rules(rx->status.rule);
-        mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+        mlx5_ipsec_rx_status_destroy(ipsec, rx);
 err_add:
         mlx5_destroy_flow_table(rx->ft.status);
 err_fs_ft_status:

drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h

Lines changed: 5 additions & 12 deletions
@@ -98,18 +98,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
         if (!x || !x->xso.offload_handle)
                 goto out_disable;

-        if (xo->inner_ipproto) {
-                /* Cannot support tunnel packet over IPsec tunnel mode
-                 * because we cannot offload three IP header csum
-                 */
-                if (x->props.mode == XFRM_MODE_TUNNEL)
-                        goto out_disable;
-
-                /* Only support UDP or TCP L4 checksum */
-                if (xo->inner_ipproto != IPPROTO_UDP &&
-                    xo->inner_ipproto != IPPROTO_TCP)
-                        goto out_disable;
-        }
+        /* Only support UDP or TCP L4 checksum */
+        if (xo->inner_ipproto &&
+            xo->inner_ipproto != IPPROTO_UDP &&
+            xo->inner_ipproto != IPPROTO_TCP)
+                goto out_disable;

         return features;

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

Lines changed: 7 additions & 3 deletions
@@ -3769,7 +3769,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
                 mlx5e_fold_sw_stats64(priv, stats);
         }

-        stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+        stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;

         stats->rx_length_errors =
                 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -4717,7 +4717,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,

         /* Verify if UDP port is being offloaded by HW */
         if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
-                return features;
+                return vxlan_features_check(skb, features);

 #if IS_ENABLED(CONFIG_GENEVE)
         /* Support Geneve offload for default UDP port */
@@ -4743,7 +4743,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
         struct mlx5e_priv *priv = netdev_priv(netdev);

         features = vlan_features_check(skb, features);
-        features = vxlan_features_check(skb, features);

         /* Validate if the tunneled packet is being offloaded by HW */
         if (skb->encapsulation &&
@@ -5702,6 +5701,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
                 kfree(priv->htb_qos_sq_stats[i]);
         kvfree(priv->htb_qos_sq_stats);

+        if (priv->mqprio_rl) {
+                mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+                mlx5e_mqprio_rl_free(priv->mqprio_rl);
+        }
+
         memset(priv, 0, sizeof(*priv));
 }

drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

Lines changed: 5 additions & 1 deletion
@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)

         *hopbyhop = 0;
         if (skb->encapsulation) {
-                ihs = skb_inner_tcp_all_headers(skb);
+                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+                        ihs = skb_inner_transport_offset(skb) +
+                              sizeof(struct udphdr);
+                else
+                        ihs = skb_inner_tcp_all_headers(skb);
                 stats->tso_inner_packets++;
                 stats->tso_inner_bytes += skb->len - ihs;
         } else {
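Background on the two UDP GSO hunks (en_accel.h and en_tx.c): both adjust how the driver handles UDP GSO skbs that are themselves encapsulated, e.g. routed out through a VXLAN netdev. As a rough sketch of how such traffic originates, the snippet below sends one large buffer with the UDP_SEGMENT socket option so the stack splits it into 1400-byte datagrams; the port, payload size and loopback destination are made up for illustration, and the tunnel setup is out of scope. This is not part of the patch.

/* udp_gso_send.c - illustrative only. Build: gcc udp_gso_send.c */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/udp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103         /* from linux/udp.h, Linux >= 4.18 */
#endif

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int gso_size = 1400;            /* payload bytes per resulting datagram */
        char buf[4 * 1400];             /* one send, segmented by the stack */
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port = htons(7777),
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
        };

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(buf, 0xab, sizeof(buf));
        /* Ask the stack to segment this send into gso_size-byte datagrams. */
        if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size)))
                perror("setsockopt(UDP_SEGMENT)");
        if (sendto(fd, buf, sizeof(buf), 0,
                   (struct sockaddr *)&dst, sizeof(dst)) < 0)
                perror("sendto");
        close(fd);
        return 0;
}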
