Commit c86af66

Author: Herton R. Krzesinski (committed)

Merge: Revert "vdpa/mlx5: Add RX MAC VLAN filter support"

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/2042

SVQ cannot work with VLAN filtering, and supporting VLAN in SVQ is not a high priority right now, so revert these commits in rhel9.2.

Upstream-status: RHEL-Only
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2169174
Tested by me.

Signed-off-by: Cindy Lu <lulu@redhat.com>
Approved-by: Eugenio Pérez <eperezma@redhat.com>
Approved-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: Herton R. Krzesinski <herton@redhat.com>
2 parents 326e802 + 43a612b commit c86af66
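For reference, the reverted code steered RX traffic with one unicast/multicast rule pair per (MAC, VLAN) entry, kept in a 256-bucket hash table keyed on a 64-bit value that packs the VLAN ID into bits 63:48 and the MAC into bits 47:0, with untagged traffic mapped to the reserved value MLX5V_UNTAGGED; after this revert the driver goes back to a single unicast plus multicast rule pair forwarding to the TIR. Below is a minimal user-space sketch of that key packing, mirroring the removed search_val()/hash_64() usage visible in the diff further down; macvlan_key() and bucket() are illustrative names, and the multiplicative hash only stands in for the kernel's hash_64():

/*
 * Standalone illustration, not driver code: how the reverted steering keyed
 * its macvlan_hash table. VLAN ID goes in bits 63:48, the MAC in bits 47:0,
 * and untagged traffic uses MLX5V_UNTAGGED. The kernel derives the bucket
 * with hash_64(key, 8); the multiplicative hash below is only a stand-in to
 * show the 8-bit bucket index (MLX5V_MACVLAN_SIZE == 256).
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MLX5V_UNTAGGED		0x1000
#define MLX5V_MACVLAN_BITS	8	/* 2^8 == 256 buckets */

static uint64_t macvlan_key(const uint8_t *mac, uint16_t vid, bool tagged)
{
	if (!tagged)
		vid = MLX5V_UNTAGGED;

	return (uint64_t)vid << 48 |
	       (uint64_t)mac[0] << 40 | (uint64_t)mac[1] << 32 |
	       (uint64_t)mac[2] << 24 | (uint64_t)mac[3] << 16 |
	       (uint64_t)mac[4] << 8  | (uint64_t)mac[5];
}

/* Stand-in for the kernel's hash_64(key, MLX5V_MACVLAN_BITS). */
static uint32_t bucket(uint64_t key)
{
	return (uint32_t)((key * 0x61C8864680B583EBULL) >> (64 - MLX5V_MACVLAN_BITS));
}

int main(void)
{
	const uint8_t mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
	uint64_t tagged_key = macvlan_key(mac, 100, true);
	uint64_t untagged_key = macvlan_key(mac, 0, false);

	printf("tagged   key=0x%016" PRIx64 " bucket=%u\n", tagged_key, bucket(tagged_key));
	printf("untagged key=0x%016" PRIx64 " bucket=%u\n", untagged_key, bucket(untagged_key));
	return 0;
}

The same MAC hashed under a tagged key (VID 100) and under the untagged key lands in independent buckets, which is what allowed the removed code to install a separate rule pair per VLAN.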

File tree

1 file changed: +58 −222

drivers/vdpa/mlx5/net/mlx5_vnet.c

Lines changed: 58 additions & 222 deletions
@@ -48,8 +48,6 @@ MODULE_LICENSE("Dual BSD/GPL");
 
 #define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
 
-#define MLX5V_UNTAGGED 0x1000
-
 struct mlx5_vdpa_net_resources {
 	u32 tisn;
 	u32 tdn;
@@ -146,8 +144,6 @@ static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
 	return idx <= mvdev->max_idx;
 }
 
-#define MLX5V_MACVLAN_SIZE 256
-
 struct mlx5_vdpa_net {
 	struct mlx5_vdpa_dev mvdev;
 	struct mlx5_vdpa_net_resources res;
@@ -161,21 +157,15 @@ struct mlx5_vdpa_net {
 	 */
 	struct rw_semaphore reslock;
 	struct mlx5_flow_table *rxft;
+	struct mlx5_flow_handle *rx_rule_ucast;
+	struct mlx5_flow_handle *rx_rule_mcast;
 	bool setup;
 	u32 cur_num_vqs;
 	u32 rqt_size;
 	bool nb_registered;
 	struct notifier_block nb;
 	struct vdpa_callback config_cb;
 	struct mlx5_vdpa_wq_ent cvq_ent;
-	struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
-};
-
-struct macvlan_node {
-	struct hlist_node hlist;
-	struct mlx5_flow_handle *ucast_rule;
-	struct mlx5_flow_handle *mcast_rule;
-	u64 macvlan;
 };
 
 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1439,17 +1429,12 @@ static void destroy_tir(struct mlx5_vdpa_net *ndev)
 	mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
 }
 
-#define MAX_STEERING_ENT 0x8000
-#define MAX_STEERING_GROUPS 2
-
-static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
-					u16 vid, bool tagged,
-					struct mlx5_flow_handle **ucast,
-					struct mlx5_flow_handle **mcast)
+static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
 {
 	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_flow_act flow_act = {};
-	struct mlx5_flow_handle *rule;
+	struct mlx5_flow_namespace *ns;
 	struct mlx5_flow_spec *spec;
 	void *headers_c;
 	void *headers_v;
@@ -1462,180 +1447,74 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
 		return -ENOMEM;
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	ft_attr.max_fte = 2;
+	ft_attr.autogroup.max_num_groups = 2;
+
+	ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+	if (!ns) {
+		mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+		err = -EOPNOTSUPP;
+		goto err_ns;
+	}
+
+	ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+	if (IS_ERR(ndev->rxft)) {
+		err = PTR_ERR(ndev->rxft);
+		goto err_ns;
+	}
+
 	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
-	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
 	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
+	memset(dmac_c, 0xff, ETH_ALEN);
+	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
 	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
-	eth_broadcast_addr(dmac_c);
-	ether_addr_copy(dmac_v, mac);
-	if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
-	}
-	if (tagged) {
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
-	}
+	ether_addr_copy(dmac_v, ndev->config.mac);
+
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dest.tir_num = ndev->res.tirn;
-	rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
-	if (IS_ERR(rule))
-		return PTR_ERR(rule);
+	ndev->rx_rule_ucast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
 
-	*ucast = rule;
+	if (IS_ERR(ndev->rx_rule_ucast)) {
+		err = PTR_ERR(ndev->rx_rule_ucast);
+		ndev->rx_rule_ucast = NULL;
+		goto err_rule_ucast;
+	}
 
 	memset(dmac_c, 0, ETH_ALEN);
 	memset(dmac_v, 0, ETH_ALEN);
 	dmac_c[0] = 1;
 	dmac_v[0] = 1;
-	rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
-	kvfree(spec);
-	if (IS_ERR(rule)) {
-		err = PTR_ERR(rule);
-		goto err_mcast;
-	}
-
-	*mcast = rule;
-	return 0;
-
-err_mcast:
-	mlx5_del_flow_rules(*ucast);
-	return err;
-}
-
-static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev,
-					 struct mlx5_flow_handle *ucast,
-					 struct mlx5_flow_handle *mcast)
-{
-	mlx5_del_flow_rules(ucast);
-	mlx5_del_flow_rules(mcast);
-}
-
-static u64 search_val(u8 *mac, u16 vlan, bool tagged)
-{
-	u64 val;
-
-	if (!tagged)
-		vlan = MLX5V_UNTAGGED;
-
-	val = (u64)vlan << 48 |
-	      (u64)mac[0] << 40 |
-	      (u64)mac[1] << 32 |
-	      (u64)mac[2] << 24 |
-	      (u64)mac[3] << 16 |
-	      (u64)mac[4] << 8 |
-	      (u64)mac[5];
-
-	return val;
-}
-
-static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value)
-{
-	struct macvlan_node *pos;
-	u32 idx;
-
-	idx = hash_64(value, 8); // tbd 8
-	hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) {
-		if (pos->macvlan == value)
-			return pos;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	ndev->rx_rule_mcast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
+	if (IS_ERR(ndev->rx_rule_mcast)) {
+		err = PTR_ERR(ndev->rx_rule_mcast);
+		ndev->rx_rule_mcast = NULL;
+		goto err_rule_mcast;
 	}
-	return NULL;
-}
 
-static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged) // vlan -> vid
-{
-	struct macvlan_node *ptr;
-	u64 val;
-	u32 idx;
-	int err;
-
-	val = search_val(mac, vlan, tagged);
-	if (mac_vlan_lookup(ndev, val))
-		return -EEXIST;
-
-	ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return -ENOMEM;
-
-	err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, vlan, tagged,
-					   &ptr->ucast_rule, &ptr->mcast_rule);
-	if (err)
-		goto err_add;
-
-	ptr->macvlan = val;
-	idx = hash_64(val, 8);
-	hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]);
+	kvfree(spec);
 	return 0;
 
-err_add:
-	kfree(ptr);
+err_rule_mcast:
+	mlx5_del_flow_rules(ndev->rx_rule_ucast);
+	ndev->rx_rule_ucast = NULL;
+err_rule_ucast:
+	mlx5_destroy_flow_table(ndev->rxft);
+err_ns:
+	kvfree(spec);
 	return err;
 }
 
-static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged)
+static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
 {
-	struct macvlan_node *ptr;
-
-	ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged));
-	if (!ptr)
+	if (!ndev->rx_rule_ucast)
 		return;
 
-	hlist_del(&ptr->hlist);
-	mlx5_vdpa_del_mac_vlan_rules(ndev, ptr->ucast_rule, ptr->mcast_rule);
-	kfree(ptr);
-}
-
-static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev)
-{
-	struct macvlan_node *pos;
-	struct hlist_node *n;
-	int i;
-
-	for (i = 0; i < MLX5V_MACVLAN_SIZE; i++) {
-		hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) {
-			hlist_del(&pos->hlist);
-			mlx5_vdpa_del_mac_vlan_rules(ndev, pos->ucast_rule, pos->mcast_rule);
-			kfree(pos);
-		}
-	}
-}
-
-static int setup_steering(struct mlx5_vdpa_net *ndev)
-{
-	struct mlx5_flow_table_attr ft_attr = {};
-	struct mlx5_flow_namespace *ns;
-	int err;
-
-	ft_attr.max_fte = MAX_STEERING_ENT;
-	ft_attr.autogroup.max_num_groups = MAX_STEERING_GROUPS;
-
-	ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
-	if (!ns) {
-		mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
-		return -EOPNOTSUPP;
-	}
-
-	ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
-	if (IS_ERR(ndev->rxft)) {
-		mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
-		return PTR_ERR(ndev->rxft);
-	}
-
-	err = mac_vlan_add(ndev, ndev->config.mac, 0, false);
-	if (err)
-		goto err_add;
-
-	return 0;
-
-err_add:
-	mlx5_destroy_flow_table(ndev->rxft);
-	return err;
-}
-
-static void teardown_steering(struct mlx5_vdpa_net *ndev)
-{
-	clear_mac_vlan_table(ndev);
+	mlx5_del_flow_rules(ndev->rx_rule_mcast);
+	ndev->rx_rule_mcast = NULL;
+	mlx5_del_flow_rules(ndev->rx_rule_ucast);
+	ndev->rx_rule_ucast = NULL;
 	mlx5_destroy_flow_table(ndev->rxft);
 }
 
@@ -1686,9 +1565,9 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
 		/* Need recreate the flow table entry, so that the packet could forward back
 		 */
-		mac_vlan_del(ndev, mac_back, 0, false);
+		remove_fwd_to_tir(ndev);
 
-		if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
+		if (add_fwd_to_tir(ndev)) {
 			mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
 
 			/* Although it hardly run here, we still need double check */
@@ -1712,7 +1591,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
 			memcpy(ndev->config.mac, mac_back, ETH_ALEN);
 
-			if (mac_vlan_add(ndev, ndev->config.mac, 0, false))
+			if (add_fwd_to_tir(ndev))
 				mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
 
 			break;
@@ -1814,46 +1693,6 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 	return status;
 }
 
-static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
-{
-	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
-	virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
-	struct mlx5_control_vq *cvq = &mvdev->cvq;
-	__virtio16 vlan;
-	size_t read;
-	u16 id;
-
-	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
-		return status;
-
-	switch (cmd) {
-	case VIRTIO_NET_CTRL_VLAN_ADD:
-		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
-		if (read != sizeof(vlan))
-			break;
-
-		id = mlx5vdpa16_to_cpu(mvdev, vlan);
-		if (mac_vlan_add(ndev, ndev->config.mac, id, true))
-			break;
-
-		status = VIRTIO_NET_OK;
-		break;
-	case VIRTIO_NET_CTRL_VLAN_DEL:
-		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
-		if (read != sizeof(vlan))
-			break;
-
-		id = mlx5vdpa16_to_cpu(mvdev, vlan);
-		mac_vlan_del(ndev, ndev->config.mac, id, true);
-		status = VIRTIO_NET_OK;
-		break;
-	default:
-		break;
-	}
-
-	return status;
-}
-
 static void mlx5_cvq_kick_handler(struct work_struct *work)
 {
 	virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
@@ -1899,9 +1738,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 		case VIRTIO_NET_CTRL_MQ:
 			status = handle_ctrl_mq(mvdev, ctrl.cmd);
 			break;
-		case VIRTIO_NET_CTRL_VLAN:
-			status = handle_ctrl_vlan(mvdev, ctrl.cmd);
-			break;
+
 		default:
 			break;
 		}
@@ -2182,7 +2019,6 @@ static u64 get_supported_features(struct mlx5_core_dev *mdev)
 	mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
 	mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
 	mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
-	mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VLAN);
 
 	return mlx_vdpa_features;
 }
@@ -2458,9 +2294,9 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 		goto err_tir;
 	}
 
-	err = setup_steering(ndev);
+	err = add_fwd_to_tir(ndev);
 	if (err) {
-		mlx5_vdpa_warn(mvdev, "setup_steering\n");
+		mlx5_vdpa_warn(mvdev, "add_fwd_to_tir\n");
 		goto err_fwd;
 	}
 	ndev->setup = true;
@@ -2486,7 +2322,7 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
 	if (!ndev->setup)
 		return;
 
-	teardown_steering(ndev);
+	remove_fwd_to_tir(ndev);
 	destroy_tir(ndev);
 	destroy_rqt(ndev);
 	teardown_virtqueues(ndev);
