Skip to content

Commit 43a612b

Browse files
Revert "vdpa/mlx5: Add RX MAC VLAN filter support"
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2169174 Upstream Status: RHEL only SVQ cannot work with VLAN filtering, and supporting VLAN in SVQ is not a high priority now, so we need to revert this commit in RHEL 9.2. This reverts commit 3b0a4e6. Signed-off-by: Cindy Lu <lulu@redhat.com>
1 parent 8616795 commit 43a612b

File tree

1 file changed

+58
-216
lines changed

1 file changed

+58
-216
lines changed

drivers/vdpa/mlx5/net/mlx5_vnet.c

Lines changed: 58 additions & 216 deletions
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,6 @@ MODULE_LICENSE("Dual BSD/GPL");
4848

4949
#define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
5050

51-
#define MLX5V_UNTAGGED 0x1000
52-
5351
struct mlx5_vdpa_net_resources {
5452
u32 tisn;
5553
u32 tdn;
@@ -146,8 +144,6 @@ static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
146144
return idx <= mvdev->max_idx;
147145
}
148146

149-
#define MLX5V_MACVLAN_SIZE 256
150-
151147
struct mlx5_vdpa_net {
152148
struct mlx5_vdpa_dev mvdev;
153149
struct mlx5_vdpa_net_resources res;
@@ -161,21 +157,15 @@ struct mlx5_vdpa_net {
161157
*/
162158
struct rw_semaphore reslock;
163159
struct mlx5_flow_table *rxft;
160+
struct mlx5_flow_handle *rx_rule_ucast;
161+
struct mlx5_flow_handle *rx_rule_mcast;
164162
bool setup;
165163
u32 cur_num_vqs;
166164
u32 rqt_size;
167165
bool nb_registered;
168166
struct notifier_block nb;
169167
struct vdpa_callback config_cb;
170168
struct mlx5_vdpa_wq_ent cvq_ent;
171-
struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
172-
};
173-
174-
struct macvlan_node {
175-
struct hlist_node hlist;
176-
struct mlx5_flow_handle *ucast_rule;
177-
struct mlx5_flow_handle *mcast_rule;
178-
u64 macvlan;
179169
};
180170

181171
static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1439,17 +1429,12 @@ static void destroy_tir(struct mlx5_vdpa_net *ndev)
14391429
mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn);
14401430
}
14411431

1442-
#define MAX_STEERING_ENT 0x8000
1443-
#define MAX_STEERING_GROUPS 2
1444-
1445-
static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
1446-
u16 vid, bool tagged,
1447-
struct mlx5_flow_handle **ucast,
1448-
struct mlx5_flow_handle **mcast)
1432+
static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
14491433
{
14501434
struct mlx5_flow_destination dest = {};
1435+
struct mlx5_flow_table_attr ft_attr = {};
14511436
struct mlx5_flow_act flow_act = {};
1452-
struct mlx5_flow_handle *rule;
1437+
struct mlx5_flow_namespace *ns;
14531438
struct mlx5_flow_spec *spec;
14541439
void *headers_c;
14551440
void *headers_v;
@@ -1462,178 +1447,74 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
14621447
return -ENOMEM;
14631448

14641449
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1450+
ft_attr.max_fte = 2;
1451+
ft_attr.autogroup.max_num_groups = 2;
1452+
1453+
ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
1454+
if (!ns) {
1455+
mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
1456+
err = -EOPNOTSUPP;
1457+
goto err_ns;
1458+
}
1459+
1460+
ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
1461+
if (IS_ERR(ndev->rxft)) {
1462+
err = PTR_ERR(ndev->rxft);
1463+
goto err_ns;
1464+
}
1465+
14651466
headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
1466-
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
14671467
dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
1468-
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
14691468
memset(dmac_c, 0xff, ETH_ALEN);
1470-
ether_addr_copy(dmac_v, mac);
1471-
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
1472-
if (tagged) {
1473-
MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
1474-
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
1475-
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, vid);
1476-
}
1469+
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
1470+
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
1471+
ether_addr_copy(dmac_v, ndev->config.mac);
1472+
14771473
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
14781474
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
14791475
dest.tir_num = ndev->res.tirn;
1480-
rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
1481-
if (IS_ERR(rule))
1482-
return PTR_ERR(rule);
1476+
ndev->rx_rule_ucast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
14831477

1484-
*ucast = rule;
1478+
if (IS_ERR(ndev->rx_rule_ucast)) {
1479+
err = PTR_ERR(ndev->rx_rule_ucast);
1480+
ndev->rx_rule_ucast = NULL;
1481+
goto err_rule_ucast;
1482+
}
14851483

14861484
memset(dmac_c, 0, ETH_ALEN);
14871485
memset(dmac_v, 0, ETH_ALEN);
14881486
dmac_c[0] = 1;
14891487
dmac_v[0] = 1;
1490-
rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
1491-
kvfree(spec);
1492-
if (IS_ERR(rule)) {
1493-
err = PTR_ERR(rule);
1494-
goto err_mcast;
1495-
}
1496-
1497-
*mcast = rule;
1498-
return 0;
1499-
1500-
err_mcast:
1501-
mlx5_del_flow_rules(*ucast);
1502-
return err;
1503-
}
1504-
1505-
static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev,
1506-
struct mlx5_flow_handle *ucast,
1507-
struct mlx5_flow_handle *mcast)
1508-
{
1509-
mlx5_del_flow_rules(ucast);
1510-
mlx5_del_flow_rules(mcast);
1511-
}
1512-
1513-
static u64 search_val(u8 *mac, u16 vlan, bool tagged)
1514-
{
1515-
u64 val;
1516-
1517-
if (!tagged)
1518-
vlan = MLX5V_UNTAGGED;
1519-
1520-
val = (u64)vlan << 48 |
1521-
(u64)mac[0] << 40 |
1522-
(u64)mac[1] << 32 |
1523-
(u64)mac[2] << 24 |
1524-
(u64)mac[3] << 16 |
1525-
(u64)mac[4] << 8 |
1526-
(u64)mac[5];
1527-
1528-
return val;
1529-
}
1530-
1531-
static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value)
1532-
{
1533-
struct macvlan_node *pos;
1534-
u32 idx;
1535-
1536-
idx = hash_64(value, 8); // tbd 8
1537-
hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) {
1538-
if (pos->macvlan == value)
1539-
return pos;
1488+
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1489+
ndev->rx_rule_mcast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, &dest, 1);
1490+
if (IS_ERR(ndev->rx_rule_mcast)) {
1491+
err = PTR_ERR(ndev->rx_rule_mcast);
1492+
ndev->rx_rule_mcast = NULL;
1493+
goto err_rule_mcast;
15401494
}
1541-
return NULL;
1542-
}
1543-
1544-
static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged) // vlan -> vid
1545-
{
1546-
struct macvlan_node *ptr;
1547-
u64 val;
1548-
u32 idx;
1549-
int err;
1550-
1551-
val = search_val(mac, vlan, tagged);
1552-
if (mac_vlan_lookup(ndev, val))
1553-
return -EEXIST;
15541495

1555-
ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
1556-
if (!ptr)
1557-
return -ENOMEM;
1558-
1559-
err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, vlan, tagged,
1560-
&ptr->ucast_rule, &ptr->mcast_rule);
1561-
if (err)
1562-
goto err_add;
1563-
1564-
ptr->macvlan = val;
1565-
idx = hash_64(val, 8);
1566-
hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]);
1496+
kvfree(spec);
15671497
return 0;
15681498

1569-
err_add:
1570-
kfree(ptr);
1499+
err_rule_mcast:
1500+
mlx5_del_flow_rules(ndev->rx_rule_ucast);
1501+
ndev->rx_rule_ucast = NULL;
1502+
err_rule_ucast:
1503+
mlx5_destroy_flow_table(ndev->rxft);
1504+
err_ns:
1505+
kvfree(spec);
15711506
return err;
15721507
}
15731508

1574-
static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged)
1509+
static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
15751510
{
1576-
struct macvlan_node *ptr;
1577-
1578-
ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged));
1579-
if (!ptr)
1511+
if (!ndev->rx_rule_ucast)
15801512
return;
15811513

1582-
hlist_del(&ptr->hlist);
1583-
mlx5_vdpa_del_mac_vlan_rules(ndev, ptr->ucast_rule, ptr->mcast_rule);
1584-
kfree(ptr);
1585-
}
1586-
1587-
static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev)
1588-
{
1589-
struct macvlan_node *pos;
1590-
struct hlist_node *n;
1591-
int i;
1592-
1593-
for (i = 0; i < MLX5V_MACVLAN_SIZE; i++) {
1594-
hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) {
1595-
hlist_del(&pos->hlist);
1596-
mlx5_vdpa_del_mac_vlan_rules(ndev, pos->ucast_rule, pos->mcast_rule);
1597-
kfree(pos);
1598-
}
1599-
}
1600-
}
1601-
1602-
static int setup_steering(struct mlx5_vdpa_net *ndev)
1603-
{
1604-
struct mlx5_flow_table_attr ft_attr = {};
1605-
struct mlx5_flow_namespace *ns;
1606-
int err;
1607-
1608-
ft_attr.max_fte = MAX_STEERING_ENT;
1609-
ft_attr.autogroup.max_num_groups = MAX_STEERING_GROUPS;
1610-
1611-
ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
1612-
if (!ns) {
1613-
mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
1614-
return -EOPNOTSUPP;
1615-
}
1616-
1617-
ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
1618-
if (IS_ERR(ndev->rxft)) {
1619-
mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n");
1620-
return PTR_ERR(ndev->rxft);
1621-
}
1622-
1623-
err = mac_vlan_add(ndev, ndev->config.mac, 0, false);
1624-
if (err)
1625-
goto err_add;
1626-
1627-
return 0;
1628-
1629-
err_add:
1630-
mlx5_destroy_flow_table(ndev->rxft);
1631-
return err;
1632-
}
1633-
1634-
static void teardown_steering(struct mlx5_vdpa_net *ndev)
1635-
{
1636-
clear_mac_vlan_table(ndev);
1514+
mlx5_del_flow_rules(ndev->rx_rule_mcast);
1515+
ndev->rx_rule_mcast = NULL;
1516+
mlx5_del_flow_rules(ndev->rx_rule_ucast);
1517+
ndev->rx_rule_ucast = NULL;
16371518
mlx5_destroy_flow_table(ndev->rxft);
16381519
}
16391520

@@ -1684,9 +1565,9 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
16841565

16851566
/* Need recreate the flow table entry, so that the packet could forward back
16861567
*/
1687-
mac_vlan_del(ndev, ndev->config.mac, 0, false);
1568+
remove_fwd_to_tir(ndev);
16881569

1689-
if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
1570+
if (add_fwd_to_tir(ndev)) {
16901571
mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
16911572

16921573
/* Although it hardly run here, we still need double check */
@@ -1710,7 +1591,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
17101591

17111592
memcpy(ndev->config.mac, mac_back, ETH_ALEN);
17121593

1713-
if (mac_vlan_add(ndev, ndev->config.mac, 0, false))
1594+
if (add_fwd_to_tir(ndev))
17141595
mlx5_vdpa_warn(mvdev, "restore forward rules failed: insert forward rules failed\n");
17151596

17161597
break;
@@ -1812,42 +1693,6 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
18121693
return status;
18131694
}
18141695

1815-
static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
1816-
{
1817-
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
1818-
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1819-
struct mlx5_control_vq *cvq = &mvdev->cvq;
1820-
__virtio16 vlan;
1821-
size_t read;
1822-
u16 id;
1823-
1824-
switch (cmd) {
1825-
case VIRTIO_NET_CTRL_VLAN_ADD:
1826-
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
1827-
if (read != sizeof(vlan))
1828-
break;
1829-
1830-
id = mlx5vdpa16_to_cpu(mvdev, vlan);
1831-
if (mac_vlan_add(ndev, ndev->config.mac, id, true))
1832-
break;
1833-
1834-
status = VIRTIO_NET_OK;
1835-
break;
1836-
case VIRTIO_NET_CTRL_VLAN_DEL:
1837-
read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
1838-
if (read != sizeof(vlan))
1839-
break;
1840-
1841-
id = mlx5vdpa16_to_cpu(mvdev, vlan);
1842-
mac_vlan_del(ndev, ndev->config.mac, id, true);
1843-
break;
1844-
default:
1845-
break;
1846-
}
1847-
1848-
return status;
1849-
}
1850-
18511696
static void mlx5_cvq_kick_handler(struct work_struct *work)
18521697
{
18531698
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
@@ -1893,9 +1738,7 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
18931738
case VIRTIO_NET_CTRL_MQ:
18941739
status = handle_ctrl_mq(mvdev, ctrl.cmd);
18951740
break;
1896-
case VIRTIO_NET_CTRL_VLAN:
1897-
status = handle_ctrl_vlan(mvdev, ctrl.cmd);
1898-
break;
1741+
18991742
default:
19001743
break;
19011744
}
@@ -2176,7 +2019,6 @@ static u64 get_supported_features(struct mlx5_core_dev *mdev)
21762019
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
21772020
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
21782021
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
2179-
mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VLAN);
21802022

21812023
return mlx_vdpa_features;
21822024
}
@@ -2452,9 +2294,9 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
24522294
goto err_tir;
24532295
}
24542296

2455-
err = setup_steering(ndev);
2297+
err = add_fwd_to_tir(ndev);
24562298
if (err) {
2457-
mlx5_vdpa_warn(mvdev, "setup_steering\n");
2299+
mlx5_vdpa_warn(mvdev, "add_fwd_to_tir\n");
24582300
goto err_fwd;
24592301
}
24602302
ndev->setup = true;
@@ -2480,7 +2322,7 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
24802322
if (!ndev->setup)
24812323
return;
24822324

2483-
teardown_steering(ndev);
2325+
remove_fwd_to_tir(ndev);
24842326
destroy_tir(ndev);
24852327
destroy_rqt(ndev);
24862328
teardown_virtqueues(ndev);

0 commit comments

Comments
 (0)