Skip to content

Commit b91a38f

Browse files
committed
xsk: Bring back busy polling support
JIRA: https://issues.redhat.com/browse/RHEL-77816
Upstream Status: net.git commit 5ef44b3

commit 5ef44b3
Author: Stanislav Fomichev <sdf@fomichev.me>
Date: Wed Jan 8 16:34:36 2025 -0800

    xsk: Bring back busy polling support

    Commit 86e25f4 ("net: napi: Add napi_config") moved napi->napi_id
    assignment to a later point in time (napi_hash_add_with_id). This
    breaks __xdp_rxq_info_reg which copies napi_id at an earlier time
    and now stores 0 napi_id. It also makes sk_mark_napi_id_once_xdp
    and __sk_mark_napi_id_once useless because they now work against
    0 napi_id. Since sk_busy_loop requires valid napi_id to busy-poll
    on, there is no way to busy-poll AF_XDP sockets anymore.

    Bring back the ability to busy-poll on XSK by resolving socket's
    napi_id at bind time. This relies on relatively recent
    netif_queue_set_napi, but (assume) at this point most popular
    drivers should have been converted. This also removes per-tx/rx
    cycles which used to check and/or set the napi_id value.

    Confirmed by running a busy-polling AF_XDP socket
    (github.com/fomichev/xskrtt) on mlx5 and looking at
    BusyPollRxPackets from /proc/net/netstat.

    Fixes: 86e25f4 ("net: napi: Add napi_config")
    Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
    Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
    Reviewed-by: Jakub Kicinski <kuba@kernel.org>
    Link: https://patch.msgid.link/20250109003436.2829560-1-sdf@fomichev.me
    Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Signed-off-by: Davide Caratti <dcaratti@redhat.com>
1 parent 3f9183b commit b91a38f

File tree

5 files changed

+9
-29
lines changed

5 files changed

+9
-29
lines changed

include/net/busy_poll.h

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -156,12 +156,4 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
156156
#endif
157157
}
158158

159-
static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
160-
const struct xdp_buff *xdp)
161-
{
162-
#ifdef CONFIG_NET_RX_BUSY_POLL
163-
__sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
164-
#endif
165-
}
166-
167159
#endif /* _LINUX_NET_BUSY_POLL_H */

include/net/xdp.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,6 @@ struct xdp_rxq_info {
6363
u32 queue_index;
6464
u32 reg_state;
6565
struct xdp_mem_info mem;
66-
unsigned int napi_id;
6766
u32 frag_size;
6867
} ____cacheline_aligned; /* perf critical, avoid false-sharing */
6968

include/net/xdp_sock_drv.h

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -59,15 +59,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
5959
xp_fill_cb(pool, desc);
6060
}
6161

62-
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
63-
{
64-
#ifdef CONFIG_NET_RX_BUSY_POLL
65-
return pool->heads[0].xdp.rxq->napi_id;
66-
#else
67-
return 0;
68-
#endif
69-
}
70-
7162
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
7263
unsigned long attrs)
7364
{
@@ -306,11 +297,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
306297
{
307298
}
308299

309-
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
310-
{
311-
return 0;
312-
}
313-
314300
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
315301
unsigned long attrs)
316302
{

net/core/xdp.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -185,7 +185,6 @@ int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
185185
xdp_rxq_info_init(xdp_rxq);
186186
xdp_rxq->dev = dev;
187187
xdp_rxq->queue_index = queue_index;
188-
xdp_rxq->napi_id = napi_id;
189188
xdp_rxq->frag_size = frag_size;
190189

191190
xdp_rxq->reg_state = REG_STATE_REGISTERED;

net/xdp/xsk.c

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -323,7 +323,6 @@ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
323323
return -ENOSPC;
324324
}
325325

326-
sk_mark_napi_id_once_xdp(&xs->sk, xdp);
327326
return 0;
328327
}
329328

@@ -891,11 +890,8 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
891890
if (unlikely(!xs->tx))
892891
return -ENOBUFS;
893892

894-
if (sk_can_busy_loop(sk)) {
895-
if (xs->zc)
896-
__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
893+
if (sk_can_busy_loop(sk))
897894
sk_busy_loop(sk, 1); /* only support non-blocking sockets */
898-
}
899895

900896
if (xs->zc && xsk_no_wakeup(sk))
901897
return 0;
@@ -1276,6 +1272,14 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
12761272
xs->queue_id = qid;
12771273
xp_add_xsk(xs->pool, xs);
12781274

1275+
if (xs->zc && qid < dev->real_num_rx_queues) {
1276+
struct netdev_rx_queue *rxq;
1277+
1278+
rxq = __netif_get_rx_queue(dev, qid);
1279+
if (rxq->napi)
1280+
__sk_mark_napi_id_once(sk, rxq->napi->napi_id);
1281+
}
1282+
12791283
out_unlock:
12801284
if (err) {
12811285
dev_put(dev);

0 commit comments

Comments
 (0)