
Commit ea2247f

net: mana: Switch to page pool for jumbo frames
jira LE-4365
Rebuild_History Non-Buildable kernel-6.12.0-55.38.1.el10_0
commit-author Haiyang Zhang <haiyangz@microsoft.com>
commit fa37a88

Frag allocators, such as netdev_alloc_frag(), were not designed to work
for fragsz > PAGE_SIZE. So, switch to page pool for jumbo frames instead
of using page frag allocators. This driver is using page pool for
smaller MTUs already.

Cc: stable@vger.kernel.org
Fixes: 80f6215 ("net: mana: Add support for jumbo frame")
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: Long Li <longli@microsoft.com>
Reviewed-by: Shradha Gupta <shradhagupta@linux.microsoft.com>
Link: https://patch.msgid.link/1742920357-27263-1-git-send-email-haiyangz@microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
(cherry picked from commit fa37a88)
Signed-off-by: Jonathan Maple <jmaple@ciq.com>
1 parent 885a5df commit ea2247f
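
Editorial note on the change above: netdev_alloc_frag() and napi_alloc_frag() draw from a per-CPU page-frag cache, and when a request larger than PAGE_SIZE cannot be satisfied from a big enough compound page the cache falls back to a single page, which the caller then has to detect and reject. The hunks below remove that fallback dance. A minimal sketch of the replacement allocation pattern, with a hypothetical helper name and an illustrative alloc_size parameter (not the driver's code):

#include <linux/mm.h>
#include <linux/skbuff.h>

/* Sketch only: allocate one RX buffer of "alloc_size" bytes as a single
 * compound page, as the patch does, instead of going through a frag
 * allocator that may silently hand back a smaller page.
 */
static void *rx_buf_alloc(unsigned int alloc_size)
{
        struct page *page;

        page = dev_alloc_pages(get_order(alloc_size));
        if (!page)
                return NULL;    /* no partial-fallback case to check for */

        return page_to_virt(page);
}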

File tree: 1 file changed, +9 -37 lines changed

drivers/net/ethernet/microsoft/mana/mana_en.c

Lines changed: 9 additions & 37 deletions
@@ -660,30 +660,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
         mpc->rxbpre_total = 0;
 
         for (i = 0; i < num_rxb; i++) {
-                if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
-                        va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
-                        if (!va)
-                                goto error;
-
-                        page = virt_to_head_page(va);
-                        /* Check if the frag falls back to single page */
-                        if (compound_order(page) <
-                            get_order(mpc->rxbpre_alloc_size)) {
-                                put_page(page);
-                                goto error;
-                        }
-                } else {
-                        page = dev_alloc_page();
-                        if (!page)
-                                goto error;
+                page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+                if (!page)
+                        goto error;
 
-                        va = page_to_virt(page);
-                }
+                va = page_to_virt(page);
 
                 da = dma_map_single(dev, va + mpc->rxbpre_headroom,
                                     mpc->rxbpre_datasize, DMA_FROM_DEVICE);
                 if (dma_mapping_error(dev, da)) {
-                        put_page(virt_to_head_page(va));
+                        put_page(page);
                         goto error;
                 }
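
This hunk also simplifies the error path: since the buffer is now one compound page returned by dev_alloc_pages(), a plain put_page() on that page releases the whole allocation, with no need to recover the head page via virt_to_head_page(). A hedged sketch of the matching unmap-and-free step; the helper name and parameters are illustrative, not the driver's actual teardown routine:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Sketch only: undo the dma_map_single() + dev_alloc_pages() pair from
 * the hunk above for a single pre-allocated RX buffer.
 */
static void rx_buf_free(struct device *dev, struct page *page,
                        dma_addr_t da, u32 datasize)
{
        dma_unmap_single(dev, da, datasize, DMA_FROM_DEVICE);
        put_page(page);         /* releases the whole compound page */
}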

@@ -1675,7 +1661,7 @@ static void mana_rx_skb(void *buf_va, bool from_pool,
 }
 
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
-                             dma_addr_t *da, bool *from_pool, bool is_napi)
+                             dma_addr_t *da, bool *from_pool)
 {
         struct page *page;
         void *va;
@@ -1686,21 +1672,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
         if (rxq->xdp_save_va) {
                 va = rxq->xdp_save_va;
                 rxq->xdp_save_va = NULL;
-        } else if (rxq->alloc_size > PAGE_SIZE) {
-                if (is_napi)
-                        va = napi_alloc_frag(rxq->alloc_size);
-                else
-                        va = netdev_alloc_frag(rxq->alloc_size);
-
-                if (!va)
-                        return NULL;
-
-                page = virt_to_head_page(va);
-                /* Check if the frag falls back to single page */
-                if (compound_order(page) < get_order(rxq->alloc_size)) {
-                        put_page(page);
-                        return NULL;
-                }
         } else {
                 page = page_pool_dev_alloc_pages(rxq->page_pool);
                 if (!page)
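
With the > PAGE_SIZE frag branch deleted here (and the now-unused is_napi parameter dropped in the previous hunk), every buffer that is not recycled from rxq->xdp_save_va comes from the page pool, whatever the MTU. A minimal sketch of the single allocation path that remains, assuming the pool was created with its page order set as in the final hunk; the helper name is illustrative:

#include <linux/mm.h>
#include <net/page_pool/helpers.h>

/* Sketch only: one pool allocation now covers both normal and jumbo
 * buffer sizes, because the pool's page order matches alloc_size.
 */
static void *rx_frag_alloc(struct page_pool *pool)
{
        struct page *page = page_pool_dev_alloc_pages(pool);

        return page ? page_to_virt(page) : NULL;
}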
@@ -1733,7 +1704,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
         dma_addr_t da;
         void *va;
 
-        va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+        va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
         if (!va)
                 return;

@@ -2175,7 +2146,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
         if (mpc->rxbufs_pre)
                 va = mana_get_rxbuf_pre(rxq, &da);
         else
-                va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+                va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 
         if (!va)
                 return -ENOMEM;
@@ -2261,6 +2232,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
         pprm.nid = gc->numa_node;
         pprm.napi = &rxq->rx_cq.napi;
         pprm.netdev = rxq->ndev;
+        pprm.order = get_order(rxq->alloc_size);
 
         rxq->page_pool = page_pool_create(&pprm);
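
The one-line addition above is what lets the pool serve jumbo frames: page_pool_params.order makes the pool hand out compound pages of 2^order pages, so get_order(rxq->alloc_size) guarantees each pool page can hold a full jumbo-frame buffer. A hedged sketch of a pool created this way; the pool depth, NUMA node, and helper name are placeholders, not the driver's actual settings:

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/numa.h>
#include <net/page_pool/types.h>

/* Sketch only: build a page pool whose pages can hold an RX buffer of
 * "alloc_size" bytes, as the patch does via pprm.order.
 */
static struct page_pool *jumbo_pool_create(struct net_device *ndev,
                                           unsigned int alloc_size)
{
        struct page_pool_params pprm = {
                .pool_size = 256,               /* placeholder ring depth */
                .nid = NUMA_NO_NODE,            /* placeholder NUMA node */
                .netdev = ndev,
                .order = get_order(alloc_size), /* the key setting for jumbo */
        };

        return page_pool_create(&pprm);
}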
