Skip to content

Commit 82e0773

Browse files
committed
Merge: CNB97: skbuff: Optimize SKB coalescing for page pool
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6972 JIRA: https://issues.redhat.com/browse/RHEL-91107 Commits ``` aaf153a ("page_pool: halve BIAS_MAX for multiple user references of a fragment") f7dc324 ("skbuff: Optimization of SKB coalescing for page pool") ``` Signed-off-by: Ivan Vecera <ivecera@redhat.com> Approved-by: Davide Caratti <dcaratti@redhat.com> Approved-by: José Ignacio Tornos Martínez <jtornosm@redhat.com> Approved-by: Petr Oros <poros@redhat.com> Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> Merged-by: Augusto Caringi <acaringi@redhat.com>
2 parents 4f345a0 + 8369fd5 commit 82e0773

File tree

3 files changed

+46
-13
lines changed

3 files changed

+46
-13
lines changed

include/net/page_pool/helpers.h

Lines changed: 5 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -279,6 +279,11 @@ static inline long page_pool_unref_page(struct page *page, long nr)
279279
return ret;
280280
}
281281

282+
/* Take one additional fragment reference (pp_ref_count) on a page pool
 * page. Counterpart to page_pool_unref_page(); no overflow check here —
 * BIAS_MAX in net/core/page_pool.c is halved to leave headroom for this.
 */
static inline void page_pool_ref_page(struct page *page)
283+
{
284+
atomic_long_inc(&page->pp_ref_count);
285+
}
286+
282287
static inline bool page_pool_is_last_ref(struct page *page)
283288
{
284289
/* If page_pool_unref_page() returns 0, we were the last user */

net/core/page_pool.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -29,7 +29,7 @@
2929
#define DEFER_TIME (msecs_to_jiffies(1000))
3030
#define DEFER_WARN_INTERVAL (60 * HZ)
3131

32-
#define BIAS_MAX LONG_MAX
32+
#define BIAS_MAX (LONG_MAX >> 1)
3333

3434
#ifdef CONFIG_PAGE_POOL_STATS
3535
static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);

net/core/skbuff.c

Lines changed: 40 additions & 12 deletions
Original file line number · Diff line number · Diff line change
@@ -877,6 +877,37 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data)
877877
return napi_pp_put_page(virt_to_page(data));
878878
}
879879

880+
/**
881+
* skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
882+
* @skb: page pool aware skb
883+
*
884+
* Increase the fragment reference count (pp_ref_count) of a skb. This is
885+
* intended to gain fragment references only for page pool aware skbs,
886+
* i.e. when skb->pp_recycle is true, and not for fragments in a
887+
* non-pp-recycling skb. It has a fallback to increase references on normal
888+
* pages, as page pool aware skbs may also have normal page fragments.
889+
*/
890+
static int skb_pp_frag_ref(struct sk_buff *skb)
891+
{
892+
struct skb_shared_info *shinfo;
893+
struct page *head_page;
894+
int i;
895+
896+
if (!skb->pp_recycle)
897+
return -EINVAL;
898+
899+
shinfo = skb_shinfo(skb);
900+
901+
for (i = 0; i < shinfo->nr_frags; i++) {
902+
head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
903+
if (likely(page_pool_page_is_pp(head_page)))
904+
page_pool_ref_page(head_page);
905+
else
906+
page_ref_inc(head_page);
907+
}
908+
return 0;
909+
}
910+
880911
static void skb_free_head(struct sk_buff *skb)
881912
{
882913
unsigned char *head = skb->head;
@@ -5654,17 +5685,12 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
56545685
return false;
56555686

56565687
/* In general, avoid mixing page_pool and non-page_pool allocated
5657-
* pages within the same SKB. Additionally avoid dealing with clones
5658-
* with page_pool pages, in case the SKB is using page_pool fragment
5659-
* references (page_pool_alloc_frag()). Since we only take full page
5660-
* references for cloned SKBs at the moment that would result in
5661-
* inconsistent reference counts.
5662-
* In theory we could take full references if @from is cloned and
5663-
* !@to->pp_recycle but its tricky (due to potential race with
5664-
* the clone disappearing) and rare, so not worth dealing with.
5688+
* pages within the same SKB. In theory we could take full
5689+
* references if @from is cloned and !@to->pp_recycle but its
5690+
* tricky (due to potential race with the clone disappearing) and
5691+
* rare, so not worth dealing with.
56655692
*/
5666-
if (to->pp_recycle != from->pp_recycle ||
5667-
(from->pp_recycle && skb_cloned(from)))
5693+
if (to->pp_recycle != from->pp_recycle)
56685694
return false;
56695695

56705696
if (len <= skb_tailroom(to)) {
@@ -5721,8 +5747,10 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
57215747
/* if the skb is not cloned this does nothing
57225748
* since we set nr_frags to 0.
57235749
*/
5724-
for (i = 0; i < from_shinfo->nr_frags; i++)
5725-
__skb_frag_ref(&from_shinfo->frags[i]);
5750+
if (skb_pp_frag_ref(from)) {
5751+
for (i = 0; i < from_shinfo->nr_frags; i++)
5752+
__skb_frag_ref(&from_shinfo->frags[i]);
5753+
}
57265754

57275755
to->truesize += delta;
57285756
to->len += len;

0 commit comments

Comments
 (0)