Commit ca5d467

mm/page_alloc: ensure try_alloc_pages() plays well with unaccepted memory
JIRA: https://issues.redhat.com/browse/RHEL-78202

commit 23fa022
Author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Date:   Tue May 6 14:25:08 2025 +0300

    mm/page_alloc: ensure try_alloc_pages() plays well with unaccepted memory

    try_alloc_pages() will not attempt to allocate memory if the system has
    *any* unaccepted memory. Memory is accepted as needed and can remain in
    the system indefinitely, causing the interface to always fail.

    Rather than immediately giving up, attempt to use already accepted
    memory on free lists.

    Pass 'alloc_flags' to cond_accept_memory() and do not accept new memory
    for ALLOC_TRYLOCK requests.

    Found via code inspection - only BPF uses this at present and the
    runtime effects are unclear.

    Link: https://lkml.kernel.org/r/20250506112509.905147-2-kirill.shutemov@linux.intel.com
    Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    Fixes: 97769a5 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
    Cc: Alexei Starovoitov <ast@kernel.org>
    Cc: Vlastimil Babka <vbabka@suse.cz>
    Cc: Suren Baghdasaryan <surenb@google.com>
    Cc: Michal Hocko <mhocko@suse.com>
    Cc: Brendan Jackman <jackmanb@google.com>
    Cc: Johannes Weiner <hannes@cmpxchg.org>
    Cc: <stable@vger.kernel.org>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Signed-off-by: Gregory Bell <grbell@redhat.com>
1 parent 9c44f82 commit ca5d467
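
The net effect of the change, condensed into a rough sketch (simplified from the hunks below: the real cond_accept_memory() also tracks watermarks and accepts memory in MAX_ORDER_NR_PAGES chunks; ALLOC_TRYLOCK is the flag try_alloc_pages() passes down):

/* Condensed sketch only -- not the full function from the diff below. */
static bool cond_accept_memory(struct zone *zone, unsigned int order,
			       int alloc_flags)
{
	if (!has_unaccepted_memory())
		return false;

	if (list_empty(&zone->unaccepted_pages))
		return false;

	/* Trylock callers must not take the memory-acceptance lock. */
	if (alloc_flags & ALLOC_TRYLOCK)
		return false;

	/* ... otherwise accept memory up to the zone watermark ... */
	return try_to_accept_memory_one(zone);
}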

File tree

1 file changed: +15 -13 lines

mm/page_alloc.c

Lines changed: 15 additions & 13 deletions
@@ -289,7 +289,8 @@ EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
 static bool page_contains_unaccepted(struct page *page, unsigned int order);
-static bool cond_accept_memory(struct zone *zone, unsigned int order);
+static bool cond_accept_memory(struct zone *zone, unsigned int order,
+			       int alloc_flags);
 static bool __free_unaccepted(struct page *page);
 
 int page_group_by_mobility_disabled __read_mostly;
@@ -3464,7 +3465,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			}
 		}
 
-		cond_accept_memory(zone, order);
+		cond_accept_memory(zone, order, alloc_flags);
 
 		/*
 		 * Detect whether the number of free pages is below high
@@ -3491,7 +3492,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 				       gfp_mask)) {
 			int ret;
 
-			if (cond_accept_memory(zone, order))
+			if (cond_accept_memory(zone, order, alloc_flags))
 				goto try_this_zone;
 
 			/*
@@ -3544,7 +3545,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 
 			return page;
 		} else {
-			if (cond_accept_memory(zone, order))
+			if (cond_accept_memory(zone, order, alloc_flags))
 				goto try_this_zone;
 
 			/* Try again if zone has deferred pages */
@@ -4688,7 +4689,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			goto failed;
 		}
 
-		cond_accept_memory(zone, 0);
+		cond_accept_memory(zone, 0, alloc_flags);
 retry_this_zone:
 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
 		if (zone_watermark_fast(zone, 0, mark,
@@ -4697,7 +4698,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			break;
 		}
 
-		if (cond_accept_memory(zone, 0))
+		if (cond_accept_memory(zone, 0, alloc_flags))
 			goto retry_this_zone;
 
 		/* Try again if zone has deferred pages */
@@ -7063,7 +7064,8 @@ static inline bool has_unaccepted_memory(void)
 	return static_branch_unlikely(&zones_with_unaccepted_pages);
 }
 
-static bool cond_accept_memory(struct zone *zone, unsigned int order)
+static bool cond_accept_memory(struct zone *zone, unsigned int order,
+			       int alloc_flags)
 {
 	long to_accept, wmark;
 	bool ret = false;
@@ -7074,6 +7076,10 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order)
 	if (list_empty(&zone->unaccepted_pages))
 		return false;
 
+	/* Bailout, since try_to_accept_memory_one() needs to take a lock */
+	if (alloc_flags & ALLOC_TRYLOCK)
+		return false;
+
 	wmark = promo_wmark_pages(zone);
 
 	/*
@@ -7130,7 +7136,8 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order)
 	return false;
 }
 
-static bool cond_accept_memory(struct zone *zone, unsigned int order)
+static bool cond_accept_memory(struct zone *zone, unsigned int order,
+			       int alloc_flags)
 {
 	return false;
 }
@@ -7201,11 +7208,6 @@ struct page *try_alloc_pages_noprof(int nid, unsigned int order)
 	if (!pcp_allowed_order(order))
 		return NULL;
 
-#ifdef CONFIG_UNACCEPTED_MEMORY
-	/* Bailout, since try_to_accept_memory_one() needs to take a lock */
-	if (has_unaccepted_memory())
-		return NULL;
-#endif
 	/* Bailout, since _deferred_grow_zone() needs to take a lock */
 	if (deferred_pages_enabled())
 		return NULL;
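
For orientation, a caller-side sketch (illustrative only, not part of this commit; try_alloc_pages() and free_pages_nolock() are the opportunistic API introduced by the Fixes: commit referenced above):

/*
 * Illustrative sketch: an opportunistic, any-context allocation.
 * Before this fix, the presence of *any* unaccepted memory made
 * try_alloc_pages() return NULL unconditionally; with the fix it can
 * still hand out already-accepted pages from the free lists.
 */
struct page *page = try_alloc_pages(NUMA_NO_NODE, 0);

if (page) {
	/* ... use the page in a context that cannot take locks ... */
	free_pages_nolock(page, 0);
}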
