Skip to content

Commit 7eb97da

Browse files
committed
mm/page_alloc: avoid second trylock of zone->lock
JIRA: https://issues.redhat.com/browse/RHEL-78202 commit c5bb27e Author: Alexei Starovoitov <ast@kernel.org> Date: Sun Mar 30 17:28:09 2025 -0700 mm/page_alloc: avoid second trylock of zone->lock spin_trylock followed by spin_lock will cause extra write cache access. If the lock is contended it may cause unnecessary cache line bouncing and will execute redundant irq restore/save pair. Therefore, check alloc/fpi_flags first and use spin_trylock or spin_lock. Link: https://lkml.kernel.org/r/20250331002809.94758-1-alexei.starovoitov@gmail.com Fixes: 97769a5 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation") Signed-off-by: Alexei Starovoitov <ast@kernel.org> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Acked-by: Michal Hocko <mhocko@suse.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: Harry Yoo <harry.yoo@oracle.com> Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev> Cc: Andrii Nakryiko <andrii@kernel.org> Cc: Daniel Borkmann <daniel@iogearbox.net> Cc: Martin KaFai Lau <martin.lau@kernel.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Gregory Bell <grbell@redhat.com>
1 parent a23a231 commit 7eb97da

File tree

1 file changed

+9
-6
lines changed

1 file changed

+9
-6
lines changed

mm/page_alloc.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1264,11 +1264,12 @@ static void free_one_page(struct zone *zone, struct page *page,
12641264
struct llist_head *llhead;
12651265
unsigned long flags;
12661266

1267-
if (!spin_trylock_irqsave(&zone->lock, flags)) {
1268-
if (unlikely(fpi_flags & FPI_TRYLOCK)) {
1267+
if (unlikely(fpi_flags & FPI_TRYLOCK)) {
1268+
if (!spin_trylock_irqsave(&zone->lock, flags)) {
12691269
add_page_to_zone_llist(zone, page, order);
12701270
return;
12711271
}
1272+
} else {
12721273
spin_lock_irqsave(&zone->lock, flags);
12731274
}
12741275

@@ -2336,9 +2337,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
23362337
unsigned long flags;
23372338
int i;
23382339

2339-
if (!spin_trylock_irqsave(&zone->lock, flags)) {
2340-
if (unlikely(alloc_flags & ALLOC_TRYLOCK))
2340+
if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
2341+
if (!spin_trylock_irqsave(&zone->lock, flags))
23412342
return 0;
2343+
} else {
23422344
spin_lock_irqsave(&zone->lock, flags);
23432345
}
23442346
for (i = 0; i < count; ++i) {
@@ -2959,9 +2961,10 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
29592961

29602962
do {
29612963
page = NULL;
2962-
if (!spin_trylock_irqsave(&zone->lock, flags)) {
2963-
if (unlikely(alloc_flags & ALLOC_TRYLOCK))
2964+
if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
2965+
if (!spin_trylock_irqsave(&zone->lock, flags))
29642966
return NULL;
2967+
} else {
29652968
spin_lock_irqsave(&zone->lock, flags);
29662969
}
29672970
if (alloc_flags & ALLOC_HIGHATOMIC)

0 commit comments

Comments
 (0)