Skip to content

Commit 60f7b35

Browse files
committed
bpf: Use try_alloc_pages() to allocate pages for bpf needs.
JIRA: https://issues.redhat.com/browse/RHEL-78202

commit c9eb810
Author: Alexei Starovoitov <ast@kernel.org>
Date:   Fri Feb 21 18:44:27 2025 -0800

    bpf: Use try_alloc_pages() to allocate pages for bpf needs.

    Use try_alloc_pages() and free_pages_nolock() for BPF needs
    when context doesn't allow using normal alloc_pages.
    This is a prerequisite for further work.

    Signed-off-by: Alexei Starovoitov <ast@kernel.org>
    Link: https://lore.kernel.org/r/20250222024427.30294-7-alexei.starovoitov@gmail.com
    Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Signed-off-by: Gregory Bell <grbell@redhat.com>
1 parent 7f8f18d commit 60f7b35

File tree

3 files changed

+23
-7
lines changed

3 files changed

+23
-7
lines changed

include/linux/bpf.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2385,7 +2385,7 @@ int generic_map_delete_batch(struct bpf_map *map,
23852385
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
23862386
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
23872387

2388-
int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
2388+
int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
23892389
unsigned long nr_pages, struct page **page_array);
23902390
#ifdef CONFIG_MEMCG
23912391
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,

kernel/bpf/arena.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -287,7 +287,7 @@ static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
287287
return VM_FAULT_SIGSEGV;
288288

289289
/* Account into memcg of the process that created bpf_arena */
290-
ret = bpf_map_alloc_pages(map, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 1, &page);
290+
ret = bpf_map_alloc_pages(map, NUMA_NO_NODE, 1, &page);
291291
if (ret) {
292292
range_tree_set(&arena->rt, vmf->pgoff, 1);
293293
return VM_FAULT_SIGSEGV;
@@ -465,8 +465,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt
465465
if (ret)
466466
goto out_free_pages;
467467

468-
ret = bpf_map_alloc_pages(&arena->map, GFP_KERNEL | __GFP_ZERO,
469-
node_id, page_cnt, pages);
468+
ret = bpf_map_alloc_pages(&arena->map, node_id, page_cnt, pages);
470469
if (ret)
471470
goto out;
472471

kernel/bpf/syscall.c

Lines changed: 20 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -586,7 +586,24 @@ static void bpf_map_release_memcg(struct bpf_map *map)
586586
}
587587
#endif
588588

589-
int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
589+
static bool can_alloc_pages(void)
590+
{
591+
return preempt_count() == 0 && !irqs_disabled() &&
592+
!IS_ENABLED(CONFIG_PREEMPT_RT);
593+
}
594+
595+
static struct page *__bpf_alloc_page(int nid)
596+
{
597+
if (!can_alloc_pages())
598+
return try_alloc_pages(nid, 0);
599+
600+
return alloc_pages_node(nid,
601+
GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT
602+
| __GFP_NOWARN,
603+
0);
604+
}
605+
606+
int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
590607
unsigned long nr_pages, struct page **pages)
591608
{
592609
unsigned long i, j;
@@ -599,14 +616,14 @@ int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
599616
old_memcg = set_active_memcg(memcg);
600617
#endif
601618
for (i = 0; i < nr_pages; i++) {
602-
pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);
619+
pg = __bpf_alloc_page(nid);
603620

604621
if (pg) {
605622
pages[i] = pg;
606623
continue;
607624
}
608625
for (j = 0; j < i; j++)
609-
__free_page(pages[j]);
626+
free_pages_nolock(pages[j], 0);
610627
ret = -ENOMEM;
611628
break;
612629
}

0 commit comments

Comments
 (0)