Skip to content

Commit dbc73dc

Browse files
committed
libbpf: move global data mmap()'ing into bpf_object__load()
JIRA: https://issues.redhat.com/browse/RHEL-85485 commit 137978f Author: Andrii Nakryiko <andrii@kernel.org> Date: Tue Oct 22 21:39:07 2024 -0700 libbpf: move global data mmap()'ing into bpf_object__load() Since BPF skeleton inception libbpf has been doing mmap()'ing of global data ARRAY maps in bpf_object__load_skeleton() API, which is used by code generated .skel.h files (i.e., by BPF skeletons only). This is wrong because if BPF object is loaded through generic bpf_object__load() API, global data maps won't be re-mmap()'ed after load step, and memory pointers returned from bpf_map__initial_value() would be wrong and won't reflect the actual memory shared between BPF program and user space. bpf_map__initial_value() return result is rarely used after load, so this went unnoticed for a really long time, until bpftrace project attempted to load BPF object through generic bpf_object__load() API and then used BPF subskeleton instantiated from such bpf_object. It turned out that .data/.rodata/.bss data updates through such subskeleton was "blackholed", all because libbpf wouldn't re-mmap() those maps during bpf_object__load() phase. Long story short, this step should be done by libbpf regardless of BPF skeleton usage, right after BPF map is created in the kernel. This patch moves this functionality into bpf_object__populate_internal_map() to achieve this. And bpf_object__load_skeleton() is now simple and almost trivial, only propagating these mmap()'ed pointers into user-supplied skeleton structs. We also do trivial adjustments to error reporting inside bpf_object__populate_internal_map() for consistency with the rest of libbpf's map-handling code. 
Reported-by: Alastair Robertson <ajor@meta.com> Reported-by: Jonathan Wiepert <jwiepert@meta.com> Fixes: d66562f ("libbpf: Add BPF object skeleton support") Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Link: https://lore.kernel.org/r/20241023043908.3834423-3-andrii@kernel.org Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Viktor Malik <vmalik@redhat.com>
1 parent 69afcfb commit dbc73dc

File tree

1 file changed

+40
-43
lines changed

1 file changed

+40
-43
lines changed

tools/lib/bpf/libbpf.c

Lines changed: 40 additions & 43 deletions
Original file line number · Diff line number · Diff line change
@@ -5094,6 +5094,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
50945094
enum libbpf_map_type map_type = map->libbpf_type;
50955095
char *cp, errmsg[STRERR_BUFSIZE];
50965096
int err, zero = 0;
5097+
size_t mmap_sz;
50975098

50985099
if (obj->gen_loader) {
50995100
bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
@@ -5107,8 +5108,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
51075108
if (err) {
51085109
err = -errno;
51095110
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5110-
pr_warn("Error setting initial map(%s) contents: %s\n",
5111-
map->name, cp);
5111+
pr_warn("map '%s': failed to set initial contents: %s\n",
5112+
bpf_map__name(map), cp);
51125113
return err;
51135114
}
51145115

@@ -5118,11 +5119,43 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
51185119
if (err) {
51195120
err = -errno;
51205121
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5121-
pr_warn("Error freezing map(%s) as read-only: %s\n",
5122-
map->name, cp);
5122+
pr_warn("map '%s': failed to freeze as read-only: %s\n",
5123+
bpf_map__name(map), cp);
51235124
return err;
51245125
}
51255126
}
5127+
5128+
/* Remap anonymous mmap()-ed "map initialization image" as
5129+
* a BPF map-backed mmap()-ed memory, but preserving the same
5130+
* memory address. This will cause kernel to change process'
5131+
* page table to point to a different piece of kernel memory,
5132+
* but from userspace point of view memory address (and its
5133+
* contents, being identical at this point) will stay the
5134+
* same. This mapping will be released by bpf_object__close()
5135+
* as per normal clean up procedure.
5136+
*/
5137+
mmap_sz = bpf_map_mmap_sz(map);
5138+
if (map->def.map_flags & BPF_F_MMAPABLE) {
5139+
void *mmaped;
5140+
int prot;
5141+
5142+
if (map->def.map_flags & BPF_F_RDONLY_PROG)
5143+
prot = PROT_READ;
5144+
else
5145+
prot = PROT_READ | PROT_WRITE;
5146+
mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
5147+
if (mmaped == MAP_FAILED) {
5148+
err = -errno;
5149+
pr_warn("map '%s': failed to re-mmap() contents: %d\n",
5150+
bpf_map__name(map), err);
5151+
return err;
5152+
}
5153+
map->mmaped = mmaped;
5154+
} else if (map->mmaped) {
5155+
munmap(map->mmaped, mmap_sz);
5156+
map->mmaped = NULL;
5157+
}
5158+
51265159
return 0;
51275160
}
51285161

@@ -5439,8 +5472,7 @@ bpf_object__create_maps(struct bpf_object *obj)
54395472
err = bpf_object__populate_internal_map(obj, map);
54405473
if (err < 0)
54415474
goto err_out;
5442-
}
5443-
if (map->def.type == BPF_MAP_TYPE_ARENA) {
5475+
} else if (map->def.type == BPF_MAP_TYPE_ARENA) {
54445476
map->mmaped = mmap((void *)(long)map->map_extra,
54455477
bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
54465478
map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
@@ -13881,46 +13913,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
1388113913
for (i = 0; i < s->map_cnt; i++) {
1388213914
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
1388313915
struct bpf_map *map = *map_skel->map;
13884-
size_t mmap_sz = bpf_map_mmap_sz(map);
13885-
int prot, map_fd = map->fd;
13886-
void **mmaped = map_skel->mmaped;
13887-
13888-
if (!mmaped)
13889-
continue;
13890-
13891-
if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
13892-
*mmaped = NULL;
13893-
continue;
13894-
}
1389513916

13896-
if (map->def.type == BPF_MAP_TYPE_ARENA) {
13897-
*mmaped = map->mmaped;
13917+
if (!map_skel->mmaped)
1389813918
continue;
13899-
}
13900-
13901-
if (map->def.map_flags & BPF_F_RDONLY_PROG)
13902-
prot = PROT_READ;
13903-
else
13904-
prot = PROT_READ | PROT_WRITE;
1390513919

13906-
/* Remap anonymous mmap()-ed "map initialization image" as
13907-
* a BPF map-backed mmap()-ed memory, but preserving the same
13908-
* memory address. This will cause kernel to change process'
13909-
* page table to point to a different piece of kernel memory,
13910-
* but from userspace point of view memory address (and its
13911-
* contents, being identical at this point) will stay the
13912-
* same. This mapping will be released by bpf_object__close()
13913-
* as per normal clean up procedure, so we don't need to worry
13914-
* about it from skeleton's clean up perspective.
13915-
*/
13916-
*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
13917-
if (*mmaped == MAP_FAILED) {
13918-
err = -errno;
13919-
*mmaped = NULL;
13920-
pr_warn("failed to re-mmap() map '%s': %d\n",
13921-
bpf_map__name(map), err);
13922-
return libbpf_err(err);
13923-
}
13920+
*map_skel->mmaped = map->mmaped;
1392413921
}
1392513922

1392613923
return 0;

0 commit comments

Comments (0)