Skip to content

Commit d2f966a

Browse files
committed
Merge: bpf: update to v6.14
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/899 JIRA: https://issues.redhat.com/browse/RHEL-78201 JIRA: https://issues.redhat.com/browse/RHEL-83206 JIRA: https://issues.redhat.com/browse/RHEL-83311 JIRA: https://issues.redhat.com/browse/RHEL-83351 CVE: CVE-2025-21851 CVE: CVE-2024-58088 CVE: CVE-2025-21853 Update the BPF subsystem to upstream kernel version 6.14. Signed-off-by: Jerome Marchand <jmarchan@redhat.com> Approved-by: Viktor Malik <vmalik@redhat.com> Approved-by: Toke Høiland-Jørgensen <toke@redhat.com> Approved-by: Jan Stancek <jstancek@redhat.com> Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> Merged-by: Jan Stancek <jstancek@redhat.com>
2 parents 9361d91 + ab29301 commit d2f966a

File tree

143 files changed

+4816
-2059
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

143 files changed

+4816
-2059
lines changed

include/linux/bpf.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2299,6 +2299,14 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
22992299
struct bpf_map *bpf_map_get(u32 ufd);
23002300
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
23012301

2302+
/*
2303+
* The __bpf_map_get() and __btf_get_by_fd() functions parse a file
2304+
* descriptor and return a corresponding map or btf object.
2305+
* Their names are double underscored to emphasize the fact that they
2306+
* do not increase refcnt. To also increase refcnt use corresponding
2307+
* bpf_map_get() and btf_get_by_fd() functions.
2308+
*/
2309+
23022310
static inline struct bpf_map *__bpf_map_get(struct fd f)
23032311
{
23042312
if (fd_empty(f))
@@ -2308,6 +2316,15 @@ static inline struct bpf_map *__bpf_map_get(struct fd f)
23082316
return fd_file(f)->private_data;
23092317
}
23102318

2319+
static inline struct btf *__btf_get_by_fd(struct fd f)
2320+
{
2321+
if (fd_empty(f))
2322+
return ERR_PTR(-EBADF);
2323+
if (unlikely(fd_file(f)->f_op != &btf_fops))
2324+
return ERR_PTR(-EINVAL);
2325+
return fd_file(f)->private_data;
2326+
}
2327+
23112328
void bpf_map_inc(struct bpf_map *map);
23122329
void bpf_map_inc_with_uref(struct bpf_map *map);
23132330
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);

include/linux/bpf_verifier.h

Lines changed: 16 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -233,6 +233,7 @@ enum bpf_stack_slot_type {
233233
*/
234234
STACK_DYNPTR,
235235
STACK_ITER,
236+
STACK_IRQ_FLAG,
236237
};
237238

238239
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
@@ -254,8 +255,9 @@ struct bpf_reference_state {
254255
* default to pointer reference on zero initialization of a state.
255256
*/
256257
enum ref_state_type {
257-
REF_TYPE_PTR = 0,
258-
REF_TYPE_LOCK,
258+
REF_TYPE_PTR = 1,
259+
REF_TYPE_IRQ = 2,
260+
REF_TYPE_LOCK = 3,
259261
} type;
260262
/* Track each reference created with a unique id, even if the same
261263
* instruction creates the reference multiple times (eg, via CALL).
@@ -315,9 +317,6 @@ struct bpf_func_state {
315317
u32 callback_depth;
316318

317319
/* The following fields should be last. See copy_func_state() */
318-
int acquired_refs;
319-
int active_locks;
320-
struct bpf_reference_state *refs;
321320
/* The state of the stack. Each element of the array describes BPF_REG_SIZE
322321
* (i.e. 8) bytes worth of stack memory.
323322
* stack[0] represents bytes [*(r10-8)..*(r10-1)]
@@ -370,6 +369,8 @@ struct bpf_verifier_state {
370369
/* call stack tracking */
371370
struct bpf_func_state *frame[MAX_CALL_FRAMES];
372371
struct bpf_verifier_state *parent;
372+
/* Acquired reference states */
373+
struct bpf_reference_state *refs;
373374
/*
374375
* 'branches' field is the number of branches left to explore:
375376
* 0 - all possible paths from this state reached bpf_exit or
@@ -419,9 +420,13 @@ struct bpf_verifier_state {
419420
u32 insn_idx;
420421
u32 curframe;
421422

422-
bool speculative;
423+
u32 acquired_refs;
424+
u32 active_locks;
425+
u32 active_preempt_locks;
426+
u32 active_irq_id;
423427
bool active_rcu_lock;
424-
u32 active_preempt_lock;
428+
429+
bool speculative;
425430
/* If this state was ever pointed-to by other state's loop_entry field
426431
* this flag would be set to true. Used to avoid freeing such states
427432
* while they are still in use.
@@ -980,8 +985,9 @@ const char *dynptr_type_str(enum bpf_dynptr_type type);
980985
const char *iter_type_str(const struct btf *btf, u32 btf_id);
981986
const char *iter_state_str(enum bpf_iter_state state);
982987

983-
void print_verifier_state(struct bpf_verifier_env *env,
984-
const struct bpf_func_state *state, bool print_all);
985-
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
988+
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
989+
u32 frameno, bool print_all);
990+
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
991+
u32 frameno);
986992

987993
#endif /* _LINUX_BPF_VERIFIER_H */

include/linux/btf.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -353,6 +353,11 @@ static inline bool btf_type_is_scalar(const struct btf_type *t)
353353
return btf_type_is_int(t) || btf_type_is_enum(t);
354354
}
355355

356+
static inline bool btf_type_is_fwd(const struct btf_type *t)
357+
{
358+
return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
359+
}
360+
356361
static inline bool btf_type_is_typedef(const struct btf_type *t)
357362
{
358363
return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;

include/uapi/linux/bpf.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1573,6 +1573,16 @@ union bpf_attr {
15731573
* If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
15741574
*/
15751575
__s32 prog_token_fd;
1576+
/* The fd_array_cnt can be used to pass the length of the
1577+
* fd_array array. In this case all the [map] file descriptors
1578+
* passed in this array will be bound to the program, even if
1579+
* the maps are not referenced directly. The functionality is
1580+
* similar to the BPF_PROG_BIND_MAP syscall, but maps can be
1581+
* used by the verifier during the program load. If provided,
1582+
* then the fd_array[0,...,fd_array_cnt-1] is expected to be
1583+
* continuous.
1584+
*/
1585+
__u32 fd_array_cnt;
15761586
};
15771587

15781588
struct { /* anonymous struct used by BPF_OBJ_* commands */

kernel/bpf/arena.c

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
*/
4040

4141
/* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
42-
#define GUARD_SZ (1ull << sizeof_field(struct bpf_insn, off) * 8)
42+
#define GUARD_SZ round_up(1ull << sizeof_field(struct bpf_insn, off) * 8, PAGE_SIZE << 1)
4343
#define KERN_VM_SZ (SZ_4G + GUARD_SZ)
4444

4545
struct bpf_arena {
@@ -138,7 +138,11 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
138138
INIT_LIST_HEAD(&arena->vma_list);
139139
bpf_map_init_from_attr(&arena->map, attr);
140140
range_tree_init(&arena->rt);
141-
range_tree_set(&arena->rt, 0, attr->max_entries);
141+
err = range_tree_set(&arena->rt, 0, attr->max_entries);
142+
if (err) {
143+
bpf_map_area_free(arena);
144+
goto err;
145+
}
142146
mutex_init(&arena->lock);
143147

144148
return &arena->map;
@@ -218,7 +222,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
218222
struct vma_list {
219223
struct vm_area_struct *vma;
220224
struct list_head head;
221-
atomic_t mmap_count;
225+
refcount_t mmap_count;
222226
};
223227

224228
static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
@@ -228,7 +232,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
228232
vml = kmalloc(sizeof(*vml), GFP_KERNEL);
229233
if (!vml)
230234
return -ENOMEM;
231-
atomic_set(&vml->mmap_count, 1);
235+
refcount_set(&vml->mmap_count, 1);
232236
vma->vm_private_data = vml;
233237
vml->vma = vma;
234238
list_add(&vml->head, &arena->vma_list);
@@ -239,7 +243,7 @@ static void arena_vm_open(struct vm_area_struct *vma)
239243
{
240244
struct vma_list *vml = vma->vm_private_data;
241245

242-
atomic_inc(&vml->mmap_count);
246+
refcount_inc(&vml->mmap_count);
243247
}
244248

245249
static void arena_vm_close(struct vm_area_struct *vma)
@@ -248,7 +252,7 @@ static void arena_vm_close(struct vm_area_struct *vma)
248252
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
249253
struct vma_list *vml = vma->vm_private_data;
250254

251-
if (!atomic_dec_and_test(&vml->mmap_count))
255+
if (!refcount_dec_and_test(&vml->mmap_count))
252256
return;
253257
guard(mutex)(&arena->lock);
254258
/* update link list under lock */
@@ -257,8 +261,6 @@ static void arena_vm_close(struct vm_area_struct *vma)
257261
kfree(vml);
258262
}
259263

260-
#define MT_ENTRY ((void *)&arena_map_ops) /* unused. has to be valid pointer */
261-
262264
static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
263265
{
264266
struct bpf_map *map = vmf->vma->vm_file->private_data;

kernel/bpf/arraymap.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -735,13 +735,13 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
735735
u64 ret = 0;
736736
void *val;
737737

738+
cant_migrate();
739+
738740
if (flags != 0)
739741
return -EINVAL;
740742

741743
is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
742744
array = container_of(map, struct bpf_array, map);
743-
if (is_percpu)
744-
migrate_disable();
745745
for (i = 0; i < map->max_entries; i++) {
746746
if (is_percpu)
747747
val = this_cpu_ptr(array->pptrs[i]);
@@ -756,8 +756,6 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
756756
break;
757757
}
758758

759-
if (is_percpu)
760-
migrate_enable();
761759
return num_elems;
762760
}
763761

kernel/bpf/bpf_cgrp_storage.c

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -15,22 +15,20 @@ static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
1515

1616
static void bpf_cgrp_storage_lock(void)
1717
{
18-
migrate_disable();
18+
cant_migrate();
1919
this_cpu_inc(bpf_cgrp_storage_busy);
2020
}
2121

2222
static void bpf_cgrp_storage_unlock(void)
2323
{
2424
this_cpu_dec(bpf_cgrp_storage_busy);
25-
migrate_enable();
2625
}
2726

2827
static bool bpf_cgrp_storage_trylock(void)
2928
{
30-
migrate_disable();
29+
cant_migrate();
3130
if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
3231
this_cpu_dec(bpf_cgrp_storage_busy);
33-
migrate_enable();
3432
return false;
3533
}
3634
return true;
@@ -47,17 +45,18 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
4745
{
4846
struct bpf_local_storage *local_storage;
4947

48+
migrate_disable();
5049
rcu_read_lock();
5150
local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
52-
if (!local_storage) {
53-
rcu_read_unlock();
54-
return;
55-
}
51+
if (!local_storage)
52+
goto out;
5653

5754
bpf_cgrp_storage_lock();
5855
bpf_local_storage_destroy(local_storage);
5956
bpf_cgrp_storage_unlock();
57+
out:
6058
rcu_read_unlock();
59+
migrate_enable();
6160
}
6261

6362
static struct bpf_local_storage_data *
@@ -154,7 +153,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
154153

155154
static void cgroup_storage_map_free(struct bpf_map *map)
156155
{
157-
bpf_local_storage_map_free(map, &cgroup_cache, NULL);
156+
bpf_local_storage_map_free(map, &cgroup_cache, &bpf_cgrp_storage_busy);
158157
}
159158

160159
/* *gfp_flags* is a hidden argument provided by the verifier */

kernel/bpf/bpf_inode_storage.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -63,16 +63,17 @@ void bpf_inode_storage_free(struct inode *inode)
6363
if (!bsb)
6464
return;
6565

66+
migrate_disable();
6667
rcu_read_lock();
6768

6869
local_storage = rcu_dereference(bsb->storage);
69-
if (!local_storage) {
70-
rcu_read_unlock();
71-
return;
72-
}
70+
if (!local_storage)
71+
goto out;
7372

7473
bpf_local_storage_destroy(local_storage);
74+
out:
7575
rcu_read_unlock();
76+
migrate_enable();
7677
}
7778

7879
static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)

0 commit comments

Comments
 (0)