Commit d303caf

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Replace bpf_map_kmalloc_node() with kmalloc_nolock() to fix kmemleak
   imbalance in tracking of bpf_async_cb structures (Alexei Starovoitov)

 - Make selftests/bpf arg_parsing.c more robust to errors (Andrii
   Nakryiko)

 - Fix redefinition of 'off' as different kind of symbol when I40E
   driver is builtin (Brahmajit Das)

 - Do not disable preemption in bpf_test_run (Sahil Chandna)

 - Fix memory leak in __lookup_instance error path (Shardul Bankar)

 - Ensure test data is flushed to disk before reading it (Xing Guo)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Fix redefinition of 'off' as different kind of symbol
  bpf: Do not disable preemption in bpf_test_run().
  bpf: Fix memory leak in __lookup_instance error path
  selftests: arg_parsing: Ensure data is flushed to disk before reading.
  bpf: Replace bpf_map_kmalloc_node() with kmalloc_nolock() to allocate bpf_async_cb structures.
  selftests/bpf: make arg_parsing.c more robust to crashes
  bpf: test_run: Fix ctx leak in bpf_prog_test_run_xdp error path
2 parents 847f242 + a1e83d4 commit d303caf

File tree

7 files changed (+59, -40 lines)

include/linux/bpf.h

Lines changed: 4 additions & 0 deletions
@@ -2499,6 +2499,8 @@ int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
 #ifdef CONFIG_MEMCG
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
 			   int node);
+void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
+			     int node);
 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
 		       gfp_t flags);
@@ -2511,6 +2513,8 @@ void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
  */
 #define bpf_map_kmalloc_node(_map, _size, _flags, _node) \
 		kmalloc_node(_size, _flags, _node)
+#define bpf_map_kmalloc_nolock(_map, _size, _flags, _node) \
+		kmalloc_nolock(_size, _flags, _node)
 #define bpf_map_kzalloc(_map, _size, _flags) \
 		kzalloc(_size, _flags)
 #define bpf_map_kvcalloc(_map, _n, _size, _flags) \

kernel/bpf/helpers.c

Lines changed: 14 additions & 11 deletions
@@ -1215,13 +1215,20 @@ static void bpf_wq_work(struct work_struct *work)
 	rcu_read_unlock_trace();
 }
 
+static void bpf_async_cb_rcu_free(struct rcu_head *rcu)
+{
+	struct bpf_async_cb *cb = container_of(rcu, struct bpf_async_cb, rcu);
+
+	kfree_nolock(cb);
+}
+
 static void bpf_wq_delete_work(struct work_struct *work)
 {
 	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);
 
 	cancel_work_sync(&w->work);
 
-	kfree_rcu(w, cb.rcu);
+	call_rcu(&w->cb.rcu, bpf_async_cb_rcu_free);
 }
 
 static void bpf_timer_delete_work(struct work_struct *work)
@@ -1230,13 +1237,13 @@ static void bpf_timer_delete_work(struct work_struct *work)
 
 	/* Cancel the timer and wait for callback to complete if it was running.
 	 * If hrtimer_cancel() can be safely called it's safe to call
-	 * kfree_rcu(t) right after for both preallocated and non-preallocated
+	 * call_rcu() right after for both preallocated and non-preallocated
 	 * maps. The async->cb = NULL was already done and no code path can see
 	 * address 't' anymore. Timer if armed for existing bpf_hrtimer before
 	 * bpf_timer_cancel_and_free will have been cancelled.
 	 */
 	hrtimer_cancel(&t->timer);
-	kfree_rcu(t, cb.rcu);
+	call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free);
 }
 
 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
@@ -1270,11 +1277,7 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
 		goto out;
 	}
 
-	/* Allocate via bpf_map_kmalloc_node() for memcg accounting. Until
-	 * kmalloc_nolock() is available, avoid locking issues by using
-	 * __GFP_HIGH (GFP_ATOMIC & ~__GFP_RECLAIM).
-	 */
-	cb = bpf_map_kmalloc_node(map, size, __GFP_HIGH, map->numa_node);
+	cb = bpf_map_kmalloc_nolock(map, size, 0, map->numa_node);
 	if (!cb) {
 		ret = -ENOMEM;
 		goto out;
@@ -1315,7 +1318,7 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
 		 * or pinned in bpffs.
 		 */
 		WRITE_ONCE(async->cb, NULL);
-		kfree(cb);
+		kfree_nolock(cb);
 		ret = -EPERM;
 	}
 out:
@@ -1580,7 +1583,7 @@ void bpf_timer_cancel_and_free(void *val)
 	 * timer _before_ calling us, such that failing to cancel it here will
 	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
 	 * Therefore, we _need_ to cancel any outstanding timers before we do
-	 * kfree_rcu, even though no more timers can be armed.
+	 * call_rcu, even though no more timers can be armed.
 	 *
 	 * Moreover, we need to schedule work even if timer does not belong to
 	 * the calling callback_fn, as on two different CPUs, we can end up in a
@@ -1607,7 +1610,7 @@ void bpf_timer_cancel_and_free(void *val)
 		 * completion.
 		 */
 		if (hrtimer_try_to_cancel(&t->timer) >= 0)
-			kfree_rcu(t, cb.rcu);
+			call_rcu(&t->cb.rcu, bpf_async_cb_rcu_free);
 		else
 			queue_work(system_dfl_wq, &t->cb.delete_work);
 	} else {
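
The allocator switch forces the matching free-side change visible above: memory obtained from kmalloc_nolock() must be released with kfree_nolock(), but kfree_rcu() always frees through plain kfree(). Hence every deferred free now goes through an explicit call_rcu() callback. A minimal sketch of the pairing, using an illustrative struct name (the real one is bpf_async_cb):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative stand-in for struct bpf_async_cb; only the embedded
 * rcu_head matters for this pattern.
 */
struct async_obj {
	struct rcu_head rcu;
	/* ... payload ... */
};

/* RCU callback: free with kfree_nolock(), matching kmalloc_nolock(). */
static void async_obj_rcu_free(struct rcu_head *rcu)
{
	struct async_obj *obj = container_of(rcu, struct async_obj, rcu);

	kfree_nolock(obj);
}

static void async_obj_free_deferred(struct async_obj *obj)
{
	/* kfree_rcu(obj, rcu) would free with kfree(); queue the
	 * explicit callback instead.
	 */
	call_rcu(&obj->rcu, async_obj_rcu_free);
}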

kernel/bpf/liveness.c

Lines changed: 3 additions & 1 deletion
@@ -195,8 +195,10 @@ static struct func_instance *__lookup_instance(struct bpf_verifier_env *env,
 		return ERR_PTR(-ENOMEM);
 	result->must_write_set = kvcalloc(subprog_sz, sizeof(*result->must_write_set),
 					  GFP_KERNEL_ACCOUNT);
-	if (!result->must_write_set)
+	if (!result->must_write_set) {
+		kvfree(result);
 		return ERR_PTR(-ENOMEM);
+	}
 	memcpy(&result->callchain, callchain, sizeof(*callchain));
 	result->insn_cnt = subprog_sz;
 	hash_add(liveness->func_instances, &result->hl_node, key);
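
The leak is the classic two-stage-allocation bug: result is allocated first, and when the kvcalloc() for must_write_set fails, the early return dropped the only pointer to result. A runnable userspace sketch of the fixed shape (names mirror the diff; the verifier context and ERR_PTR convention are omitted):

#include <stdlib.h>

struct func_instance {
	int *must_write_set;
	size_t insn_cnt;
};

static struct func_instance *lookup_instance(size_t subprog_sz)
{
	struct func_instance *result = calloc(1, sizeof(*result));

	if (!result)
		return NULL;
	result->must_write_set = calloc(subprog_sz, sizeof(*result->must_write_set));
	if (!result->must_write_set) {
		free(result);	/* the free this commit adds (kvfree() in-kernel) */
		return NULL;
	}
	result->insn_cnt = subprog_sz;
	return result;
}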

kernel/bpf/syscall.c

Lines changed: 15 additions & 0 deletions
@@ -520,6 +520,21 @@ void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
 	return ptr;
 }
 
+void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
+			     int node)
+{
+	struct mem_cgroup *memcg, *old_memcg;
+	void *ptr;
+
+	memcg = bpf_map_get_memcg(map);
+	old_memcg = set_active_memcg(memcg);
+	ptr = kmalloc_nolock(size, flags | __GFP_ACCOUNT, node);
+	set_active_memcg(old_memcg);
+	mem_cgroup_put(memcg);
+
+	return ptr;
+}
+
 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
 {
 	struct mem_cgroup *memcg, *old_memcg;

net/bpf/test_run.c

Lines changed: 7 additions & 18 deletions
@@ -29,33 +29,22 @@
 #include <trace/events/bpf_test_run.h>
 
 struct bpf_test_timer {
-	enum { NO_PREEMPT, NO_MIGRATE } mode;
 	u32 i;
 	u64 time_start, time_spent;
 };
 
 static void bpf_test_timer_enter(struct bpf_test_timer *t)
 	__acquires(rcu)
 {
-	rcu_read_lock();
-	if (t->mode == NO_PREEMPT)
-		preempt_disable();
-	else
-		migrate_disable();
-
+	rcu_read_lock_dont_migrate();
 	t->time_start = ktime_get_ns();
 }
 
 static void bpf_test_timer_leave(struct bpf_test_timer *t)
 	__releases(rcu)
 {
 	t->time_start = 0;
-
-	if (t->mode == NO_PREEMPT)
-		preempt_enable();
-	else
-		migrate_enable();
-	rcu_read_unlock();
+	rcu_read_unlock_migrate();
 }
 
 static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
@@ -374,7 +363,7 @@ static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
 
 {
 	struct xdp_test_data xdp = { .batch_size = batch_size };
-	struct bpf_test_timer t = { .mode = NO_MIGRATE };
+	struct bpf_test_timer t = {};
 	int ret;
 
 	if (!repeat)
@@ -404,7 +393,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 	struct bpf_prog_array_item item = {.prog = prog};
 	struct bpf_run_ctx *old_ctx;
 	struct bpf_cg_run_ctx run_ctx;
-	struct bpf_test_timer t = { NO_MIGRATE };
+	struct bpf_test_timer t = {};
 	enum bpf_cgroup_storage_type stype;
 	int ret;
 
@@ -1269,7 +1258,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 		goto free_ctx;
 
 	if (kattr->test.data_size_in - meta_sz < ETH_HLEN)
-		return -EINVAL;
+		goto free_ctx;
 
 	data = bpf_test_init(kattr, linear_sz, max_linear_sz, headroom, tailroom);
 	if (IS_ERR(data)) {
@@ -1377,7 +1366,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 				     const union bpf_attr *kattr,
 				     union bpf_attr __user *uattr)
 {
-	struct bpf_test_timer t = { NO_PREEMPT };
+	struct bpf_test_timer t = {};
 	u32 size = kattr->test.data_size_in;
 	struct bpf_flow_dissector ctx = {};
 	u32 repeat = kattr->test.repeat;
@@ -1445,7 +1434,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
 				union bpf_attr __user *uattr)
 {
-	struct bpf_test_timer t = { NO_PREEMPT };
+	struct bpf_test_timer t = {};
 	struct bpf_prog_array *progs = NULL;
 	struct bpf_sk_lookup_kern ctx = {};
 	u32 repeat = kattr->test.repeat;
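
Two independent fixes land in this file. First, the per-caller NO_PREEMPT/NO_MIGRATE mode is dropped: rcu_read_lock_dont_migrate()/rcu_read_unlock_migrate() give every test runner the same RCU-plus-no-migration bracket, so nothing disables preemption anymore and the mode field becomes dead. Second, bpf_prog_test_run_xdp() leaked its copied-in ctx on the short-packet check because it returned directly instead of jumping to the cleanup label. A runnable userspace sketch of that second fix (names and sizes are illustrative):

#include <errno.h>
#include <stdlib.h>

#define ETH_HLEN 14	/* minimal Ethernet header length */

static int test_run_xdp(size_t data_size_in, size_t meta_sz)
{
	int ret = -EINVAL;
	void *ctx = malloc(64);		/* stands in for the copied-in xdp_md */

	if (!ctx)
		return -ENOMEM;		/* nothing held yet: plain return is fine */
	if (data_size_in - meta_sz < ETH_HLEN)
		goto free_ctx;		/* was 'return -EINVAL': leaked ctx */
	/* ... build the test frame, run the program, copy results out ... */
	ret = 0;
free_ctx:
	free(ctx);
	return ret;
}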

tools/testing/selftests/bpf/prog_tests/arg_parsing.c

Lines changed: 9 additions & 3 deletions
@@ -144,11 +144,17 @@ static void test_parse_test_list_file(void)
 	if (!ASSERT_OK(ferror(fp), "prepare tmp"))
 		goto out_fclose;
 
+	if (!ASSERT_OK(fsync(fileno(fp)), "fsync tmp"))
+		goto out_fclose;
+
 	init_test_filter_set(&set);
 
-	ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file");
+	if (!ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file"))
+		goto out_fclose;
+
+	if (!ASSERT_EQ(set.cnt, 4, "test count"))
+		goto out_free_set;
 
-	ASSERT_EQ(set.cnt, 4, "test count");
 	ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name");
 	ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count");
 	ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name");
@@ -158,8 +164,8 @@ static void test_parse_test_list_file(void)
 	ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name");
 	ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name");
 
+out_free_set:
 	free_test_filter_set(&set);
-
 out_fclose:
 	fclose(fp);
 out_remove:
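
The test writes a filter list through a buffered FILE * and then has parse_test_list_file() reopen the same path, so the written bytes must actually reach the file first; the commit adds an fsync(fileno(fp)) after the existing ferror() check, and converts the follow-on asserts to bail out (via the new out_free_set label) instead of dereferencing a set that was never populated. A minimal sketch of the flush-then-sync step on an already-open stdio stream (write_and_sync() is an illustrative helper, not part of the selftest):

#include <stdio.h>
#include <unistd.h>

/* Push buffered writes down to the file before another code path
 * re-reads it by name: fflush() moves stdio's userspace buffer into
 * the kernel, fsync() forces it out to storage (the added call).
 */
static int write_and_sync(FILE *fp)
{
	if (fflush(fp) != 0)
		return -1;
	if (fsync(fileno(fp)) != 0)
		return -1;
	return 0;
}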

tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c

Lines changed: 7 additions & 7 deletions
@@ -225,7 +225,7 @@ int trusted_to_untrusted(void *ctx)
 }
 
 char mem[16];
-u32 off;
+u32 offset;
 
 SEC("tp_btf/sys_enter")
 __success
@@ -240,9 +240,9 @@ int anything_to_untrusted(void *ctx)
 	/* scalar to untrusted */
 	subprog_untrusted(0);
 	/* variable offset to untrusted (map) */
-	subprog_untrusted((void *)mem + off);
+	subprog_untrusted((void *)mem + offset);
 	/* variable offset to untrusted (trusted) */
-	subprog_untrusted((void *)bpf_get_current_task_btf() + off);
+	subprog_untrusted((void *)bpf_get_current_task_btf() + offset);
 	return 0;
 }
 
@@ -298,12 +298,12 @@ int anything_to_untrusted_mem(void *ctx)
 	/* scalar to untrusted mem */
 	subprog_void_untrusted(0);
 	/* variable offset to untrusted mem (map) */
-	subprog_void_untrusted((void *)mem + off);
+	subprog_void_untrusted((void *)mem + offset);
 	/* variable offset to untrusted mem (trusted) */
-	subprog_void_untrusted(bpf_get_current_task_btf() + off);
+	subprog_void_untrusted(bpf_get_current_task_btf() + offset);
 	/* variable offset to untrusted char/enum (map) */
-	subprog_char_untrusted(mem + off);
-	subprog_enum_untrusted((void *)mem + off);
+	subprog_char_untrusted(mem + offset);
+	subprog_enum_untrusted((void *)mem + offset);
 	return 0;
 }
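
The rename side-steps a plain C name collision: when the I40E driver is builtin, the vmlinux.h generated for the selftests ends up containing a symbol named off (the exact colliding definition is not shown in this commit), so the test's global u32 off triggers a "redefinition as a different kind of symbol" error. A minimal illustration of that error class, with a hypothetical enum standing in for the real definition:

typedef unsigned int u32;

/* Hypothetical colliding definition, standing in for whatever
 * vmlinux.h exports under the name 'off' when I40E is builtin:
 */
enum hypothetical_if_state { off, on };

/* u32 off;	-- error: 'off' redefined as a different kind of symbol */
u32 offset;	/* the commit's fix: rename the selftest global */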
