Commit e41c237

eddyz87 authored and Alexei Starovoitov committed
bpf: enable callchain sensitive stack liveness tracking
Allocate analysis instance:
- Add bpf_stack_liveness_{init,free}() calls to bpf_check().

Notify the instance about any stack reads and writes:
- Add bpf_mark_stack_write() call at every location where
  REG_LIVE_WRITTEN is recorded for a stack slot.
- Add bpf_mark_stack_read() call at every location mark_reg_read()
  is called.
- Both bpf_mark_stack_{read,write}() rely on the
  env->liveness->cur_instance callchain being in sync with
  env->cur_state. It is possible to update
  env->liveness->cur_instance every time a mark read/write is
  called, but that costs a hash table lookup and is noticeable in
  the performance profile. Hence, manually reset
  env->liveness->cur_instance whenever the verifier changes the
  env->cur_state call stack:
  - call bpf_reset_live_stack_callchain() when the verifier enters
    a subprogram;
  - call bpf_update_live_stack() when the verifier exits a
    subprogram (it implies the reset).

Make sure bpf_update_live_stack() is called for a callchain before
issuing liveness queries, and make sure that bpf_update_live_stack()
is called for any callee callchain first:
- Add bpf_update_live_stack() call at every location that processes
  BPF_EXIT:
  - exit from a subprogram;
  - before pop_stack() call.
  This makes sure that bpf_update_live_stack() is called for callee
  callchains before caller callchains.

Make sure must_write marks are set to zero for instructions that do
not always access the stack:
- Wrap do_check_insn() with bpf_reset_stack_write_marks() /
  bpf_commit_stack_write_marks() calls. Any calls to
  bpf_mark_stack_write() are accumulated between this pair of calls.
  If no bpf_mark_stack_write() calls were made, it means that the
  instruction does not access the stack (at least on the current
  verification path), and it is important to record this fact.

Finally, use bpf_live_stack_query_init() / bpf_stack_slot_alive() to
query stack liveness info.

The manual tracking of the correct order for callee/caller
bpf_update_live_stack() calls is a bit convoluted and may warrant
some automation in future revisions.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250918-callchain-sensitive-liveness-v3-7-c3cd27bacc60@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
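For orientation, the call pattern that the hunks below establish can be condensed as follows. This is a simplified sketch assembled from the diff, not a compilable excerpt of kernel/bpf/verifier.c: surrounding verifier logic, locals, and most error paths are elided, and the comments name the functions each fragment is taken from.

	/* bpf_check(): one liveness analysis instance per verification run. */
	ret = bpf_stack_liveness_init(env);
	if (ret)
		goto skip_full_check;
	/* ... full verification ... */
	bpf_stack_liveness_free(env);

	/* At every site that records REG_LIVE_WRITTEN for a stack slot, and at
	 * every mark_reg_read() site, notify the analysis instance:
	 */
	bpf_mark_stack_write(env, state->frameno, BIT(spi));
	err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi));

	/* do_check(): bracket each instruction so that must_write marks default
	 * to zero when the instruction does not write the stack on this path.
	 */
	err = bpf_reset_stack_write_marks(env, env->insn_idx);
	if (err)
		return err;
	err = do_check_insn(env, &do_print_state);
	if (err >= 0 || error_recoverable_with_nospec(err)) {
		marks_err = bpf_commit_stack_write_marks(env);
		if (marks_err)
			return marks_err;
	}

	/* Keep env->liveness->cur_instance in sync with env->cur_state:
	 * reset when entering a callee (check_func_call), flush when leaving a
	 * callchain (BPF_EXIT from a subprogram, or just before pop_stack()),
	 * so callee callchains are always updated before their callers.
	 */
	bpf_reset_live_stack_callchain(env);
	err = bpf_update_live_stack(env);

	/* Liveness queries, e.g. clean_verifier_state()/clean_func_state(): */
	bpf_live_stack_query_init(env, st);
	if (!bpf_stack_slot_alive(env, st->frameno, i))
		/* slot is dead, safe to mark STACK_INVALID */;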
1 parent b3698c3 commit e41c237

1 file changed, 53 insertions(+), 8 deletions(-)

kernel/bpf/verifier.c

Lines changed: 53 additions & 8 deletions
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -789,6 +789,7 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
 
 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+	bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
 
 	return 0;
 }
@@ -828,6 +829,7 @@ static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_stat
 	 */
 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+	bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
 }
 
 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
@@ -939,6 +941,7 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
 	/* Same reason as unmark_stack_slots_dynptr above */
 	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+	bpf_mark_stack_write(env, state->frameno, BIT(spi - 1) | BIT(spi));
 
 	return 0;
 }
@@ -1066,6 +1069,7 @@ static int mark_stack_slots_iter(struct bpf_verifier_env *env,
 		for (j = 0; j < BPF_REG_SIZE; j++)
 			slot->slot_type[j] = STACK_ITER;
 
+		bpf_mark_stack_write(env, state->frameno, BIT(spi - i));
 		mark_stack_slot_scratched(env, spi - i);
 	}
 
@@ -1097,6 +1101,7 @@ static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
 		for (j = 0; j < BPF_REG_SIZE; j++)
 			slot->slot_type[j] = STACK_INVALID;
 
+		bpf_mark_stack_write(env, state->frameno, BIT(spi - i));
 		mark_stack_slot_scratched(env, spi - i);
 	}
 
@@ -1186,6 +1191,7 @@ static int mark_stack_slot_irq_flag(struct bpf_verifier_env *env,
 	slot = &state->stack[spi];
 	st = &slot->spilled_ptr;
 
+	bpf_mark_stack_write(env, reg->frameno, BIT(spi));
 	__mark_reg_known_zero(st);
 	st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
 	st->live |= REG_LIVE_WRITTEN;
@@ -1244,6 +1250,7 @@ static int unmark_stack_slot_irq_flag(struct bpf_verifier_env *env, struct bpf_r
 
 	/* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
 	st->live |= REG_LIVE_WRITTEN;
+	bpf_mark_stack_write(env, reg->frameno, BIT(spi));
 
 	for (i = 0; i < BPF_REG_SIZE; i++)
 		slot->slot_type[i] = STACK_INVALID;
@@ -3634,6 +3641,9 @@ static int mark_stack_slot_obj_read(struct bpf_verifier_env *env, struct bpf_reg
 		if (err)
 			return err;
 
+		err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi - i));
+		if (err)
+			return err;
 		mark_stack_slot_scratched(env, spi - i);
 	}
 	return 0;
@@ -5166,6 +5176,18 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 	if (err)
 		return err;
 
+	if (!(off % BPF_REG_SIZE) && size == BPF_REG_SIZE) {
+		/* only mark the slot as written if all 8 bytes were written
+		 * otherwise read propagation may incorrectly stop too soon
+		 * when stack slots are partially written.
+		 * This heuristic means that read propagation will be
+		 * conservative, since it will add reg_live_read marks
+		 * to stack slots all the way to first state when programs
+		 * writes+reads less than 8 bytes
+		 */
+		bpf_mark_stack_write(env, state->frameno, BIT(spi));
+	}
+
 	check_fastcall_stack_contract(env, state, insn_idx, off);
 	mark_stack_slot_scratched(env, spi);
 	if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
@@ -5435,12 +5457,16 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 	struct bpf_reg_state *reg;
 	u8 *stype, type;
 	int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
+	int err;
 
 	stype = reg_state->stack[spi].slot_type;
 	reg = &reg_state->stack[spi].spilled_ptr;
 
 	mark_stack_slot_scratched(env, spi);
 	check_fastcall_stack_contract(env, state, env->insn_idx, off);
+	err = bpf_mark_stack_read(env, reg_state->frameno, env->insn_idx, BIT(spi));
+	if (err)
+		return err;
 
 	if (is_spilled_reg(&reg_state->stack[spi])) {
 		u8 spill_size = 1;
@@ -8174,6 +8200,9 @@ static int check_stack_range_initialized(
 		mark_reg_read(env, &state->stack[spi].spilled_ptr,
 			      state->stack[spi].spilled_ptr.parent,
 			      REG_LIVE_READ64);
+		err = bpf_mark_stack_read(env, reg->frameno, env->insn_idx, BIT(spi));
+		if (err)
+			return err;
 		/* We do not set REG_LIVE_WRITTEN for stack slot, as we can not
 		 * be sure that whether stack slot is written to or not. Hence,
 		 * we must still conservatively propagate reads upwards even if
@@ -10735,6 +10764,8 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	/* and go analyze first insn of the callee */
 	*insn_idx = env->subprog_info[subprog].start - 1;
 
+	bpf_reset_live_stack_callchain(env);
+
 	if (env->log.level & BPF_LOG_LEVEL) {
 		verbose(env, "caller:\n");
 		print_verifier_state(env, state, caller->frameno, true);
@@ -18532,7 +18563,6 @@ static void clean_func_state(struct bpf_verifier_env *env,
 			     u32 ip)
 {
 	u16 live_regs = env->insn_aux_data[ip].live_regs_before;
-	enum bpf_reg_liveness live;
 	int i, j;
 
 	for (i = 0; i < BPF_REG_FP; i++) {
@@ -18545,9 +18575,7 @@ static void clean_func_state(struct bpf_verifier_env *env,
 	}
 
 	for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
-		live = st->stack[i].spilled_ptr.live;
-		/* liveness must not touch this stack slot anymore */
-		if (!(live & REG_LIVE_READ)) {
+		if (!bpf_stack_slot_alive(env, st->frameno, i)) {
 			__mark_reg_not_init(env, &st->stack[i].spilled_ptr);
 			for (j = 0; j < BPF_REG_SIZE; j++)
 				st->stack[i].slot_type[j] = STACK_INVALID;
@@ -18560,6 +18588,7 @@ static void clean_verifier_state(struct bpf_verifier_env *env,
 {
 	int i, ip;
 
+	bpf_live_stack_query_init(env, st);
 	st->cleaned = true;
 	for (i = 0; i <= st->curframe; i++) {
 		ip = frame_insn_idx(st, i);
@@ -18645,9 +18674,6 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
 	if (exact == EXACT)
 		return regs_exact(rold, rcur, idmap);
 
-	if (!(rold->live & REG_LIVE_READ) && exact == NOT_EXACT)
-		/* explored state didn't use this */
-		return true;
 	if (rold->type == NOT_INIT) {
 		if (exact == NOT_EXACT || rcur->type == NOT_INIT)
 			/* explored state can't have used this */
@@ -19886,6 +19912,9 @@ static int process_bpf_exit_full(struct bpf_verifier_env *env,
 		return PROCESS_BPF_EXIT;
 
 	if (env->cur_state->curframe) {
+		err = bpf_update_live_stack(env);
+		if (err)
+			return err;
 		/* exit from nested function */
 		err = prepare_func_exit(env, &env->insn_idx);
 		if (err)
@@ -20071,7 +20100,7 @@ static int do_check(struct bpf_verifier_env *env)
 	for (;;) {
 		struct bpf_insn *insn;
 		struct bpf_insn_aux_data *insn_aux;
-		int err;
+		int err, marks_err;
 
 		/* reset current history entry on each new instruction */
 		env->cur_hist_ent = NULL;
@@ -20164,7 +20193,15 @@ static int do_check(struct bpf_verifier_env *env)
 		if (state->speculative && insn_aux->nospec)
 			goto process_bpf_exit;
 
+		err = bpf_reset_stack_write_marks(env, env->insn_idx);
+		if (err)
+			return err;
 		err = do_check_insn(env, &do_print_state);
+		if (err >= 0 || error_recoverable_with_nospec(err)) {
+			marks_err = bpf_commit_stack_write_marks(env);
+			if (marks_err)
+				return marks_err;
+		}
 		if (error_recoverable_with_nospec(err) && state->speculative) {
 			/* Prevent this speculative path from ever reaching the
 			 * insn that would have been unsafe to execute.
@@ -20203,6 +20240,9 @@ static int do_check(struct bpf_verifier_env *env)
 process_bpf_exit:
 			mark_verifier_state_scratched(env);
 			err = update_branch_counts(env, env->cur_state);
+			if (err)
+				return err;
+			err = bpf_update_live_stack(env);
 			if (err)
 				return err;
 			err = pop_stack(env, &prev_insn_idx, &env->insn_idx,
@@ -24769,6 +24809,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	if (ret < 0)
 		goto skip_full_check;
 
+	ret = bpf_stack_liveness_init(env);
+	if (ret)
+		goto skip_full_check;
+
 	ret = check_attach_btf_id(env);
 	if (ret)
 		goto skip_full_check;
@@ -24918,6 +24962,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	mutex_unlock(&bpf_verifier_lock);
 	vfree(env->insn_aux_data);
 err_free_env:
+	bpf_stack_liveness_free(env);
 	kvfree(env->cfg.insn_postorder);
 	kvfree(env->scc_info);
 	kvfree(env);
