
Commit 44b44b6

melver authored and gregkh committed
kfence: move saving stack trace of allocations into __kfence_alloc()
[ Upstream commit a9ab52b ]

Move the saving of the stack trace of allocations into __kfence_alloc(),
so that the stack entries array can be used outside of
kfence_guarded_alloc() and we avoid potentially unwinding the stack
multiple times.

Link: https://lkml.kernel.org/r/20210923104803.2620285-3-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Alexander Potapenko <glider@google.com>
Cc: Aleksandr Nogikh <nogikh@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Taras Madan <tarasmadan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 72d2d94 · commit 44b44b6

File tree

1 file changed: +24 −11 lines


mm/kfence/core.c

Lines changed: 24 additions & 11 deletions
@@ -188,19 +188,26 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
  * Update the object's metadata state, including updating the alloc/free stacks
  * depending on the state transition.
  */
-static noinline void metadata_update_state(struct kfence_metadata *meta,
-					   enum kfence_object_state next)
+static noinline void
+metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
+		      unsigned long *stack_entries, size_t num_stack_entries)
 {
 	struct kfence_track *track =
 		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
 
 	lockdep_assert_held(&meta->lock);
 
-	/*
-	 * Skip over 1 (this) functions; noinline ensures we do not accidentally
-	 * skip over the caller by never inlining.
-	 */
-	track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+	if (stack_entries) {
+		memcpy(track->stack_entries, stack_entries,
+		       num_stack_entries * sizeof(stack_entries[0]));
+	} else {
+		/*
+		 * Skip over 1 (this) functions; noinline ensures we do not
+		 * accidentally skip over the caller by never inlining.
+		 */
+		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
+	}
+	track->num_stack_entries = num_stack_entries;
 	track->pid = task_pid_nr(current);
 	track->cpu = raw_smp_processor_id();
 	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
@@ -262,7 +269,8 @@ static __always_inline void for_each_canary(const struct kfence_metadata *meta,
 	}
 }
 
-static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp)
+static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
+				  unsigned long *stack_entries, size_t num_stack_entries)
 {
 	struct kfence_metadata *meta = NULL;
 	unsigned long flags;
@@ -321,7 +329,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	addr = (void *)meta->addr;
 
 	/* Update remaining metadata. */
-	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED);
+	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
 	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
 	WRITE_ONCE(meta->cache, cache);
 	meta->size = size;
@@ -401,7 +409,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 	memzero_explicit(addr, meta->size);
 
 	/* Mark the object as freed. */
-	metadata_update_state(meta, KFENCE_OBJECT_FREED);
+	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
 
 	raw_spin_unlock_irqrestore(&meta->lock, flags);
 
@@ -746,6 +754,9 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 
 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
+	unsigned long stack_entries[KFENCE_STACK_DEPTH];
+	size_t num_stack_entries;
+
 	/*
 	 * Perform size check before switching kfence_allocation_gate, so that
 	 * we don't disable KFENCE without making an allocation.
@@ -785,7 +796,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 	if (!READ_ONCE(kfence_enabled))
 		return NULL;
 
-	return kfence_guarded_alloc(s, size, flags);
+	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
+
+	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries);
 }
 
 size_t kfence_ksize(const void *addr)
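
For readers unfamiliar with the pattern, the following is a minimal, self-contained
userspace sketch of the "capture once, pass down" approach this commit introduces.
The names fake_unwind(), record_state(), and struct track are hypothetical stand-ins
for stack_trace_save(), metadata_update_state(), and struct kfence_track; this is an
illustration of the control flow, not kernel code.

/* Sketch: unwind the stack once at the entry point, pass the entries down,
 * and fall back to unwinding locally when no pre-captured trace is given. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define STACK_DEPTH 8

/* Stand-in for stack_trace_save(): fills entries, returns the count. */
static size_t fake_unwind(unsigned long *entries, size_t max, int skip)
{
	size_t n = 3; /* pretend we unwound three frames */
	size_t i;

	for (i = 0; i < n && i < max; i++)
		entries[i] = 0x1000 + skip + i;
	return n < max ? n : max;
}

struct track {
	unsigned long stack_entries[STACK_DEPTH];
	size_t num_stack_entries;
};

/* Mirrors the new metadata_update_state() contract: reuse caller-provided
 * entries when available, otherwise unwind here as the fallback. */
static void record_state(struct track *t, unsigned long *stack_entries, size_t num)
{
	if (stack_entries)
		memcpy(t->stack_entries, stack_entries, num * sizeof(stack_entries[0]));
	else
		num = fake_unwind(t->stack_entries, STACK_DEPTH, 1);
	t->num_stack_entries = num;
}

int main(void)
{
	unsigned long entries[STACK_DEPTH];
	struct track alloc = { 0 }, freed = { 0 };
	size_t n;

	/* Alloc path: unwind once at the outermost entry point... */
	n = fake_unwind(entries, STACK_DEPTH, 0);
	/* ...and every consumer below reuses the same array. */
	record_state(&alloc, entries, n);

	/* Free path: no pre-captured trace, so pass NULL and let
	 * record_state() unwind on its own. */
	record_state(&freed, NULL, 0);

	printf("alloc frames: %zu, free frames: %zu\n",
	       alloc.num_stack_entries, freed.num_stack_entries);
	return 0;
}

As in the commit, the NULL fallback keeps the free path unchanged while letting
the allocation path unwind exactly once and reuse the result.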
