Skip to content

Commit 72d2d94

Browse files
melver authored and gregkh committed
kfence: count unexpectedly skipped allocations
[ Upstream commit 9a19aeb ] Maintain a counter to count allocations that are skipped due to being incompatible (oversized, incompatible gfp flags) or no capacity. This is to compute the fraction of allocations that could not be serviced by KFENCE, which we expect to be rare. Link: https://lkml.kernel.org/r/20210923104803.2620285-2-elver@google.com Signed-off-by: Marco Elver <elver@google.com> Reviewed-by: Dmitry Vyukov <dvyukov@google.com> Acked-by: Alexander Potapenko <glider@google.com> Cc: Aleksandr Nogikh <nogikh@google.com> Cc: Jann Horn <jannh@google.com> Cc: Taras Madan <tarasmadan@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 5142720 commit 72d2d94

File tree

1 file changed

+13
-3
lines changed

1 file changed

+13
-3
lines changed

mm/kfence/core.c

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,8 @@ enum kfence_counter_id {
113113
KFENCE_COUNTER_FREES,
114114
KFENCE_COUNTER_ZOMBIES,
115115
KFENCE_COUNTER_BUGS,
116+
KFENCE_COUNTER_SKIP_INCOMPAT,
117+
KFENCE_COUNTER_SKIP_CAPACITY,
116118
KFENCE_COUNTER_COUNT,
117119
};
118120
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
@@ -122,6 +124,8 @@ static const char *const counter_names[] = {
122124
[KFENCE_COUNTER_FREES] = "total frees",
123125
[KFENCE_COUNTER_ZOMBIES] = "zombie allocations",
124126
[KFENCE_COUNTER_BUGS] = "total bugs",
127+
[KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)",
128+
[KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)",
125129
};
126130
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
127131

@@ -272,8 +276,10 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
272276
list_del_init(&meta->list);
273277
}
274278
raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
275-
if (!meta)
279+
if (!meta) {
280+
atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
276281
return NULL;
282+
}
277283

278284
if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
279285
/*
@@ -744,17 +750,21 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
744750
* Perform size check before switching kfence_allocation_gate, so that
745751
* we don't disable KFENCE without making an allocation.
746752
*/
747-
if (size > PAGE_SIZE)
753+
if (size > PAGE_SIZE) {
754+
atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
748755
return NULL;
756+
}
749757

750758
/*
751759
* Skip allocations from non-default zones, including DMA. We cannot
752760
* guarantee that pages in the KFENCE pool will have the requested
753761
* properties (e.g. reside in DMAable memory).
754762
*/
755763
if ((flags & GFP_ZONEMASK) ||
756-
(s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
764+
(s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
765+
atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
757766
return NULL;
767+
}
758768

759769
if (atomic_inc_return(&kfence_allocation_gate) > 1)
760770
return NULL;

0 commit comments

Comments
 (0)