
Commit 1c101c3

Author: Mamatha Inamdar

book3s64/hash: Remove kfence support temporarily
JIRA: https://issues.redhat.com/browse/RHEL-114396

Conflicts: We found a kernel boot hang on systems using the Hash MMU, caused
by KFENCE. Instead of backporting the full upstream fix (which involves
multiple patches), we suggest adding a single patch that disables KFENCE at
runtime when the Hash MMU is enabled. This prevents the boot hang. To make
the behavior clearer, we added an out-of-tree printk so users see an explicit
message instead of just "kfence_init failed":

    dmesg | grep -Eai "kfence"
    [    0.001193] kfence: not supported with Hash MMU; disabled at runtime.
    [    0.001249] kfence: kfence_init failed

commit 47780e7
Author: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Date:   Fri Oct 18 22:59:43 2024 +0530

    book3s64/hash: Remove kfence support temporarily

    Kfence on book3s Hash on pseries is broken anyway: it fails to boot due
    to the RMA size limitation. That is because kfence with Hash uses the
    debug_pagealloc infrastructure, and debug_pagealloc allocates a linear
    map for the entire DRAM size instead of just the kfence-relevant
    objects. This means that for 16TB of DRAM it requires
    (16TB >> PAGE_SHIFT) bytes, which is 256MB, half of the RMA region on
    P8. The crash kernel reserves 256MB, and we also need 2048 * 16KB * 3
    for the emergency stack plus some more for paca allocations. That means
    there is not enough memory to reserve the full linear map in the RMA
    region if the DRAM size is too big (>= 16TB; the issue is seen above
    8TB with the 256MB crash kernel reservation).

    Kfence does not require a linear memory map for the entire DRAM; it
    only needs one for the kfence objects. So this patch temporarily
    removes the kfence functionality, since the debug_pagealloc code needs
    some refactoring. We will bring back kfence support on Hash in later
    patches.

    Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
    Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
    Link: https://patch.msgid.link/1761bc39674473c8878dedca15e0d9a0d3a1b528.1729271995.git.ritesh.list@gmail.com

Signed-off-by: Mamatha Inamdar <minamdar@redhat.com>
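For reference, the 256MB figure falls out of the allocation visible in the
hash_utils.c hunk below: linear_map_hash_slots is one byte per page of DRAM.
A standalone back-of-the-envelope check, assuming 64K pages (PAGE_SHIFT = 16,
the common pseries configuration; with 4K pages the array would be
correspondingly larger):

#include <stdio.h>

int main(void)
{
	/*
	 * Mirrors the allocation in htab_initialize():
	 *   linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
	 *   memblock_alloc_try_nid(linear_map_hash_count, 1, ...);
	 * i.e. one slot byte per page of DRAM.
	 */
	const unsigned long long dram_bytes = 16ULL << 40;	/* 16 TB */
	const unsigned int page_shift = 16;			/* 64K pages */
	unsigned long long slot_bytes = dram_bytes >> page_shift;

	/* Prints 256, i.e. half of the 512MB RMA on P8. */
	printf("linear_map_hash_slots size: %llu MB\n", slot_bytes >> 20);
	return 0;
}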
Parent: e1b834f

2 files changed, 18 insertions(+), 5 deletions(-)

arch/powerpc/include/asm/kfence.h (7 additions, 0 deletions)

@@ -10,6 +10,7 @@
 
 #include <linux/mm.h>
 #include <asm/pgtable.h>
+#include <asm/mmu.h>
 
 #ifdef CONFIG_PPC64_ELF_ABI_V1
 #define ARCH_FUNC_PREFIX "."
@@ -25,6 +26,12 @@ static inline void disable_kfence(void)
 
 static inline bool arch_kfence_init_pool(void)
 {
+#ifdef CONFIG_PPC64
+	if (!radix_enabled()) {
+		pr_info("Not supported with Hash MMU; disabled at runtime.\n");
+		return false;
+	}
+#endif
 	return !kfence_disabled;
 }
 #endif
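The effect of this hook at boot can be modeled in isolation. The following is
a hypothetical userspace model, not kernel code: radix_enabled and
kfence_disabled here are plain stand-in variables for the real MMU feature
check and the flag in asm/kfence.h, and the printouts imitate the dmesg lines
quoted in the commit message.

#include <stdbool.h>
#include <stdio.h>

static bool radix_enabled;	/* stand-in for the real MMU feature check */
static bool kfence_disabled;	/* mirrors the flag in asm/kfence.h */

/* Models the patched arch_kfence_init_pool() hook above. */
static bool arch_kfence_init_pool(void)
{
	if (!radix_enabled) {
		printf("kfence: not supported with Hash MMU; disabled at runtime.\n");
		return false;
	}
	return !kfence_disabled;
}

int main(void)
{
	radix_enabled = false;	/* a Hash MMU boot */
	if (!arch_kfence_init_pool())
		printf("kfence: kfence_init failed\n");	/* generic init gives up */
	return 0;
}

With the hook returning false, the generic KFENCE pool setup never proceeds,
so no Hash linear-map slots are ever needed for it and the boot hang is
avoided.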

arch/powerpc/mm/book3s64/hash_utils.c (11 additions, 5 deletions)

@@ -424,7 +424,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;
 
 		cond_resched();
-		if (debug_pagealloc_enabled_or_kfence() &&
+		if (debug_pagealloc_enabled() &&
 			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 	}
@@ -807,7 +807,7 @@ static void __init htab_init_page_sizes(void)
 	bool aligned = true;
 	init_hpte_page_sizes();
 
-	if (!debug_pagealloc_enabled_or_kfence()) {
+	if (!debug_pagealloc_enabled()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
 		 * support 16M, 1M and 4K which is the default
@@ -1127,7 +1127,7 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-	if (debug_pagealloc_enabled_or_kfence()) {
+	if (debug_pagealloc_enabled()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
 				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -2110,7 +2110,7 @@ void hpt_do_stress(unsigned long ea, unsigned long hpte_group)
 	}
 }
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_DEBUG_PAGEALLOC
 static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2183,7 +2183,13 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 	}
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
+#else /* CONFIG_DEBUG_PAGEALLOC */
+void hash__kernel_map_pages(struct page *page, int numpages,
+			    int enable)
+{
+
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				      phys_addr_t first_memblock_size)
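The empty !CONFIG_DEBUG_PAGEALLOC stub exists because hash__kernel_map_pages()
still has callers when KFENCE is configured in. A sketch of why, assuming the
book3s64 dispatch wrapper in asm/book3s/64/pgtable.h keeps a guard covering
both options (the exact wrapper is an assumption here, not part of this diff):

/*
 * Sketch, not a verbatim quote: the shared book3s64 dispatch is still
 * compiled when CONFIG_KFENCE=y even with CONFIG_DEBUG_PAGEALLOC=n,
 * so the hash variant must keep existing, now as a no-op.
 */
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
static inline void __kernel_map_pages(struct page *page, int numpages,
				      int enable)
{
	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);
	else
		hash__kernel_map_pages(page, numpages, enable);	/* empty stub */
}
#endif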
