Skip to content

Commit 20daefe

Browse files
committed
x86/kasan: Map shadow for percpu pages on demand
jira VULN-3958 cve-pre CVE-2023-0597 commit-author Andrey Ryabinin <ryabinin.a.a@gmail.com> commit 3f148f3 KASAN maps shadow for the entire CPU-entry-area: [CPU_ENTRY_AREA_BASE, CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE] This will explode once the per-cpu entry areas are randomized since it will increase CPU_ENTRY_AREA_MAP_SIZE to 512 GB and KASAN fails to allocate shadow for such big area. Fix this by allocating KASAN shadow only for really used cpu entry area addresses mapped by cea_map_percpu_pages() Thanks to the 0day folks for finding and reporting this to be an issue. [ dhansen: tweak changelog since this will get committed before peterz's actual cpu-entry-area randomization ] Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Tested-by: Yujie Liu <yujie.liu@intel.com> Cc: kernel test robot <yujie.liu@intel.com> Link: https://lore.kernel.org/r/202210241508.2e203c3d-yujie.liu@intel.com (cherry picked from commit 3f148f3) Signed-off-by: Marcin Wcisło <marcin.wcislo@conclusive.pl>
1 parent 3de8302 commit 20daefe

File tree

3 files changed

+22
-4
lines changed

3 files changed

+22
-4
lines changed

arch/x86/include/asm/kasan.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,9 +28,12 @@
2828
#ifdef CONFIG_KASAN
2929
void __init kasan_early_init(void);
3030
void __init kasan_init(void);
31+
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
3132
#else
3233
static inline void kasan_early_init(void) { }
3334
static inline void kasan_init(void) { }
35+
static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
36+
int nid) { }
3437
#endif
3538

3639
#endif

arch/x86/mm/cpu_entry_area.c

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#include <asm/pgtable.h>
1010
#include <asm/fixmap.h>
1111
#include <asm/desc.h>
12+
#include <asm/kasan.h>
1213

1314
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
1415

@@ -49,8 +50,13 @@ void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
4950
static void __init
5051
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
5152
{
53+
phys_addr_t pa = per_cpu_ptr_to_phys(ptr);
54+
55+
kasan_populate_shadow_for_vaddr(cea_vaddr, pages * PAGE_SIZE,
56+
early_pfn_to_nid(PFN_DOWN(pa)));
57+
5258
for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
53-
cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
59+
cea_set_pte(cea_vaddr, pa, prot);
5460
}
5561

5662
static void __init percpu_setup_debug_store(unsigned int cpu)

arch/x86/mm/kasan_init_64.c

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -291,6 +291,18 @@ void __init kasan_early_init(void)
291291
kasan_map_early_shadow(init_top_pgt);
292292
}
293293

294+
/*
 * Populate KASAN shadow pages covering the virtual range [va, va + size),
 * allocating the backing memory from NUMA node @nid.
 *
 * The shadow addresses are widened to page granularity (start rounded down,
 * end rounded up) so that partial shadow pages at either edge are fully
 * populated.
 */
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
{
	unsigned long start, end;

	start = round_down((unsigned long)kasan_mem_to_shadow(va), PAGE_SIZE);
	end = round_up((unsigned long)kasan_mem_to_shadow(va + size), PAGE_SIZE);

	kasan_populate_shadow(start, end, nid);
}
305+
294306
void __init kasan_init(void)
295307
{
296308
int i;
@@ -354,9 +366,6 @@ void __init kasan_init(void)
354366
kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
355367
shadow_cpu_entry_begin);
356368

357-
kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
358-
(unsigned long)shadow_cpu_entry_end, 0);
359-
360369
kasan_populate_early_shadow(shadow_cpu_entry_end,
361370
kasan_mem_to_shadow((void *)__START_KERNEL_map));
362371

0 commit comments

Comments
 (0)