Commit d056a06

Merge: CVE-2024-41055 kernel: mm: prevent dereferencing NULL ptr in pfn_section_valid()
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4950
JIRA: https://issues.redhat.com/browse/RHEL-51138
CVE: CVE-2024-41055

Backporting two fixes to resolve CVE-2024-41055.

Signed-off-by: Audra Mitchell <audra@redhat.com>
Approved-by: Waiman Long <longman@redhat.com>
Approved-by: Rafael Aquini <raquini@redhat.com>
Approved-by: Chris von Recklinghausen <crecklin@redhat.com>
Approved-by: Lenny Szubowicz <lszubowi@redhat.com>
Approved-by: David Arcari <darcari@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Lucas Zampieri <lzampier@redhat.com>
2 parents: 197c8a9 + beaf339

File tree

2 files changed (+21, -5 lines)


arch/x86/include/asm/kmsan.h

Lines changed: 16 additions & 1 deletion
@@ -64,6 +64,7 @@ static inline bool kmsan_virt_addr_valid(void *addr)
 {
         unsigned long x = (unsigned long)addr;
         unsigned long y = x - __START_KERNEL_map;
+        bool ret;
 
         /* use the carry flag to determine if x was < __START_KERNEL_map */
         if (unlikely(x > y)) {
@@ -79,7 +80,21 @@ static inline bool kmsan_virt_addr_valid(void *addr)
                 return false;
         }
 
-        return pfn_valid(x >> PAGE_SHIFT);
+        /*
+         * pfn_valid() relies on RCU, and may call into the scheduler on exiting
+         * the critical section. However, this would result in recursion with
+         * KMSAN. Therefore, disable preemption here, and re-enable preemption
+         * below while suppressing reschedules to avoid recursion.
+         *
+         * Note, this sacrifices occasionally breaking scheduling guarantees.
+         * Although, a kernel compiled with KMSAN has already given up on any
+         * performance guarantees due to being heavily instrumented.
+         */
+        preempt_disable();
+        ret = pfn_valid(x >> PAGE_SHIFT);
+        preempt_enable_no_resched();
+
+        return ret;
 }
 
 #endif /* !MODULE */
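Read together with the include/linux/mmzone.h hunks below, the added comment describes one pattern: pin the CPU across the pfn_valid() call, then re-enable preemption without creating a reschedule point that instrumented scheduler code could recurse through. A condensed, illustrative sketch (the helper name is hypothetical and not part of the commit; the body is simplified from the hunk above):

/*
 * Hypothetical helper condensing the patched tail of
 * kmsan_virt_addr_valid(); illustrative only, not part of the commit.
 */
static inline bool pfn_valid_preempt_pinned(unsigned long x)
{
        bool ret;

        preempt_disable();                /* keep the scheduler out of the lookup */
        ret = pfn_valid(x >> PAGE_SHIFT); /* RCU read side runs with preemption off */
        preempt_enable_no_resched();      /* no reschedule point on the way out */

        return ret;
}

Note that pfn_valid() below moves to rcu_read_lock_sched(), whose read-side critical sections are exactly the preemption-disabled regions, so the two changes compose.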

include/linux/mmzone.h

Lines changed: 5 additions & 4 deletions
@@ -1945,8 +1945,9 @@ static inline int subsection_map_index(unsigned long pfn)
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 {
         int idx = subsection_map_index(pfn);
+        struct mem_section_usage *usage = READ_ONCE(ms->usage);
 
-        return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
+        return usage ? test_bit(idx, usage->subsection_map) : 0;
 }
 #else
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
@@ -1984,17 +1985,17 @@ static inline int pfn_valid(unsigned long pfn)
         if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                 return 0;
         ms = __pfn_to_section(pfn);
-        rcu_read_lock();
+        rcu_read_lock_sched();
         if (!valid_section(ms)) {
-                rcu_read_unlock();
+                rcu_read_unlock_sched();
                 return 0;
         }
         /*
          * Traditionally early sections always returned pfn_valid() for
          * the entire section-sized span.
          */
         ret = early_section(ms) || pfn_section_valid(ms, pfn);
-        rcu_read_unlock();
+        rcu_read_unlock_sched();
 
         return ret;
 }
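The race this closes: memory hot-remove can clear ms->usage to NULL while a pfn_valid() walker is still inspecting the section, so the single READ_ONCE() snapshot plus NULL test ensures the reader either sees the old subsection map or safely returns 0. A minimal userspace analogue of the two shapes (every name here is hypothetical and stands in for the kernel objects; this is not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for struct mem_section_usage. */
struct usage { unsigned long subsection_map; };

static struct usage the_usage = { .subsection_map = 0x1 };
static _Atomic(struct usage *) usage_ptr = &the_usage;

/* Old shape: unconditional dereference; crashes if the writer has already
 * cleared the pointer. Shown for contrast only, never called below. */
static int section_valid_racy(void)
{
        return (int)(atomic_load(&usage_ptr)->subsection_map & 0x1);
}

/* Fixed shape, mirroring READ_ONCE() into a local plus a NULL test. */
static int section_valid_fixed(void)
{
        struct usage *u = atomic_load(&usage_ptr); /* one snapshot */

        return u ? (int)(u->subsection_map & 0x1) : 0; /* NULL-safe */
}

/* Analogue of memory hot-remove tearing down the section's usage map. */
static void *hot_remove(void *arg)
{
        (void)arg;
        atomic_store(&usage_ptr, NULL);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, hot_remove, NULL);
        printf("fixed lookup: %d\n", section_valid_fixed());
        pthread_join(t, NULL);
        (void)section_valid_racy; /* referenced to avoid an unused warning */
        return 0;
}

Whatever interleaving the thread scheduler picks, section_valid_fixed() dereferences only its private snapshot, which is the same property the usage local gives pfn_section_valid() above.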
