Commit 91d920f

mm/kmemleak: simplify kmemleak_cond_resched() usage
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2151065
Upstream Status: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git/commit/?h=mm-stable&id=6061e740822530a4ef443548b19c4e0bc6342c7a

commit 6061e74
Author: Waiman Long <longman@redhat.com>
Date: Wed, 18 Jan 2023 23:01:10 -0500

    mm/kmemleak: simplify kmemleak_cond_resched() usage

    Patch series "mm/kmemleak: Simplify kmemleak_cond_resched() & fix UAF", v2.

    It was found that a KASAN use-after-free error was reported in the
    kmemleak_scan() function. After further examination, it is believed that
    even though a reference is taken from the current object, it does not
    prevent the object pointed to by the next pointer from going away after a
    cond_resched(). To fix that, additional flags are added to make sure that
    the current object won't be removed from the object_list for the duration
    of the cond_resched(), which keeps the next pointer valid.

    While making the change, I also simplify the current usage of
    kmemleak_cond_resched() to make it easier to understand.

    This patch (of 2):

    The presence of a pinned argument and the 64k loop count make
    kmemleak_cond_resched() a bit more complex to read. The pinned argument
    is used only by the first kmemleak_scan() loop.

    Simplify the usage of kmemleak_cond_resched() by removing the pinned
    argument and always doing a get_object()/put_object() sequence. In
    addition, the 64k loop count is removed by using need_resched() to decide
    whether kmemleak_cond_resched() should be called.

    Link: https://lkml.kernel.org/r/20230119040111.350923-1-longman@redhat.com
    Link: https://lkml.kernel.org/r/20230119040111.350923-2-longman@redhat.com
    Signed-off-by: Waiman Long <longman@redhat.com>
    Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
    Cc: Muchun Song <songmuchun@bytedance.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Signed-off-by: Waiman Long <longman@redhat.com>
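On the caller side, each of the three object-iteration loops in kmemleak_scan() loses its 64k loop counter and now reschedules only when the scheduler asks for it. A condensed view of the resulting loop pattern, taken from the diff below (the per-object scan work is elided here):

    rcu_read_lock();
    list_for_each_entry_rcu(object, &object_list, object_list) {
            /* ... per-object scan work ... */

            /* Reschedule only when the scheduler requests it. */
            if (need_resched())
                    kmemleak_cond_resched(object);
    }
    rcu_read_unlock();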
1 parent 470c25f commit 91d920f

1 file changed (+12, -36)

mm/kmemleak.c

Lines changed: 12 additions & 36 deletions
@@ -1466,22 +1466,17 @@ static void scan_gray_list(void)
 /*
  * Conditionally call resched() in an object iteration loop while making sure
  * that the given object won't go away without RCU read lock by performing a
- * get_object() if !pinned.
- *
- * Return: false if can't do a cond_resched() due to get_object() failure
- *         true otherwise
+ * get_object() if necessaary.
  */
-static bool kmemleak_cond_resched(struct kmemleak_object *object, bool pinned)
+static void kmemleak_cond_resched(struct kmemleak_object *object)
 {
-        if (!pinned && !get_object(object))
-                return false;
+        if (!get_object(object))
+                return;        /* Try next object */
 
         rcu_read_unlock();
         cond_resched();
         rcu_read_lock();
-        if (!pinned)
-                put_object(object);
-        return true;
+        put_object(object);
 }
 
 /*
@@ -1495,15 +1490,12 @@ static void kmemleak_scan(void)
         struct zone *zone;
         int __maybe_unused i;
         int new_leaks = 0;
-        int loop_cnt = 0;
 
         jiffies_last_scan = jiffies;
 
         /* prepare the kmemleak_object's */
         rcu_read_lock();
         list_for_each_entry_rcu(object, &object_list, object_list) {
-                bool obj_pinned = false;
-
                 raw_spin_lock_irq(&object->lock);
 #ifdef DEBUG
                 /*
@@ -1529,19 +1521,13 @@ static void kmemleak_scan(void)
 
                 /* reset the reference count (whiten the object) */
                 object->count = 0;
-                if (color_gray(object) && get_object(object)) {
+                if (color_gray(object) && get_object(object))
                         list_add_tail(&object->gray_list, &gray_list);
-                        obj_pinned = true;
-                }
 
                 raw_spin_unlock_irq(&object->lock);
 
-                /*
-                 * Do a cond_resched() every 64k objects to avoid soft lockup.
-                 */
-                if (!(++loop_cnt & 0xffff) &&
-                    !kmemleak_cond_resched(object, obj_pinned))
-                        loop_cnt--;        /* Try again on next object */
+                if (need_resched())
+                        kmemleak_cond_resched(object);
         }
         rcu_read_unlock();
 
@@ -1608,14 +1594,9 @@ static void kmemleak_scan(void)
          * scan and color them gray until the next scan.
          */
         rcu_read_lock();
-        loop_cnt = 0;
         list_for_each_entry_rcu(object, &object_list, object_list) {
-                /*
-                 * Do a cond_resched() every 64k objects to avoid soft lockup.
-                 */
-                if (!(++loop_cnt & 0xffff) &&
-                    !kmemleak_cond_resched(object, false))
-                        loop_cnt--;        /* Try again on next object */
+                if (need_resched())
+                        kmemleak_cond_resched(object);
 
                 /*
                  * This is racy but we can save the overhead of lock/unlock
@@ -1650,14 +1631,9 @@ static void kmemleak_scan(void)
          * Scanning result reporting.
          */
         rcu_read_lock();
-        loop_cnt = 0;
         list_for_each_entry_rcu(object, &object_list, object_list) {
-                /*
-                 * Do a cond_resched() every 64k objects to avoid soft lockup.
-                 */
-                if (!(++loop_cnt & 0xffff) &&
-                    !kmemleak_cond_resched(object, false))
-                        loop_cnt--;        /* Try again on next object */
+                if (need_resched())
+                        kmemleak_cond_resched(object);
 
                 /*
                  * This is racy but we can save the overhead of lock/unlock
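For reference, this is how the simplified helper reads once the hunks above are applied; the body is assembled from the diff, with descriptive comments added here:

    static void kmemleak_cond_resched(struct kmemleak_object *object)
    {
            /* Take a reference so the object cannot be freed across the resched. */
            if (!get_object(object))
                    return; /* Object is already going away; try the next one. */

            /* cond_resched() may sleep, so leave the RCU read-side section first. */
            rcu_read_unlock();
            cond_resched();
            rcu_read_lock();

            /* Drop the temporary reference taken above. */
            put_object(object);
    }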
