
Commit 1acc369

RichardWeiYang authored and akpm00 committed
mm/khugepaged: use start_addr/addr for improved readability
When collapsing a pmd, there are two addresses in use:

* address points to the start of the pmd
* address points to each individual page

Current naming makes it difficult to distinguish these two and is hence error prone. Considering the plan to collapse mTHP, name the first one `start_addr' and the second `addr' for better readability and consistency.

Link: https://lkml.kernel.org/r/20250922140938.27343-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Nico Pache <npache@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Dev Jain <dev.jain@arm.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent dd83609 commit 1acc369
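The pattern behind the rename is easiest to see in isolation: each of the touched functions receives the pmd-aligned start of the range, then walks it one page at a time. Below is a minimal userspace sketch of that pattern; scan_pmd_range and the constant values are illustrative stand-ins, not the kernel's actual definitions.

#include <stdio.h>

#define PAGE_SIZE    4096UL	/* stand-in for the kernel constant */
#define HPAGE_PMD_NR 512	/* stand-in: 4 KiB pages per 2 MiB pmd */

/*
 * Before the patch, a single variable `address' served both roles: it
 * entered as the pmd start and was then mutated as the per-page cursor.
 * The rename splits the two roles apart.
 */
static void scan_pmd_range(unsigned long start_addr)
{
	unsigned long addr = start_addr;	/* per-page cursor */
	int i;

	for (i = 0; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		/* per-page work would use addr here */
	}

	/* whole-pmd work can still rely on the untouched start_addr */
	printf("scanned pmd-sized range at %#lx\n", start_addr);
}

int main(void)
{
	scan_pmd_range(0x200000UL);	/* an arbitrary pmd-aligned address */
	return 0;
}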

File tree

1 file changed (+22, -21)

mm/khugepaged.c

Lines changed: 22 additions & 21 deletions
@@ -537,18 +537,19 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
 }
 
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
-					unsigned long address,
+					unsigned long start_addr,
 					pte_t *pte,
 					struct collapse_control *cc,
 					struct list_head *compound_pagelist)
 {
 	struct page *page = NULL;
 	struct folio *folio = NULL;
+	unsigned long addr = start_addr;
 	pte_t *_pte;
 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
 
 	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
-	     _pte++, address += PAGE_SIZE) {
+	     _pte++, addr += PAGE_SIZE) {
 		pte_t pteval = ptep_get(_pte);
 		if (pte_none(pteval) || (pte_present(pteval) &&
 				is_zero_pfn(pte_pfn(pteval)))) {
@@ -571,7 +572,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			result = SCAN_PTE_UFFD_WP;
 			goto out;
 		}
-		page = vm_normal_page(vma, address, pteval);
+		page = vm_normal_page(vma, addr, pteval);
 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
 			result = SCAN_PAGE_NULL;
 			goto out;
@@ -656,8 +657,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		 */
 		if (cc->is_khugepaged &&
 		    (pte_young(pteval) || folio_test_young(folio) ||
-		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
-								     address)))
+		     folio_test_referenced(folio) ||
+		     mmu_notifier_test_young(vma->vm_mm, addr)))
 			referenced++;
 	}
 
@@ -986,21 +987,21 @@ static int check_pmd_still_valid(struct mm_struct *mm,
  */
 static int __collapse_huge_page_swapin(struct mm_struct *mm,
 				       struct vm_area_struct *vma,
-				       unsigned long haddr, pmd_t *pmd,
+				       unsigned long start_addr, pmd_t *pmd,
 				       int referenced)
 {
 	int swapped_in = 0;
 	vm_fault_t ret = 0;
-	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+	unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
 	int result;
 	pte_t *pte = NULL;
 	spinlock_t *ptl;
 
-	for (address = haddr; address < end; address += PAGE_SIZE) {
+	for (addr = start_addr; addr < end; addr += PAGE_SIZE) {
 		struct vm_fault vmf = {
 			.vma = vma,
-			.address = address,
-			.pgoff = linear_page_index(vma, address),
+			.address = addr,
+			.pgoff = linear_page_index(vma, addr),
 			.flags = FAULT_FLAG_ALLOW_RETRY,
 			.pmd = pmd,
 		};
@@ -1010,7 +1011,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
 		 * Here the ptl is only used to check pte_same() in
 		 * do_swap_page(), so readonly version is enough.
 		 */
-		pte = pte_offset_map_ro_nolock(mm, pmd, address, &ptl);
+		pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
 		if (!pte) {
 			mmap_read_unlock(mm);
 			result = SCAN_PMD_NULL;
@@ -1253,7 +1254,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 
 static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 				   struct vm_area_struct *vma,
-				   unsigned long address, bool *mmap_locked,
+				   unsigned long start_addr, bool *mmap_locked,
 				   struct collapse_control *cc)
 {
 	pmd_t *pmd;
@@ -1262,26 +1263,26 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	int none_or_zero = 0, shared = 0;
 	struct page *page = NULL;
 	struct folio *folio = NULL;
-	unsigned long _address;
+	unsigned long addr;
 	spinlock_t *ptl;
 	int node = NUMA_NO_NODE, unmapped = 0;
 
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	VM_BUG_ON(start_addr & ~HPAGE_PMD_MASK);
 
-	result = find_pmd_or_thp_or_none(mm, address, &pmd);
+	result = find_pmd_or_thp_or_none(mm, start_addr, &pmd);
 	if (result != SCAN_SUCCEED)
 		goto out;
 
 	memset(cc->node_load, 0, sizeof(cc->node_load));
 	nodes_clear(cc->alloc_nmask);
-	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
 	if (!pte) {
 		result = SCAN_PMD_NULL;
 		goto out;
 	}
 
-	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
-	     _pte++, _address += PAGE_SIZE) {
+	for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
+	     _pte++, addr += PAGE_SIZE) {
 		pte_t pteval = ptep_get(_pte);
 		if (is_swap_pte(pteval)) {
 			++unmapped;
@@ -1329,7 +1330,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 			goto out_unmap;
 		}
 
-		page = vm_normal_page(vma, _address, pteval);
+		page = vm_normal_page(vma, addr, pteval);
 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
 			result = SCAN_PAGE_NULL;
 			goto out_unmap;
@@ -1398,7 +1399,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		if (cc->is_khugepaged &&
 		    (pte_young(pteval) || folio_test_young(folio) ||
 		     folio_test_referenced(folio) ||
-		     mmu_notifier_test_young(vma->vm_mm, _address)))
+		     mmu_notifier_test_young(vma->vm_mm, addr)))
 			referenced++;
 	}
 	if (cc->is_khugepaged &&
@@ -1411,7 +1412,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
 	if (result == SCAN_SUCCEED) {
-		result = collapse_huge_page(mm, address, referenced,
+		result = collapse_huge_page(mm, start_addr, referenced,
 					    unmapped, cc);
 		/* collapse_huge_page will return with the mmap_lock released */
 		*mmap_locked = false;
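
For quick reference, these are the three touched function signatures after the rename, condensed from the hunks above (declarations only; the full definitions live in mm/khugepaged.c and depend on kernel types):

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long start_addr, pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist);

static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long start_addr, pmd_t *pmd,
				       int referenced);

static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long start_addr, bool *mmap_locked,
				   struct collapse_control *cc);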
