
Commit 2a44683

mm: pass nid to reserve_bootmem_region()
JIRA: https://issues.redhat.com/browse/RHEL-36126

commit 61167ad
Author: Yajun Deng <yajun.deng@linux.dev>
Date:   Mon Jun 19 10:34:06 2023 +0800

    mm: pass nid to reserve_bootmem_region()

    early_pfn_to_nid() is called frequently in init_reserved_page(), it
    returns the node id of the PFN. These PFN are probably from the same
    memory region, they have the same node id. It's not necessary to call
    early_pfn_to_nid() for each PFN.

    Pass nid to reserve_bootmem_region() and drop the call to
    early_pfn_to_nid() in init_reserved_page(). Also, set nid on all
    reserved pages before doing this, as some reserved memory regions
    may not be set nid.

    The most beneficial function is memmap_init_reserved_pages() if
    CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. The following data was
    tested on an x86 machine with 190GB of RAM.

    before:
    memmap_init_reserved_pages()  67ms

    after:
    memmap_init_reserved_pages()  20ms

    Link: https://lkml.kernel.org/r/20230619023406.424298-1-yajun.deng@linux.dev
    Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
    Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Signed-off-by: Eric Chanudet <echanude@redhat.com>
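The whole gain comes from hoisting the node-id lookup out of the per-PFN path: the old code resolved the node for every reserved PFN, the new code resolves it once per memblock region and passes it down. Below is a minimal userspace sketch of that pattern, not kernel code; struct toy_region, toy_pfn_to_nid() and the walk_* helpers are invented names used only for illustration.

#include <stdio.h>

struct toy_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;			/* node id of the whole region */
};

/* Stand-in for early_pfn_to_nid(): the lookup the patch avoids per PFN. */
static int toy_pfn_to_nid(const struct toy_region *regions, int nr,
			  unsigned long pfn)
{
	for (int i = 0; i < nr; i++)
		if (pfn >= regions[i].start_pfn && pfn < regions[i].end_pfn)
			return regions[i].nid;
	return -1;
}

/* Old pattern: every PFN repeats the lookup. */
static unsigned long walk_old(const struct toy_region *regions, int nr)
{
	unsigned long lookups = 0;

	for (int i = 0; i < nr; i++)
		for (unsigned long pfn = regions[i].start_pfn;
		     pfn < regions[i].end_pfn; pfn++) {
			int nid = toy_pfn_to_nid(regions, nr, pfn);

			(void)nid;	/* ...initialise the page for nid... */
			lookups++;
		}
	return lookups;
}

/* New pattern: resolve nid once per region and pass it down. */
static unsigned long walk_new(const struct toy_region *regions, int nr)
{
	unsigned long lookups = 0;

	for (int i = 0; i < nr; i++) {
		int nid = regions[i].nid;	/* one resolution per region */

		lookups++;
		for (unsigned long pfn = regions[i].start_pfn;
		     pfn < regions[i].end_pfn; pfn++)
			(void)nid;	/* ...initialise the page for nid... */
	}
	return lookups;
}

int main(void)
{
	struct toy_region regions[] = {
		{ .start_pfn = 0,    .end_pfn = 4096, .nid = 0 },
		{ .start_pfn = 4096, .end_pfn = 8192, .nid = 1 },
	};

	printf("per-PFN lookups:    %lu\n", walk_old(regions, 2));
	printf("per-region lookups: %lu\n", walk_new(regions, 2));
	return 0;
}

Reserved memory on a machine of the size quoted above covers a very large number of PFNs, so turning one lookup per PFN into one per region is what takes memmap_init_reserved_pages() from 67ms to 20ms in the commit's measurements.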
1 parent ab62de2 commit 2a44683

3 files changed (+40, -24 lines)


include/linux/mm.h

Lines changed: 2 additions & 1 deletion
@@ -2948,7 +2948,8 @@ extern unsigned long free_reserved_area(void *start, void *end,
 
 extern void adjust_managed_page_count(struct page *page, long count);
 
-extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
+extern void reserve_bootmem_region(phys_addr_t start,
+				   phys_addr_t end, int nid);
 
 /* Free the reserved page into the buddy system, so it gets managed. */
 static inline void free_reserved_page(struct page *page)

mm/memblock.c

Lines changed: 21 additions & 10 deletions
@@ -2092,19 +2092,30 @@ static void __init memmap_init_reserved_pages(void)
 {
 	struct memblock_region *region;
 	phys_addr_t start, end;
-	u64 i;
+	int nid;
+
+	/*
+	 * set nid on all reserved pages and also treat struct
+	 * pages for the NOMAP regions as PageReserved
+	 */
+	for_each_mem_region(region) {
+		nid = memblock_get_region_node(region);
+		start = region->base;
+		end = start + region->size;
+
+		if (memblock_is_nomap(region))
+			reserve_bootmem_region(start, end, nid);
+
+		memblock_set_node(start, end, &memblock.reserved, nid);
+	}
 
 	/* initialize struct pages for the reserved regions */
-	for_each_reserved_mem_range(i, &start, &end)
-		reserve_bootmem_region(start, end);
+	for_each_reserved_mem_region(region) {
+		nid = memblock_get_region_node(region);
+		start = region->base;
+		end = start + region->size;
 
-	/* and also treat struct pages for the NOMAP regions as PageReserved */
-	for_each_mem_region(region) {
-		if (memblock_is_nomap(region)) {
-			start = region->base;
-			end = start + region->size;
-			reserve_bootmem_region(start, end);
-		}
+		reserve_bootmem_region(start, end, nid);
 	}
 }
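Read as a whole rather than as a diff, the function after this hunk is sketched below. This is reconstructed from the hunk above for readability, not copied from the RHEL tree, and it is kernel code that only compiles in the context of mm/memblock.c.

/*
 * memmap_init_reserved_pages() as it reads once the hunk above is applied
 * (reconstructed from the diff; depends on the rest of mm/memblock.c).
 */
static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	int nid;

	/*
	 * set nid on all reserved pages and also treat struct
	 * pages for the NOMAP regions as PageReserved
	 */
	for_each_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		if (memblock_is_nomap(region))
			reserve_bootmem_region(start, end, nid);

		memblock_set_node(start, end, &memblock.reserved, nid);
	}

	/* initialize struct pages for the reserved regions */
	for_each_reserved_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		reserve_bootmem_region(start, end, nid);
	}
}

Note how both loops resolve nid once per region via memblock_get_region_node() and hand it to reserve_bootmem_region(), and how the first loop also records the node on the reserved ranges with memblock_set_node(), matching the commit message's point that some reserved regions may not have a node id set yet.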

mm/mm_init.c

Lines changed: 17 additions & 13 deletions
@@ -644,10 +644,8 @@ static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
 }
 
 /* Returns true if the struct page for the pfn is initialised */
-static inline bool __meminit early_page_initialised(unsigned long pfn)
+static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
 {
-	int nid = early_pfn_to_nid(pfn);
-
 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
 		return false;
 
@@ -693,15 +691,14 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	return false;
 }
 
-static void __meminit init_reserved_page(unsigned long pfn)
+static void __meminit init_reserved_page(unsigned long pfn, int nid)
 {
 	pg_data_t *pgdat;
-	int nid, zid;
+	int zid;
 
-	if (early_page_initialised(pfn))
+	if (early_page_initialised(pfn, nid))
 		return;
 
-	nid = early_pfn_to_nid(pfn);
 	pgdat = NODE_DATA(nid);
 
 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
@@ -715,7 +712,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
 #else
 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
 
-static inline bool early_page_initialised(unsigned long pfn)
+static inline bool early_page_initialised(unsigned long pfn, int nid)
 {
 	return true;
 }
@@ -725,7 +722,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 	return false;
 }
 
-static inline void init_reserved_page(unsigned long pfn)
+static inline void init_reserved_page(unsigned long pfn, int nid)
 {
 }
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
@@ -736,7 +733,8 @@ static inline void init_reserved_page(unsigned long pfn)
  * marks the pages PageReserved. The remaining valid pages are later
  * sent to the buddy page allocator.
  */
-void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
+void __meminit reserve_bootmem_region(phys_addr_t start,
+				      phys_addr_t end, int nid)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long end_pfn = PFN_UP(end);
@@ -745,7 +743,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 	if (pfn_valid(start_pfn)) {
 		struct page *page = pfn_to_page(start_pfn);
 
-		init_reserved_page(start_pfn);
+		init_reserved_page(start_pfn, nid);
 
 		/* Avoid false-positive PageTail() */
 		INIT_LIST_HEAD(&page->lru);
@@ -2538,8 +2536,14 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 void __init memblock_free_pages(struct page *page, unsigned long pfn,
 				unsigned int order)
 {
-	if (!early_page_initialised(pfn))
-		return;
+
+	if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
+		int nid = early_pfn_to_nid(pfn);
+
+		if (!early_page_initialised(pfn, nid))
+			return;
+	}
+
 	if (!kmsan_memblock_free_pages(page, order)) {
 		/* KMSAN will take care of these pages. */
 		return;
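The final hunk wraps the new early_pfn_to_nid() call in IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT): the lookup only happens in configurations where early_page_initialised() can actually return false, while with the option off the stub version always returns true and the whole guarded block is compile-time dead code. Below is a tiny standalone illustration of that guard style; the TOY_* macro and toy_* helpers are invented, and IS_ENABLED() here is only a plain stand-in for the kernel macro of the same name.

#include <stdio.h>

#define TOY_CONFIG_DEFERRED_INIT 0	/* flip to 1 to "enable" the option */
#define IS_ENABLED(option) (option)	/* stand-in for the kernel macro */

static int toy_pfn_to_nid(unsigned long pfn)
{
	printf("node-id lookup for pfn %lu\n", pfn);
	return 0;
}

static int toy_page_initialised(unsigned long pfn, int nid)
{
	(void)pfn;
	(void)nid;
	return 1;	/* the "stub" answer when deferred init is off */
}

static void toy_free_pages(unsigned long pfn)
{
	/* The lookup is only reachable when the option is compiled in. */
	if (IS_ENABLED(TOY_CONFIG_DEFERRED_INIT)) {
		int nid = toy_pfn_to_nid(pfn);

		if (!toy_page_initialised(pfn, nid))
			return;
	}
	printf("freeing pfn %lu\n", pfn);
}

int main(void)
{
	toy_free_pages(42);	/* no lookup line is printed while the option is 0 */
	return 0;
}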
