
Commit d637f3c

Merge: hugetlb: force allocating surplus hugepages on mempolicy allowed nodes
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4827
JIRA: https://issues.redhat.com/browse/RHEL-38605
Signed-off-by: Aristeu Rozanski <arozansk@redhat.com>
Approved-by: Waiman Long <longman@redhat.com>
Approved-by: Rafael Aquini <aquini@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Scott Weaver <scweaver@redhat.com>
2 parents bd0c781 + 286db24 commit d637f3c

1 file changed (+28, −19 lines)


mm/hugetlb.c

Lines changed: 28 additions & 19 deletions
@@ -2591,6 +2591,23 @@ struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *v
 	return folio;
 }
 
+static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
+{
+#ifdef CONFIG_NUMA
+	struct mempolicy *mpol = get_task_policy(current);
+
+	/*
+	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
+	 * (from policy_nodemask) specifically for hugetlb case
+	 */
+	if (mpol->mode == MPOL_BIND &&
+		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
+		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
+		return &mpol->nodes;
+#endif
+	return NULL;
+}
+
 /*
  * Increase the hugetlb pool such that it can accommodate a reservation
  * of size 'delta'.
@@ -2605,6 +2622,8 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 	long i;
 	long needed, allocated;
 	bool alloc_ok = true;
+	int node;
+	nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
 
 	lockdep_assert_held(&hugetlb_lock);
 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
@@ -2619,8 +2638,15 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 retry:
 	spin_unlock_irq(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
-		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
-				NUMA_NO_NODE, NULL);
+		folio = NULL;
+		for_each_node_mask(node, cpuset_current_mems_allowed) {
+			if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) {
+				folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
+						node, NULL);
+				if (folio)
+					break;
+			}
+		}
 		if (!folio) {
 			alloc_ok = false;
 			break;
@@ -4608,23 +4634,6 @@ static int __init default_hugepagesz_setup(char *s)
 }
 __setup("default_hugepagesz=", default_hugepagesz_setup);
 
-static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
-{
-#ifdef CONFIG_NUMA
-	struct mempolicy *mpol = get_task_policy(current);
-
-	/*
-	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
-	 * (from policy_nodemask) specifically for hugetlb case
-	 */
-	if (mpol->mode == MPOL_BIND &&
-		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
-		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
-		return &mpol->nodes;
-#endif
-	return NULL;
-}
-
 static unsigned int allowed_mems_nr(struct hstate *h)
 {
 	int node;
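With this change, policy_mbind_nodemask() is moved above gather_surplus_pages(), and the surplus-allocation loop walks the nodes in cpuset_current_mems_allowed, skipping any node outside the task's applicable MPOL_BIND nodemask, instead of passing NUMA_NO_NODE to alloc_surplus_hugetlb_folio(). Below is a minimal user-space sketch of how the behaviour can be exercised; it is an illustration only, not part of the patch or its tests, and it assumes a NUMA machine with 2 MB hugepages, libnuma headers installed (link with -lnuma), a non-zero nr_overcommit_hugepages, and node 0 chosen arbitrarily as the bind target.

/*
 * Hypothetical reproducer sketch (not part of this commit): bind the task
 * to node 0 with MPOL_BIND, then map and touch one 2 MB hugepage via
 * MAP_HUGETLB.  With the hugepage pool empty and overcommit enabled, the
 * reservation made at mmap() time goes through gather_surplus_pages(), so
 * after this patch the surplus page should show up under
 * /sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages.
 *
 * Build: gcc -o surplus_bind surplus_bind.c -lnuma
 */
#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL << 20)	/* assumes 2 MB default hugepage size */

int main(void)
{
	unsigned long nodemask = 1UL << 0;	/* node 0, chosen arbitrarily */
	void *p;

	if (set_mempolicy(MPOL_BIND, &nodemask, 8 * sizeof(nodemask))) {
		perror("set_mempolicy");
		return 1;
	}

	/* The reservation taken here is what triggers surplus allocation. */
	p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, HPAGE_SIZE);	/* fault the hugepage in */

	puts("hugepage mapped; check per-node surplus_hugepages in sysfs");
	munmap(p, HPAGE_SIZE);
	return 0;
}

Before the change, the same program could see the surplus page accounted on any node, since the allocation was performed with NUMA_NO_NODE regardless of the task's mempolicy.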
