@@ -2591,6 +2591,23 @@ struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *v
 	return folio;
 }
 
+static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
+{
+#ifdef CONFIG_NUMA
+	struct mempolicy *mpol = get_task_policy(current);
+
+	/*
+	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
+	 * (from policy_nodemask) specifically for hugetlb case
+	 */
+	if (mpol->mode == MPOL_BIND &&
+		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
+		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
+		return &mpol->nodes;
+#endif
+	return NULL;
+}
+
 /*
  * Increase the hugetlb pool such that it can accommodate a reservation
  * of size 'delta'.
@@ -2605,6 +2622,8 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 	long i;
 	long needed, allocated;
 	bool alloc_ok = true;
+	int node;
+	nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
 
 	lockdep_assert_held(&hugetlb_lock);
 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
@@ -2619,8 +2638,15 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 retry:
 	spin_unlock_irq(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
-		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
-				NUMA_NO_NODE, NULL);
+		folio = NULL;
+		for_each_node_mask(node, cpuset_current_mems_allowed) {
+			if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) {
+				folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
+						node, NULL);
+				if (folio)
+					break;
+			}
+		}
 		if (!folio) {
 			alloc_ok = false;
 			break;
@@ -4608,23 +4634,6 @@ static int __init default_hugepagesz_setup(char *s)
 }
 __setup("default_hugepagesz=", default_hugepagesz_setup);
 
-static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
-{
-#ifdef CONFIG_NUMA
-	struct mempolicy *mpol = get_task_policy(current);
-
-	/*
-	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
-	 * (from policy_nodemask) specifically for hugetlb case
-	 */
-	if (mpol->mode == MPOL_BIND &&
-		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
-		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
-		return &mpol->nodes;
-#endif
-	return NULL;
-}
-
 static unsigned int allowed_mems_nr(struct hstate *h)
 {
 	int node;
0 commit comments