Skip to content

Commit f78325c

Browse files
committed
sched/topology: Remove sched_domain_topology_level::flags
JIRA: https://issues.redhat.com/browse/RHEL-110301 commit 1eec89a Author: K Prateek Nayak <kprateek.nayak@amd.com> Date: Fri Jul 11 11:20:30 2025 +0530 sched/topology: Remove sched_domain_topology_level::flags Support for overlapping domains added in commit e3589f6 ("sched: Allow for overlapping sched_domain spans") also allowed forcefully setting SD_OVERLAP for !NUMA domains via FORCE_SD_OVERLAP sched_feat(). Since NUMA domains had to be presumed overlapping to ensure correct behavior, "sched_domain_topology_level::flags" was introduced. NUMA domains added the SDTL_OVERLAP flag, which would ensure SD_OVERLAP was always added during build_sched_domains() for these domains, even when FORCE_SD_OVERLAP was off. Condition for adding the SD_OVERLAP flag at the aforementioned commit was as follows: if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) sd->flags |= SD_OVERLAP; The FORCE_SD_OVERLAP debug feature was removed in commit af85596 ("sched/topology: Remove FORCE_SD_OVERLAP") which left the NUMA domains as the exclusive users of SDTL_OVERLAP, SD_OVERLAP, and SD_NUMA flags. Get rid of SDTL_OVERLAP and SD_OVERLAP as they have become redundant and instead rely on SD_NUMA to detect the only overlapping domain currently supported. Since SDTL_OVERLAP was the only user of "tl->flags", get rid of "sched_domain_topology_level::flags" too. Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lkml.kernel.org/r/ba4dbdf8-bc37-493d-b2e0-2efb00ea3e19@amd.com Signed-off-by: Phil Auld <pauld@redhat.com>
1 parent 89d5a14 commit f78325c

File tree

4 files changed

+13
-23
lines changed

4 files changed

+13
-23
lines changed

include/linux/sched/sd_flags.h

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -153,14 +153,6 @@ SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS)
153153
*/
154154
SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS)
155155

156-
/*
157-
* sched_groups of this level overlap
158-
*
159-
* SHARED_PARENT: Set for all NUMA levels above NODE.
160-
* NEEDS_GROUPS: Overlaps can only exist with more than one group.
161-
*/
162-
SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
163-
164156
/*
165157
* Cross-node balancing
166158
*

include/linux/sched/topology.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -175,8 +175,6 @@ bool cpus_share_resources(int this_cpu, int that_cpu);
175175
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
176176
typedef int (*sched_domain_flags_f)(void);
177177

178-
#define SDTL_OVERLAP 0x01
179-
180178
struct sd_data {
181179
struct sched_domain *__percpu *sd;
182180
struct sched_domain_shared *__percpu *sds;
@@ -187,7 +185,6 @@ struct sd_data {
187185
struct sched_domain_topology_level {
188186
sched_domain_mask_f mask;
189187
sched_domain_flags_f sd_flags;
190-
int flags;
191188
int numa_level;
192189
struct sd_data data;
193190
char *name;

kernel/sched/fair.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9926,9 +9926,9 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
99269926
min_capacity = ULONG_MAX;
99279927
max_capacity = 0;
99289928

9929-
if (child->flags & SD_OVERLAP) {
9929+
if (child->flags & SD_NUMA) {
99309930
/*
9931-
* SD_OVERLAP domains cannot assume that child groups
9931+
* SD_NUMA domains cannot assume that child groups
99329932
* span the current group.
99339933
*/
99349934

@@ -9941,7 +9941,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
99419941
}
99429942
} else {
99439943
/*
9944-
* !SD_OVERLAP domains can assume that child groups
9944+
* !SD_NUMA domains can assume that child groups
99459945
* span the current group.
99469946
*/
99479947

kernel/sched/topology.c

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
8989
break;
9090
}
9191

92-
if (!(sd->flags & SD_OVERLAP) &&
92+
if (!(sd->flags & SD_NUMA) &&
9393
cpumask_intersects(groupmask, sched_group_span(group))) {
9494
printk(KERN_CONT "\n");
9595
printk(KERN_ERR "ERROR: repeated CPUs\n");
@@ -102,7 +102,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
102102
group->sgc->id,
103103
cpumask_pr_args(sched_group_span(group)));
104104

105-
if ((sd->flags & SD_OVERLAP) &&
105+
if ((sd->flags & SD_NUMA) &&
106106
!cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
107107
printk(KERN_CONT " mask=%*pbl",
108108
cpumask_pr_args(group_balance_mask(group)));
@@ -1359,7 +1359,7 @@ void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
13591359
* "sg->asym_prefer_cpu" to "sg->sgc->asym_prefer_cpu"
13601360
* which is shared by all the overlapping groups.
13611361
*/
1362-
WARN_ON_ONCE(sd->flags & SD_OVERLAP);
1362+
WARN_ON_ONCE(sd->flags & SD_NUMA);
13631363

13641364
sg = sd->groups;
13651365
if (cpu != sg->asym_prefer_cpu) {
@@ -2031,7 +2031,6 @@ void sched_init_numa(int offline_node)
20312031
for (j = 1; j < nr_levels; i++, j++) {
20322032
tl[i] = SDTL_INIT(sd_numa_mask, cpu_numa_flags, NUMA);
20332033
tl[i].numa_level = j;
2034-
tl[i].flags = SDTL_OVERLAP;
20352034
}
20362035

20372036
sched_domain_topology_saved = sched_domain_topology;
@@ -2342,7 +2341,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
23422341

23432342
if (sdd->sd) {
23442343
sd = *per_cpu_ptr(sdd->sd, j);
2345-
if (sd && (sd->flags & SD_OVERLAP))
2344+
if (sd && (sd->flags & SD_NUMA))
23462345
free_sched_groups(sd->groups, 0);
23472346
kfree(*per_cpu_ptr(sdd->sd, j));
23482347
}
@@ -2408,9 +2407,13 @@ static bool topology_span_sane(const struct cpumask *cpu_map)
24082407
id_seen = sched_domains_tmpmask2;
24092408

24102409
for_each_sd_topology(tl) {
2410+
int tl_common_flags = 0;
2411+
2412+
if (tl->sd_flags)
2413+
tl_common_flags = (*tl->sd_flags)();
24112414

24122415
/* NUMA levels are allowed to overlap */
2413-
if (tl->flags & SDTL_OVERLAP)
2416+
if (tl_common_flags & SD_NUMA)
24142417
continue;
24152418

24162419
cpumask_clear(covered);
@@ -2481,8 +2484,6 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
24812484

24822485
if (tl == sched_domain_topology)
24832486
*per_cpu_ptr(d.sd, i) = sd;
2484-
if (tl->flags & SDTL_OVERLAP)
2485-
sd->flags |= SD_OVERLAP;
24862487
if (cpumask_equal(cpu_map, sched_domain_span(sd)))
24872488
break;
24882489
}
@@ -2495,7 +2496,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
24952496
for_each_cpu(i, cpu_map) {
24962497
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
24972498
sd->span_weight = cpumask_weight(sched_domain_span(sd));
2498-
if (sd->flags & SD_OVERLAP) {
2499+
if (sd->flags & SD_NUMA) {
24992500
if (build_overlap_sched_groups(sd, i))
25002501
goto error;
25012502
} else {

0 commit comments

Comments
 (0)