@@ -1711,13 +1711,15 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
17111711 * The requested exclusive_cpus must not be allocated to other
17121712 * partitions and it can't use up all the root's effective_cpus.
17131713 *
1714- * Note that if there is any local partition root above it or
1715- * remote partition root underneath it, its exclusive_cpus must
1716- * have overlapped with subpartitions_cpus.
1714+ * The effective_xcpus mask can contain offline CPUs, but at least
1715+ * one online CPU must be present before it can be enabled.
1716+ *
1717+ * Note that creating a remote partition with any local partition root
1718+ * above it or remote partition root underneath it is not allowed.
17171719 */
17181720 compute_effective_exclusive_cpumask (cs , tmp -> new_cpus , NULL );
1719- if ( cpumask_empty (tmp -> new_cpus ) ||
1720- cpumask_intersects (tmp -> new_cpus , subpartitions_cpus ) ||
1721+ WARN_ON_ONCE ( cpumask_intersects (tmp -> new_cpus , subpartitions_cpus ));
1722+ if (! cpumask_intersects (tmp -> new_cpus , cpu_active_mask ) ||
17211723 cpumask_subset (top_cpuset .effective_cpus , tmp -> new_cpus ))
17221724 return PERR_INVCPUS ;
17231725
@@ -1813,6 +1815,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
18131815 * left in the top cpuset.
18141816 */
18151817 if (adding ) {
1818+ WARN_ON_ONCE (cpumask_intersects (tmp -> addmask , subpartitions_cpus ));
18161819 if (!capable (CAP_SYS_ADMIN ))
18171820 cs -> prs_err = PERR_ACCESS ;
18181821 else if (cpumask_intersects (tmp -> addmask , subpartitions_cpus ) ||
@@ -1922,7 +1925,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
19221925 bool nocpu ;
19231926
19241927 lockdep_assert_held (& cpuset_mutex );
1925- WARN_ON_ONCE (is_remote_partition (cs ));
1928+ WARN_ON_ONCE (is_remote_partition (cs )); /* For local partition only */
19261929
19271930 /*
19281931 * new_prs will only be changed for the partcmd_update and
@@ -1968,7 +1971,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
19681971 * exclusive_cpus not set. Sibling conflict should only happen
19691972 * if exclusive_cpus isn't set.
19701973 */
1971- xcpus = tmp -> new_cpus ;
1974+ xcpus = tmp -> delmask ;
19721975 if (compute_effective_exclusive_cpumask (cs , xcpus , NULL ))
19731976 WARN_ON_ONCE (!cpumask_empty (cs -> exclusive_cpus ));
19741977
@@ -1989,9 +1992,20 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
19891992 if (nocpu )
19901993 return PERR_NOCPUS ;
19911994
1992- deleting = cpumask_and (tmp -> delmask , xcpus , parent -> effective_xcpus );
1993- if (deleting )
1994- subparts_delta ++ ;
1995+ /*
1996+ * This function will only be called when all the preliminary
1997+ * checks have passed. At this point, the following condition
1998+ * should hold.
1999+ *
2000+ * (cs->effective_xcpus & cpu_active_mask) ⊆ parent->effective_cpus
2001+ *
2002+ * Warn if it is not the case.
2003+ */
2004+ cpumask_and (tmp -> new_cpus , xcpus , cpu_active_mask );
2005+ WARN_ON_ONCE (!cpumask_subset (tmp -> new_cpus , parent -> effective_cpus ));
2006+
2007+ deleting = true;
2008+ subparts_delta ++ ;
19952009 new_prs = (cmd == partcmd_enable ) ? PRS_ROOT : PRS_ISOLATED ;
19962010 } else if (cmd == partcmd_disable ) {
19972011 /*
@@ -2045,6 +2059,15 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
20452059 deleting = cpumask_and (tmp -> delmask , tmp -> delmask ,
20462060 parent -> effective_xcpus );
20472061 }
2062+ /*
2063+ * The new CPUs to be removed from parent's effective CPUs
2064+ * must be present.
2065+ */
2066+ if (deleting ) {
2067+ cpumask_and (tmp -> new_cpus , tmp -> delmask , cpu_active_mask );
2068+ WARN_ON_ONCE (!cpumask_subset (tmp -> new_cpus , parent -> effective_cpus ));
2069+ }
2070+
20482071 /*
20492072 * Make partition invalid if parent's effective_cpus could
20502073 * become empty and there are tasks in the parent.
0 commit comments