@@ -1282,49 +1282,9 @@ void play_dead_common(void)
 	local_irq_disable();
 }
 
-/*
- * We need to flush the caches before going to sleep, lest we have
- * dirty data in our caches when we come back up.
- */
-static inline void mwait_play_dead(void)
+void __noreturn mwait_play_dead(unsigned int eax_hint)
 {
 	struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
-	unsigned int eax, ebx, ecx, edx;
-	unsigned int highest_cstate = 0;
-	unsigned int highest_subcstate = 0;
-	int i;
-
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
-	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
-		return;
-	if (!this_cpu_has(X86_FEATURE_MWAIT))
-		return;
-	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
-		return;
-	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
-		return;
-
-	eax = CPUID_MWAIT_LEAF;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-
-	/*
-	 * eax will be 0 if EDX enumeration is not valid.
-	 * Initialized below to cstate, sub_cstate value when EDX is valid.
-	 */
-	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
-		eax = 0;
-	} else {
-		edx >>= MWAIT_SUBSTATE_SIZE;
-		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
-			if (edx & MWAIT_SUBSTATE_MASK) {
-				highest_cstate = i;
-				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
-			}
-		}
-		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
-			(highest_subcstate - 1);
-	}
 
 	/* Set up state for the kexec() hack below */
 	md->status = CPUDEAD_MWAIT_WAIT;
@@ -1345,7 +1305,7 @@ static inline void mwait_play_dead(void)
 		mb();
 		__monitor(md, 0, 0);
 		mb();
-		__mwait(eax, 0);
+		__mwait(eax_hint, 0);
 
 		if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) {
 			/*
@@ -1368,6 +1328,50 @@ static inline void mwait_play_dead(void)
 	}
 }
 
+/*
+ * We need to flush the caches before going to sleep, lest we have
+ * dirty data in our caches when we come back up.
+ */
+static inline void mwait_play_dead_cpuid_hint(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+	unsigned int highest_cstate = 0;
+	unsigned int highest_subcstate = 0;
+	int i;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		return;
+	if (!this_cpu_has(X86_FEATURE_MWAIT))
+		return;
+	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
+		return;
+
+	eax = CPUID_MWAIT_LEAF;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	/*
+	 * eax will be 0 if EDX enumeration is not valid.
+	 * Initialized below to cstate, sub_cstate value when EDX is valid.
+	 */
+	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
+		eax = 0;
+	} else {
+		edx >>= MWAIT_SUBSTATE_SIZE;
+		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+			if (edx & MWAIT_SUBSTATE_MASK) {
+				highest_cstate = i;
+				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+			}
+		}
+		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+			(highest_subcstate - 1);
+	}
+
+	mwait_play_dead(eax);
+}
+
 /*
  * Kick all "offline" CPUs out of mwait on kexec(). See comment in
  * mwait_play_dead().
@@ -1418,7 +1422,7 @@ void native_play_dead(void)
 	play_dead_common();
 	tboot_shutdown(TB_SHUTDOWN_WFS);
 
-	mwait_play_dead();
+	mwait_play_dead_cpuid_hint();
 	if (cpuidle_play_dead())
 		hlt_play_dead();
 }
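For reference, the EAX hint that mwait_play_dead_cpuid_hint() builds and passes down to __mwait() packs the deepest C-state advertised in CPUID leaf 5's EDX into bits [7:4] and the deepest sub-state, zero-based, into bits [3:0]. Below is a minimal userspace sketch of that packing logic, assuming the kernel's values for MWAIT_SUBSTATE_SIZE (4) and MWAIT_SUBSTATE_MASK (0xf); the helper name mwait_hint_from_edx and the sample EDX value are hypothetical, for illustration only.

/*
 * Standalone sketch of the hint packing done in mwait_play_dead_cpuid_hint().
 * The sample EDX below is made up; on real hardware it comes from CPUID
 * leaf 5 (the MWAIT leaf). Constants mirror the kernel's definitions.
 */
#include <stdio.h>

#define MWAIT_SUBSTATE_SIZE	4
#define MWAIT_SUBSTATE_MASK	0xf

static unsigned int mwait_hint_from_edx(unsigned int edx)
{
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	/* Skip the C0 field; i = 0 then corresponds to C1. */
	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			/* Remember the deepest C-state with any sub-states. */
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}

	/* EAX hint layout: bits [7:4] = C-state, bits [3:0] = sub-state. */
	return (highest_cstate << MWAIT_SUBSTATE_SIZE) |
	       (highest_subcstate - 1);
}

int main(void)
{
	/* Hypothetical leaf 5 EDX: 2 C1 sub-states, 1 C2 sub-state, 2 C3 sub-states. */
	unsigned int edx = 0x00002120;

	printf("MWAIT hint: 0x%02x\n", mwait_hint_from_edx(edx));
	return 0;
}

With that sample EDX the deepest populated field is C3, so the sketch prints 0x21: C-state index 2 in the high nibble (C1 is index 0 once the C0 field is skipped) and sub-state 1 in the low nibble. This is the same value the refactored code would hand to mwait_play_dead(eax), whose eax_hint parameter now lets callers supply such a hint directly instead of always deriving it from CPUID.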