@@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit);
7272static int smu_set_fan_speed_rpm (void * handle , uint32_t speed );
7373static int smu_set_gfx_cgpg (struct smu_context * smu , bool enabled );
7474static int smu_set_mp1_state (void * handle , enum pp_mp1_state mp1_state );
75+ static void smu_power_profile_mode_get (struct smu_context * smu ,
76+ enum PP_SMC_POWER_PROFILE profile_mode );
77+ static void smu_power_profile_mode_put (struct smu_context * smu ,
78+ enum PP_SMC_POWER_PROFILE profile_mode );
7579
7680static int smu_sys_get_pp_feature_mask (void * handle ,
7781 char * buf )
@@ -1259,35 +1263,19 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
12591263 INIT_WORK (& smu -> interrupt_work , smu_interrupt_work_fn );
12601264 atomic64_set (& smu -> throttle_int_counter , 0 );
12611265 smu -> watermarks_bitmap = 0 ;
1262- smu -> power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ;
1263- smu -> default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ;
12641266
12651267 atomic_set (& smu -> smu_power .power_gate .vcn_gated , 1 );
12661268 atomic_set (& smu -> smu_power .power_gate .jpeg_gated , 1 );
12671269 atomic_set (& smu -> smu_power .power_gate .vpe_gated , 1 );
12681270 atomic_set (& smu -> smu_power .power_gate .umsch_mm_gated , 1 );
12691271
1270- smu -> workload_prority [PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ] = 0 ;
1271- smu -> workload_prority [PP_SMC_POWER_PROFILE_FULLSCREEN3D ] = 1 ;
1272- smu -> workload_prority [PP_SMC_POWER_PROFILE_POWERSAVING ] = 2 ;
1273- smu -> workload_prority [PP_SMC_POWER_PROFILE_VIDEO ] = 3 ;
1274- smu -> workload_prority [PP_SMC_POWER_PROFILE_VR ] = 4 ;
1275- smu -> workload_prority [PP_SMC_POWER_PROFILE_COMPUTE ] = 5 ;
1276- smu -> workload_prority [PP_SMC_POWER_PROFILE_CUSTOM ] = 6 ;
1277-
12781272 if (smu -> is_apu ||
12791273 !smu_is_workload_profile_available (smu , PP_SMC_POWER_PROFILE_FULLSCREEN3D ))
1280- smu -> workload_mask = 1 << smu -> workload_prority [ PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ] ;
1274+ smu -> power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ;
12811275 else
1282- smu -> workload_mask = 1 << smu -> workload_prority [PP_SMC_POWER_PROFILE_FULLSCREEN3D ];
1283-
1284- smu -> workload_setting [0 ] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ;
1285- smu -> workload_setting [1 ] = PP_SMC_POWER_PROFILE_FULLSCREEN3D ;
1286- smu -> workload_setting [2 ] = PP_SMC_POWER_PROFILE_POWERSAVING ;
1287- smu -> workload_setting [3 ] = PP_SMC_POWER_PROFILE_VIDEO ;
1288- smu -> workload_setting [4 ] = PP_SMC_POWER_PROFILE_VR ;
1289- smu -> workload_setting [5 ] = PP_SMC_POWER_PROFILE_COMPUTE ;
1290- smu -> workload_setting [6 ] = PP_SMC_POWER_PROFILE_CUSTOM ;
1276+ smu -> power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D ;
1277+ smu_power_profile_mode_get (smu , smu -> power_profile_mode );
1278+
12911279 smu -> display_config = & adev -> pm .pm_display_cfg ;
12921280
12931281 smu -> smu_dpm .dpm_level = AMD_DPM_FORCED_LEVEL_AUTO ;
@@ -1340,6 +1328,11 @@ static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
13401328 return ret ;
13411329 }
13421330
1331+ if (smu -> custom_profile_params ) {
1332+ kfree (smu -> custom_profile_params );
1333+ smu -> custom_profile_params = NULL ;
1334+ }
1335+
13431336 smu_fini_microcode (smu );
13441337
13451338 return 0 ;
@@ -2124,6 +2117,9 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block)
21242117 if (!ret )
21252118 adev -> gfx .gfx_off_entrycount = count ;
21262119
2120+ /* clear this on suspend so it will get reprogrammed on resume */
2121+ smu -> workload_mask = 0 ;
2122+
21272123 return 0 ;
21282124}
21292125
@@ -2236,25 +2232,49 @@ static int smu_enable_umd_pstate(void *handle,
22362232}
22372233
22382234static int smu_bump_power_profile_mode (struct smu_context * smu ,
2239- long * param ,
2240- uint32_t param_size )
2235+ long * custom_params ,
2236+ u32 custom_params_max_idx )
22412237{
2242- int ret = 0 ;
2238+ u32 workload_mask = 0 ;
2239+ int i , ret = 0 ;
2240+
2241+ for (i = 0 ; i < PP_SMC_POWER_PROFILE_COUNT ; i ++ ) {
2242+ if (smu -> workload_refcount [i ])
2243+ workload_mask |= 1 << i ;
2244+ }
2245+
2246+ if (smu -> workload_mask == workload_mask )
2247+ return 0 ;
22432248
22442249 if (smu -> ppt_funcs -> set_power_profile_mode )
2245- ret = smu -> ppt_funcs -> set_power_profile_mode (smu , param , param_size );
2250+ ret = smu -> ppt_funcs -> set_power_profile_mode (smu , workload_mask ,
2251+ custom_params ,
2252+ custom_params_max_idx );
2253+
2254+ if (!ret )
2255+ smu -> workload_mask = workload_mask ;
22462256
22472257 return ret ;
22482258}
22492259
2260+ static void smu_power_profile_mode_get (struct smu_context * smu ,
2261+ enum PP_SMC_POWER_PROFILE profile_mode )
2262+ {
2263+ smu -> workload_refcount [profile_mode ]++ ;
2264+ }
2265+
2266+ static void smu_power_profile_mode_put (struct smu_context * smu ,
2267+ enum PP_SMC_POWER_PROFILE profile_mode )
2268+ {
2269+ if (smu -> workload_refcount [profile_mode ])
2270+ smu -> workload_refcount [profile_mode ]-- ;
2271+ }
2272+
22502273static int smu_adjust_power_state_dynamic (struct smu_context * smu ,
22512274 enum amd_dpm_forced_level level ,
2252- bool skip_display_settings ,
2253- bool init )
2275+ bool skip_display_settings )
22542276{
22552277 int ret = 0 ;
2256- int index = 0 ;
2257- long workload [1 ];
22582278 struct smu_dpm_context * smu_dpm_ctx = & (smu -> smu_dpm );
22592279
22602280 if (!skip_display_settings ) {
@@ -2291,14 +2311,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
22912311 }
22922312
22932313 if (smu_dpm_ctx -> dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2294- smu_dpm_ctx -> dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM ) {
2295- index = fls (smu -> workload_mask );
2296- index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0 ;
2297- workload [0 ] = smu -> workload_setting [index ];
2298-
2299- if (init || smu -> power_profile_mode != workload [0 ])
2300- smu_bump_power_profile_mode (smu , workload , 0 );
2301- }
2314+ smu_dpm_ctx -> dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM )
2315+ smu_bump_power_profile_mode (smu , NULL , 0 );
23022316
23032317 return ret ;
23042318}
@@ -2317,13 +2331,13 @@ static int smu_handle_task(struct smu_context *smu,
23172331 ret = smu_pre_display_config_changed (smu );
23182332 if (ret )
23192333 return ret ;
2320- ret = smu_adjust_power_state_dynamic (smu , level , false, false );
2334+ ret = smu_adjust_power_state_dynamic (smu , level , false);
23212335 break ;
23222336 case AMD_PP_TASK_COMPLETE_INIT :
2323- ret = smu_adjust_power_state_dynamic (smu , level , true, true );
2337+ ret = smu_adjust_power_state_dynamic (smu , level , true);
23242338 break ;
23252339 case AMD_PP_TASK_READJUST_POWER_STATE :
2326- ret = smu_adjust_power_state_dynamic (smu , level , true, false );
2340+ ret = smu_adjust_power_state_dynamic (smu , level , true);
23272341 break ;
23282342 default :
23292343 break ;
@@ -2345,34 +2359,33 @@ static int smu_handle_dpm_task(void *handle,
23452359
23462360static int smu_switch_power_profile (void * handle ,
23472361 enum PP_SMC_POWER_PROFILE type ,
2348- bool en )
2362+ bool enable )
23492363{
23502364 struct smu_context * smu = handle ;
23512365 struct smu_dpm_context * smu_dpm_ctx = & (smu -> smu_dpm );
2352- long workload [1 ];
2353- uint32_t index ;
2366+ int ret ;
23542367
23552368 if (!smu -> pm_enabled || !smu -> adev -> pm .dpm_enabled )
23562369 return - EOPNOTSUPP ;
23572370
23582371 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM ))
23592372 return - EINVAL ;
23602373
2361- if (!en ) {
2362- smu -> workload_mask &= ~(1 << smu -> workload_prority [type ]);
2363- index = fls (smu -> workload_mask );
2364- index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0 ;
2365- workload [0 ] = smu -> workload_setting [index ];
2366- } else {
2367- smu -> workload_mask |= (1 << smu -> workload_prority [type ]);
2368- index = fls (smu -> workload_mask );
2369- index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0 ;
2370- workload [0 ] = smu -> workload_setting [index ];
2371- }
2372-
23732374 if (smu_dpm_ctx -> dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2374- smu_dpm_ctx -> dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM )
2375- smu_bump_power_profile_mode (smu , workload , 0 );
2375+ smu_dpm_ctx -> dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM ) {
2376+ if (enable )
2377+ smu_power_profile_mode_get (smu , type );
2378+ else
2379+ smu_power_profile_mode_put (smu , type );
2380+ ret = smu_bump_power_profile_mode (smu , NULL , 0 );
2381+ if (ret ) {
2382+ if (enable )
2383+ smu_power_profile_mode_put (smu , type );
2384+ else
2385+ smu_power_profile_mode_get (smu , type );
2386+ return ret ;
2387+ }
2388+ }
23762389
23772390 return 0 ;
23782391}
@@ -3064,12 +3077,35 @@ static int smu_set_power_profile_mode(void *handle,
30643077 uint32_t param_size )
30653078{
30663079 struct smu_context * smu = handle ;
3080+ bool custom = false;
3081+ int ret = 0 ;
30673082
30683083 if (!smu -> pm_enabled || !smu -> adev -> pm .dpm_enabled ||
30693084 !smu -> ppt_funcs -> set_power_profile_mode )
30703085 return - EOPNOTSUPP ;
30713086
3072- return smu_bump_power_profile_mode (smu , param , param_size );
3087+ if (param [param_size ] == PP_SMC_POWER_PROFILE_CUSTOM ) {
3088+ custom = true;
3089+ /* clear frontend mask so custom changes propagate */
3090+ smu -> workload_mask = 0 ;
3091+ }
3092+
3093+ if ((param [param_size ] != smu -> power_profile_mode ) || custom ) {
3094+ /* clear the old user preference */
3095+ smu_power_profile_mode_put (smu , smu -> power_profile_mode );
3096+ /* set the new user preference */
3097+ smu_power_profile_mode_get (smu , param [param_size ]);
3098+ ret = smu_bump_power_profile_mode (smu ,
3099+ custom ? param : NULL ,
3100+ custom ? param_size : 0 );
3101+ if (ret )
3102+ smu_power_profile_mode_put (smu , param [param_size ]);
3103+ else
3104+ /* store the user's preference */
3105+ smu -> power_profile_mode = param [param_size ];
3106+ }
3107+
3108+ return ret ;
30733109}
30743110
30753111static int smu_get_fan_control_mode (void * handle , u32 * fan_mode )
0 commit comments