/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/errno.h>
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"


#define TEMP_RANGE_MIN (0)
#define TEMP_RANGE_MAX (80 * 1000)

#define PHM_FUNC_CHECK(hw) \
	do {							\
		if ((hw) == NULL || (hw)->hwmgr_func == NULL)	\
			return -EINVAL;				\
	} while (0)

int phm_setup_asic(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->asic_setup)
		return hwmgr->hwmgr_func->asic_setup(hwmgr);

	return 0;
}

int phm_power_down_asic(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->power_off_asic)
		return hwmgr->hwmgr_func->power_off_asic(hwmgr);

	return 0;
}

int phm_set_power_state(struct pp_hwmgr *hwmgr,
			const struct pp_hw_power_state *pcurrent_state,
			const struct pp_hw_power_state *pnew_power_state)
{
	struct phm_set_power_state_input states;

	PHM_FUNC_CHECK(hwmgr);

	states.pcurrent_state = pcurrent_state;
	states.pnew_state = pnew_power_state;

	if (NULL != hwmgr->hwmgr_func->power_state_set)
		return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);

	return 0;
}

int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = NULL;
	int ret = -EINVAL;
	PHM_FUNC_CHECK(hwmgr);
	adev = hwmgr->adev;

	/* Skip for suspend/resume case */
	if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
	    && !amdgpu_passthrough(adev) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
		ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);

	return ret;
}

int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
	int ret = -EINVAL;

	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->not_vf)
		return 0;

	if (!smum_is_dpm_running(hwmgr)) {
		pr_info("dpm has been disabled\n");
		return 0;
	}

	if (hwmgr->hwmgr_func->dynamic_state_management_disable)
		ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);

	return ret;
}

int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
{
	int ret = 0;

	PHM_FUNC_CHECK(hwmgr);
	if (hwmgr->hwmgr_func->force_dpm_level != NULL)
		ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);

	return ret;
}

int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				 struct pp_power_state *adjusted_ps,
				 const struct pp_power_state *current_ps)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
		return hwmgr->hwmgr_func->apply_state_adjust_rules(
								hwmgr,
								adjusted_ps,
								current_ps);
	return 0;
}

int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
		return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
	return 0;
}

int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
		return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
	return 0;
}


int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
		return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);

	return 0;
}

int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
		hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);

	return 0;
}

int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->display_config_changed)
		hwmgr->hwmgr_func->display_config_changed(hwmgr);

	return 0;
}

int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
		hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);

	return 0;
}

int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->not_vf)
		return 0;

	if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr);
}

int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->register_irq_handlers != NULL)
		return hwmgr->hwmgr_func->register_irq_handlers(hwmgr);

	return 0;
}

/**
 * Initializes the thermal controller subsystem.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return the return value from the dispatcher when a thermal controller is
 *         started, otherwise 0.
 */
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	struct PP_TemperatureRange range = {
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX};
	struct amdgpu_device *adev = hwmgr->adev;

	if (!hwmgr->not_vf)
		return 0;

	if (hwmgr->hwmgr_func->get_thermal_temperature_range)
		hwmgr->hwmgr_func->get_thermal_temperature_range(
				hwmgr, &range);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController)
			&& hwmgr->hwmgr_func->start_thermal_controller != NULL)
		ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}


bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL ||
	    hwmgr->hwmgr_func == NULL)
		return false;

	if (hwmgr->pp_one_vf)
		return false;

	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
		return false;

	return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
}


int phm_check_states_equal(struct pp_hwmgr *hwmgr,
			   const struct pp_hw_power_state *pstate1,
			   const struct pp_hw_power_state *pstate2,
			   bool *equal)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->check_states_equal == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->check_states_equal(hwmgr, pstate1, pstate2, equal);
}

int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
		const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int number_of_active_display = 0;

	PHM_FUNC_CHECK(hwmgr);

	if (display_config == NULL)
		return -EINVAL;

	if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
		hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			number_of_active_display++;
	}

	if (NULL != hwmgr->hwmgr_func->set_active_display_count)
		hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);

	if (hwmgr->hwmgr_func->store_cc6_data == NULL)
		return -EINVAL;

	/* TODO: pass other display configuration in the future */

	if (hwmgr->hwmgr_func->store_cc6_data)
		hwmgr->hwmgr_func->store_cc6_data(hwmgr,
				display_config->cpu_pstate_separation_time,
				display_config->cpu_cc6_disable,
				display_config->cpu_pstate_disable,
				display_config->nb_pstate_switch_disable);

	return 0;
}
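
/*
 * Illustrative sketch only, not part of this file: a minimal caller of
 * phm_store_dal_configuration_data() above.  The example_notify_no_displays()
 * helper name is an assumption for illustration; it only touches
 * amd_pp_display_configuration fields already referenced by the wrapper.
 */
#if 0
static int example_notify_no_displays(struct pp_hwmgr *hwmgr)
{
	struct amd_pp_display_configuration config = {0};

	/*
	 * With no paths reported, no controller_id is non-zero, so the
	 * active display count handed to the backend is zero and the
	 * deep-sleep DCEFCLK floor stays at 0.
	 */
	config.num_path_including_non_display = 0;

	return phm_store_dal_configuration_data(hwmgr, &config);
}
#endif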
int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	PHM_FUNC_CHECK(hwmgr);

	if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
		return -EINVAL;
	return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
}

int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->set_cpu_power_state != NULL)
		return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr);

	return 0;
}


int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	PHM_FUNC_CHECK(hwmgr);
	if (hwmgr->hwmgr_func->get_performance_level == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level);
}


/**
 * Gets Clock Info.
 *
 * @param hwmgr        the address of the powerplay hardware manager.
 * @param state        the address of the hardware power state structure.
 * @param pclock_info  the address of the pp_clock_info structure where the result is returned.
 * @param designation  the performance level designation used for the maximum clocks.
 * @return -EINVAL if any of the parameters is NULL, otherwise the return value from the back-end.
 */
int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info,
			PHM_PerformanceLevelDesignation designation)
{
	int result;
	PHM_PerformanceLevel performance_level = {0};

	PHM_FUNC_CHECK(hwmgr);

	PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
	PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);

	result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level);

	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result);

	pclock_info->min_mem_clk = performance_level.memory_clock;
	pclock_info->min_eng_clk = performance_level.coreClock;
	pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;

	result = phm_get_performance_level(hwmgr, state, designation,
			(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level);

	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result);

	pclock_info->max_mem_clk = performance_level.memory_clock;
	pclock_info->max_eng_clk = performance_level.coreClock;
	pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;

	return 0;
}
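
/*
 * Illustrative sketch only, not part of this file: a minimal caller of
 * phm_get_clock_info() above.  The example_log_clock_range() helper name is
 * an assumption for illustration; the pp_clock_info fields and the Activity
 * designation are the ones already used in this file.
 */
#if 0
static int example_log_clock_range(struct pp_hwmgr *hwmgr,
				   const struct pp_hw_power_state *state)
{
	struct pp_clock_info info = {0};
	int ret;

	ret = phm_get_clock_info(hwmgr, state, &info,
				 PHM_PerformanceLevelDesignation_Activity);
	if (ret)
		return ret;

	pr_info("eng clk %u..%u, mem clk %u..%u\n",
		info.min_eng_clk, info.max_eng_clk,
		info.min_mem_clk, info.max_mem_clk);

	return 0;
}
#endif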
int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info);
}

int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks);
}

int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_clock_by_type_with_latency == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_clock_by_type_with_latency(hwmgr, type, clocks);
}

int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_clock_by_type_with_voltage == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_clock_by_type_with_voltage(hwmgr, type, clocks);
}

int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
					void *clock_ranges)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
			clock_ranges);
}

int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
		struct pp_display_clock_request *clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->display_clock_voltage_request)
		return -EINVAL;

	return hwmgr->hwmgr_func->display_clock_voltage_request(hwmgr, clock);
}

int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
}

int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->not_vf)
		return 0;

	if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}

int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_active_display_count)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
}

int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
}

int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
}

int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
}
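
/*
 * Illustrative sketch only, not part of this file: every wrapper above follows
 * the same dispatch pattern -- validate hwmgr with PHM_FUNC_CHECK(), then
 * forward to the ASIC-specific callback only when the backend implements it.
 * A hypothetical new wrapper would look like this; "do_something" is an
 * assumed callback name, not an existing hwmgr_func callback.
 */
#if 0
int phm_do_something(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->do_something != NULL)
		return hwmgr->hwmgr_func->do_something(hwmgr);

	/* Optional callbacks fall through to success when unimplemented. */
	return 0;
}
#endif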