/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
#include "smu7_baco.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

#define MC_CG_SEQ_DRAMCONF_S0 0x05
#define MC_CG_SEQ_DRAMCONF_S1 0x06
#define MC_CG_SEQ_YCLK_SUSPEND 0x04
#define MC_CG_SEQ_YCLK_RESUME 0x0a

#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000

#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000

#define MEM_LATENCY_HIGH 45
#define MEM_LATENCY_LOW 35
#define MEM_LATENCY_ERR 0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)

/* Each entry: {bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity,
 * bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity};
 * see struct profile_mode_setting in hwmgr.h.
 */
static struct profile_mode_setting smu7_profiling[7] =
			{{0, 0, 0, 0, 0, 0, 0, 0},
			 {1, 0, 100, 30, 1, 0, 100, 10},
			 {1, 10, 0, 30, 0, 0, 0, 0},
			 {0, 0, 0, 0, 1, 10, 16, 31},
			 {1, 0, 11, 50, 1, 0, 100, 10},
			 {1, 0, 5, 30, 0, 0, 0, 0},
			 {0, 0, 0, 0, 0, 0, 0, 0},
			};

#define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310)

#define ixPWR_SVI2_PLANE1_LOAD 0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask);

static struct smu7_power_state *cast_phw_smu7_power_state(
				  struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (struct smu7_power_state *)hw_ps;
}

static const struct smu7_power_state *cast_const_phw_smu7_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (const struct smu7_power_state *)hw_ps;
}

/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}

static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speedCntl = 0;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
			ixPCIE_LC_SPEED_CNTL);
	return((uint16_t)PHM_GET_FIELD(speedCntl,
			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
	uint32_t link_width;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

	PP_ASSERT_WITH_CODE((7 >= link_width),
			"Invalid PCIe lane width!", return 0);

	return decode_pcie_lane_width(link_width);
}

/**
 * Enable voltage control
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->chip_id == CHIP_VEGAM) {
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
	}

	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);

	return 0;
}

/**
 * Checks if we want to support voltage control
 *
 * @param hwmgr the address of the powerplay hardware manager.
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
	const struct smu7_hwmgr *data =
			(const struct smu7_hwmgr *)(hwmgr->backend);

	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}

/**
 * Enable voltage control
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}

static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
		struct phm_clock_voltage_dependency_table *voltage_dependency_table
		)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Dependency Table empty.", return -EINVAL;);

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;
	voltage_table->count = voltage_dependency_table->count;

	for (i = 0; i < voltage_dependency_table->count; i++) {
		voltage_table->entries[i].value =
			voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}


/**
 * Create Voltage Tables.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	int result = 0;
	uint32_t tmp;

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve MVDD table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
					hwmgr->dyn_state.mvdd_dependency_on_mclk);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 MVDD table from dependency table.",
				return result;);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDCI table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
					hwmgr->dyn_state.vddci_dependency_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDCI table from dependency table.",
				return result);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
		/* VDDGFX has only SVI2 voltage control */
		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
					table_info->vddgfx_lookup_table);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
	}


	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
					VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
					&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve VDDC table.", return result;);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

		if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
					hwmgr->dyn_state.vddc_dependency_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
					table_info->vddc_lookup_table);

		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
	}

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
	PP_ASSERT_WITH_CODE(
			(data->vddc_voltage_table.count <= tmp),
		"Too many voltage values for VDDC. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
						&(data->vddc_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE(
			(data->vddgfx_voltage_table.count <= tmp),
		"Too many voltage values for VDDGFX. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
						&(data->vddgfx_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= tmp),
		"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddci_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= tmp),
		"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->mvdd_voltage_table)));

	return 0;
}

/**
 * Programs static screen detection parameters
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_program_static_screen_threshold_parameters(
		struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}

/**
 * Setup display gap for glitch free memory clock switching.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
	uint32_t display_gap =
			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_DISPLAY_GAP_CNTL);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, DISPLAY_GAP_IGNORE);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	return 0;
}

/**
 * Programs activity state transition voting clients
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int i;

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_FREQ_TRAN_VOTING_0 + i * 4,
					data->voting_rights_clients[i]);
	return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	int i;

	/* Reset voting clients before disabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);

	return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}

/**
 * Initial switch from ARB F0->F1
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 * This function is to be called from the SetPowerState table.
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
	uint32_t tmp;

	tmp = (cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
			0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return smu7_copy_and_switch_arb_sets(hwmgr,
			tmp, MC_CG_ARB_FREQ_F0);
}

static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = NULL;

	uint32_t i, max_entry;
	uint32_t tmp;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	if (table_info != NULL)
		pcie_table = table_info->pcie_table;

	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}
	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
			tmp,
			MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
		/* max_entry is used to make sure we reserve one PCIE level
		 * for boot level (fix for A+A PSPP issue).
		 * If the PCIE table from the PPTable has a ULV entry + 8 entries,
		 * then ignore the last entry.
		 */
		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
	} else {
		/* Hardcode Pcie Table */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Populate last level for boot PCIE level, but do not increment count. */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				data->vbios_boot_state.pcie_lane_bootup_value);
	} else {
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
			data->dpm_table.pcie_speed_table.count,
			get_pcie_gen_support(data->pcie_gen_cap,
					PP_Min_PCIEGen),
			get_pcie_lane_support(data->pcie_lane_cap,
					PP_Max_PCIELane));
	}
	return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

	phm_reset_single_dpm_table(
			&data->dpm_table.sclk_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_GRAPHICS),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.mclk_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.vddc_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_VDDC),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.vddci_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.mvdd_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_MVDD),
			MAX_REGULAR_DPM_NUMBER);
	return 0;
}
/*
 * This function is to initialize all DPM state tables
 * for SMU7 based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */

static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"MCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);


	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;

	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.sclk_table.count++;
		}
	}

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
			"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
				allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.mclk_table.count++;
		}
	}

	/* Initialize Vddc DPM table based on allowed Vddc values,
	 * and populate corresponding std values.
	 */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
		/* param1 is for corresponding std voltage */
		data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
	}

	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize Vddci DPM table based on allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
		}
		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
	}

	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/*
		 * Initialize MVDD DPM table based on allowed Mclk
		 * values
		 */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
		}
		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
	}

	return 0;
}

static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
			"SCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
			"SCLK dependency table count is 0.",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
			"MCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table count is 0",
			return -EINVAL);

	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
				dep_sclk_table->entries[i].clk) {

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
					dep_sclk_table->entries[i].clk;

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.sclk_table.count++;
		}
	}
	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
				[data->dpm_table.mclk_table.count - 1].value !=
				dep_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
					dep_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.mclk_table.count++;
		}
	}

	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
	return 0;
}

static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_odn_performance_level *entries;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	odn_table->odn_core_clock_dpm_levels.num_of_pl =
						data->golden_dpm_table.sclk_table.count;
	entries = odn_table->odn_core_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_sclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
						data->golden_dpm_table.mclk_table.count;
	entries = odn_table->odn_memory_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_mclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

	return 0;
}

static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t min_vddc = 0;
	uint32_t max_vddc = 0;

	if (!table_info)
		return;

	dep_sclk_table = table_info->vdd_dep_on_sclk;

	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);

	if (min_vddc == 0 || min_vddc > 2000
			|| min_vddc > dep_sclk_table->entries[0].vddc)
		min_vddc = dep_sclk_table->entries[0].vddc;

	if (max_vddc == 0 || max_vddc > 2000
			|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
		max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;

	data->odn_dpm_table.min_vddc = min_vddc;
	data->odn_dpm_table.max_vddc = max_vddc;
}

static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
}

static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	smu7_reset_dpm_tables(hwmgr);

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		smu7_setup_dpm_tables_v1(hwmgr);
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		smu7_setup_dpm_tables_v0(hwmgr);

	smu7_setup_default_pcie_table(hwmgr);

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct smu7_dpm_table));

	/* initialize ODN table */
	if (hwmgr->od_enabled) {
		if (data->odn_dpm_table.max_vddc) {
			smu7_check_dpm_table_updated(hwmgr);
		} else {
			smu7_setup_voltage_range_from_vbios(hwmgr);
			smu7_odn_initial_default_setting(hwmgr);
		}
	}
	return 0;
}

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot))
		return smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableVRHotGPIOInterrupt,
				NULL);

	return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);

	return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);

	return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to enable Master Deep Sleep switch failed!",
					return -EINVAL);
	} else {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF,
				NULL)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF,
				NULL)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= smum_get_mac_definition(hwmgr,
			SMU_UVD_MCLK_HANDSHAKE_DISABLE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		if (hwmgr->chip_id == CHIP_VEGAM)
			smu7_disable_sclk_vce_handshake(hwmgr);

		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
				"Failed to enable SCLK DPM during DPM Start Function!",
				return -EINVAL);
	}

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
			smu7_disable_handshake_uvd(hwmgr);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_MCLKDPM_Enable,
						NULL)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -EINVAL);

		if (hwmgr->chip_family != CHIP_VEGAM)
			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);


		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
			udelay(10);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
		} else {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
			udelay(10);
			if (hwmgr->chip_id == CHIP_VEGAM) {
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
			} else {
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
			}
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
		}
	}

	return 0;
}

static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable general power management */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
		cgs_write_register(hwmgr->device, 0x1488,
			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Enable,
						NULL)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt,
				NULL)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}

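/* Counterpart of smu7_enable_sclk_mclk_dpm(): ask the SMC to stop SCLK and
 * MCLK DPM for the levels whose DPM keys are enabled (no message is sent,
 * and 0 is returned, if DPM is not currently running).
 */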
static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable SCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
	}

	/* disable MCLK dpm */
	if (!data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable MCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
	}

	return 0;
}

static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Disable,
						NULL) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	smu7_disable_sclk_mclk_dpm(hwmgr);

	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
			"Trying to disable voltage DPM when DPM is disabled",
			return 0);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);

	return 0;
}

static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
	bool protection;
	enum DPM_EVENT_SRC src;

	switch (sources) {
	default:
		pr_err("Unknown throttling event sources.");
		fallthrough;
	case 0:
		protection = false;
		/* src is unused */
		break;
	case (1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << PHM_AutoThrottleSource_External):
		protection = true;
		src = DPM_EVENT_SRC_EXTERNAL;
		break;
	case (1 << PHM_AutoThrottleSource_External) |
			(1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
		break;
	}
	/* Order matters - don't enable thermal protection for the wrong source.
	 */
	if (protection) {
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
				DPM_EVENT_SRC, src);
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS,
				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ThermalController));
	} else
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS, 1);
}

static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (!(data->active_auto_throttle_sources & (1 << source))) {
		data->active_auto_throttle_sources |= 1 << source;
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->active_auto_throttle_sources & (1 << source)) {
		data->active_auto_throttle_sources &= ~(1 << source);
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->pcie_performance_request = true;

	return 0;
}

static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result = 0;
	int result = 0;

	if (smu7_voltage_control(hwmgr)) {
		tmp_result = smu7_enable_voltage_control(hwmgr);
		PP_ASSERT_WITH_CODE(tmp_result == 0,
				"Failed to enable voltage control!",
				result = tmp_result);

		tmp_result = smu7_construct_voltage_tables(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to construct voltage tables!",
				result = tmp_result);
	}
	smum_initialize_mc_reg_table(hwmgr);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);

	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program static screen threshold parameters!",
			result = tmp_result);

	tmp_result = smu7_enable_display_gap(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable display gap!", result = tmp_result);

	tmp_result = smu7_program_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program voting clients!", result = tmp_result);

	tmp_result = smum_process_firmware_header(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to process firmware header!", result = tmp_result);

	if (hwmgr->chip_id != CHIP_VEGAM) {
		tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to initialize switch from ArbF0 to F1!",
				result = tmp_result);
	}

	result = smu7_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to setup default DPM tables!", return result);

	tmp_result = smum_init_smc_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to initialize SMC table!", result = tmp_result);

	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);

	smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);

	tmp_result = smu7_enable_sclk_control(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SCLK control!", result = tmp_result);

	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable voltage control!", result = tmp_result);

	tmp_result = smu7_enable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ULV!", result = tmp_result);

	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_enable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to enable DIDT config!", result = tmp_result);

	tmp_result = smu7_start_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to start DPM!", result = tmp_result);

	tmp_result = smu7_enable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SMC CAC!", result = tmp_result);

	tmp_result = smu7_enable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable power containment!", result = tmp_result);

	tmp_result = smu7_power_control_set_level(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to power control set level!", result = tmp_result);

	tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_pcie_performance_request(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"pcie performance request failed!", result = tmp_result);

	return 0;
}

static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
	if (!hwmgr->avfs_supported)
		return 0;

	if (enable) {
		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
					hwmgr, PPSMC_MSG_EnableAvfs, NULL),
					"Failed to enable AVFS!",
					return -EINVAL);
		}
	} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
				hwmgr, PPSMC_MSG_DisableAvfs, NULL),
				"Failed to disable AVFS!",
				return -EINVAL);
	}

	return 0;
}

static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (!hwmgr->avfs_supported)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		smu7_avfs_control(hwmgr, false);
	} else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		smu7_avfs_control(hwmgr, false);
		smu7_avfs_control(hwmgr, true);
	} else {
		smu7_avfs_control(hwmgr, true);
	}

	return 0;
}

static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	tmp_result = smu7_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable DIDT!", result = tmp_result);

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_avfs_control(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}

static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct amdgpu_device *adev = hwmgr->adev;

	data->dll_default_on = false;
	data->mclk_dpm0_activity_target = 0xa;
	data->vddc_vddgfx_delta = 300;
	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;

	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
	/* need to set voltage control types before EVV patching */
	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->enable_tdc_limit_feature = true;
	data->enable_pkg_pwr_tracking_feature = true;
	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
	data->current_profile_setting.bupdate_sclk = 1;
	data->current_profile_setting.sclk_up_hyst = 0;
	data->current_profile_setting.sclk_down_hyst = 100;
	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
	data->current_profile_setting.bupdate_mclk = 1;
	if (adev->gmc.vram_width == 256) {
		data->current_profile_setting.mclk_up_hyst = 10;
		data->current_profile_setting.mclk_down_hyst = 60;
		data->current_profile_setting.mclk_activity = 25;
	} else if (adev->gmc.vram_width == 128) {
		data->current_profile_setting.mclk_up_hyst = 5;
		data->current_profile_setting.mclk_down_hyst = 16;
		data->current_profile_setting.mclk_activity = 20;
	} else if (adev->gmc.vram_width == 64) {
		data->current_profile_setting.mclk_up_hyst = 3;
		data->current_profile_setting.mclk_down_hyst = 16;
		data->current_profile_setting.mclk_activity = 20;
	}
	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;

	if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
		uint8_t tmp1, tmp2;
		uint16_t tmp3 = 0;

		atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
						&tmp3);
		tmp3 = (tmp3 >> 5) & 0x3;
		data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
	} else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		data->vddc_phase_shed_control = 1;
	} else {
		data->vddc_phase_shed_control = 0;
	}

	if (hwmgr->chip_id == CHIP_HAWAII) {
		data->thermal_temp_setting.temperature_low = 94500;
		data->thermal_temp_setting.temperature_high = 95000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		data->thermal_temp_setting.temperature_low = 99500;
		data->thermal_temp_setting.temperature_high = 100000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	}

	data->fast_watermark_threshold = 100;
	if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;

(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1637 PHM_PlatformCaps_ControlVDDGFX)) { 1638 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1639 VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { 1640 data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1641 } 1642 } 1643 1644 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1645 PHM_PlatformCaps_EnableMVDDControl)) { 1646 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1647 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) 1648 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; 1649 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1650 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) 1651 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1652 } 1653 1654 if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) 1655 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1656 PHM_PlatformCaps_ControlVDDGFX); 1657 1658 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1659 PHM_PlatformCaps_ControlVDDCI)) { 1660 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1661 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) 1662 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; 1663 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1664 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) 1665 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1666 } 1667 1668 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) 1669 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1670 PHM_PlatformCaps_EnableMVDDControl); 1671 1672 if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) 1673 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1674 PHM_PlatformCaps_ControlVDDCI); 1675 1676 if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK) 1677 && (table_info->cac_dtp_table->usClockStretchAmount != 0)) 1678 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1679 PHM_PlatformCaps_ClockStretcher); 1680 1681 data->pcie_gen_performance.max = PP_PCIEGen1; 1682 data->pcie_gen_performance.min = PP_PCIEGen3; 1683 data->pcie_gen_power_saving.max = PP_PCIEGen1; 1684 data->pcie_gen_power_saving.min = PP_PCIEGen3; 1685 data->pcie_lane_performance.max = 0; 1686 data->pcie_lane_performance.min = 16; 1687 data->pcie_lane_power_saving.max = 0; 1688 data->pcie_lane_power_saving.min = 16; 1689 1690 1691 if (adev->pg_flags & AMD_PG_SUPPORT_UVD) 1692 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1693 PHM_PlatformCaps_UVDPowerGating); 1694 if (adev->pg_flags & AMD_PG_SUPPORT_VCE) 1695 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1696 PHM_PlatformCaps_VCEPowerGating); 1697 } 1698 1699 /** 1700 * Get Leakage VDDC based on leakage ID. 1701 * 1702 * @param hwmgr the address of the powerplay hardware manager. 
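 *
 * Walks the ATOM virtual voltage IDs (ATOM_VIRTUAL_VOLTAGE_ID0 + 0..7),
 * asks the VBIOS for the real EVV voltage behind each ID (using the SCLK
 * level that references the ID, nudged upward when clock stretching is
 * enabled for that level), and records each (leakage ID, actual voltage)
 * pair in the backend leakage table for later table patching. Both VDDC
 * and, when VDDGFX is SVID2-controlled, VDDGFX are handled; IDs that
 * resolve to 0 or to the ID value itself are skipped.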
1703 * @return always 0 1704 */ 1705 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) 1706 { 1707 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1708 uint16_t vv_id; 1709 uint16_t vddc = 0; 1710 uint16_t vddgfx = 0; 1711 uint16_t i, j; 1712 uint32_t sclk = 0; 1713 struct phm_ppt_v1_information *table_info = 1714 (struct phm_ppt_v1_information *)hwmgr->pptable; 1715 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; 1716 1717 1718 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { 1719 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 1720 1721 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 1722 if ((hwmgr->pp_table_version == PP_TABLE_V1) 1723 && !phm_get_sclk_for_voltage_evv(hwmgr, 1724 table_info->vddgfx_lookup_table, vv_id, &sclk)) { 1725 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1726 PHM_PlatformCaps_ClockStretcher)) { 1727 sclk_table = table_info->vdd_dep_on_sclk; 1728 1729 for (j = 1; j < sclk_table->count; j++) { 1730 if (sclk_table->entries[j].clk == sclk && 1731 sclk_table->entries[j].cks_enable == 0) { 1732 sclk += 5000; 1733 break; 1734 } 1735 } 1736 } 1737 if (0 == atomctrl_get_voltage_evv_on_sclk 1738 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, 1739 vv_id, &vddgfx)) { 1740 /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */ 1741 PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL); 1742 1743 /* the voltage should not be zero nor equal to leakage ID */ 1744 if (vddgfx != 0 && vddgfx != vv_id) { 1745 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; 1746 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id; 1747 data->vddcgfx_leakage.count++; 1748 } 1749 } else { 1750 pr_info("Error retrieving EVV voltage value!\n"); 1751 } 1752 } 1753 } else { 1754 if ((hwmgr->pp_table_version == PP_TABLE_V0) 1755 || !phm_get_sclk_for_voltage_evv(hwmgr, 1756 table_info->vddc_lookup_table, vv_id, &sclk)) { 1757 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1758 PHM_PlatformCaps_ClockStretcher)) { 1759 if (table_info == NULL) 1760 return -EINVAL; 1761 sclk_table = table_info->vdd_dep_on_sclk; 1762 1763 for (j = 1; j < sclk_table->count; j++) { 1764 if (sclk_table->entries[j].clk == sclk && 1765 sclk_table->entries[j].cks_enable == 0) { 1766 sclk += 5000; 1767 break; 1768 } 1769 } 1770 } 1771 1772 if (phm_get_voltage_evv_on_sclk(hwmgr, 1773 VOLTAGE_TYPE_VDDC, 1774 sclk, vv_id, &vddc) == 0) { 1775 if (vddc >= 2000 || vddc == 0) 1776 return -EINVAL; 1777 } else { 1778 pr_debug("failed to retrieving EVV voltage!\n"); 1779 continue; 1780 } 1781 1782 /* the voltage should not be zero nor equal to leakage ID */ 1783 if (vddc != 0 && vddc != vv_id) { 1784 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc); 1785 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; 1786 data->vddc_leakage.count++; 1787 } 1788 } 1789 } 1790 } 1791 1792 return 0; 1793 } 1794 1795 /** 1796 * Change virtual leakage voltage to actual value. 1797 * 1798 * @param hwmgr the address of the powerplay hardware manager. 
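 *
 * A leakage ID is a virtual voltage value in the ATOM_VIRTUAL_VOLTAGE_ID0
 * range (0xff01 ~ 0xff08). As a rough illustration (example values are
 * hypothetical): an entry holding 0xff01 is replaced with the actual
 * voltage recorded for that ID by smu7_get_evv_voltages(), e.g. 1150,
 * while ordinary voltage values below ATOM_VIRTUAL_VOLTAGE_ID0 pass
 * through unchanged.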
1799 * @param pointer to changing voltage 1800 * @param pointer to leakage table 1801 */ 1802 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr, 1803 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table) 1804 { 1805 uint32_t index; 1806 1807 /* search for leakage voltage ID 0xff01 ~ 0xff08 */ 1808 for (index = 0; index < leakage_table->count; index++) { 1809 /* if this voltage matches a leakage voltage ID */ 1810 /* patch with actual leakage voltage */ 1811 if (leakage_table->leakage_id[index] == *voltage) { 1812 *voltage = leakage_table->actual_voltage[index]; 1813 break; 1814 } 1815 } 1816 1817 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) 1818 pr_err("Voltage value looks like a Leakage ID but it's not patched \n"); 1819 } 1820 1821 /** 1822 * Patch voltage lookup table by EVV leakages. 1823 * 1824 * @param hwmgr the address of the powerplay hardware manager. 1825 * @param pointer to voltage lookup table 1826 * @param pointer to leakage table 1827 * @return always 0 1828 */ 1829 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, 1830 phm_ppt_v1_voltage_lookup_table *lookup_table, 1831 struct smu7_leakage_voltage *leakage_table) 1832 { 1833 uint32_t i; 1834 1835 for (i = 0; i < lookup_table->count; i++) 1836 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, 1837 &lookup_table->entries[i].us_vdd, leakage_table); 1838 1839 return 0; 1840 } 1841 1842 static int smu7_patch_clock_voltage_limits_with_vddc_leakage( 1843 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table, 1844 uint16_t *vddc) 1845 { 1846 struct phm_ppt_v1_information *table_info = 1847 (struct phm_ppt_v1_information *)(hwmgr->pptable); 1848 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); 1849 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = 1850 table_info->max_clock_voltage_on_dc.vddc; 1851 return 0; 1852 } 1853 1854 static int smu7_patch_voltage_dependency_tables_with_lookup_table( 1855 struct pp_hwmgr *hwmgr) 1856 { 1857 uint8_t entry_id; 1858 uint8_t voltage_id; 1859 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1860 struct phm_ppt_v1_information *table_info = 1861 (struct phm_ppt_v1_information *)(hwmgr->pptable); 1862 1863 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = 1864 table_info->vdd_dep_on_sclk; 1865 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = 1866 table_info->vdd_dep_on_mclk; 1867 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = 1868 table_info->mm_dep_table; 1869 1870 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 1871 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { 1872 voltage_id = sclk_table->entries[entry_id].vddInd; 1873 sclk_table->entries[entry_id].vddgfx = 1874 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd; 1875 } 1876 } else { 1877 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { 1878 voltage_id = sclk_table->entries[entry_id].vddInd; 1879 sclk_table->entries[entry_id].vddc = 1880 table_info->vddc_lookup_table->entries[voltage_id].us_vdd; 1881 } 1882 } 1883 1884 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { 1885 voltage_id = mclk_table->entries[entry_id].vddInd; 1886 mclk_table->entries[entry_id].vddc = 1887 table_info->vddc_lookup_table->entries[voltage_id].us_vdd; 1888 } 1889 1890 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { 1891 voltage_id = mm_table->entries[entry_id].vddcInd; 1892 mm_table->entries[entry_id].vddc = 1893 
table_info->vddc_lookup_table->entries[voltage_id].us_vdd; 1894 } 1895 1896 return 0; 1897 1898 } 1899 1900 static int phm_add_voltage(struct pp_hwmgr *hwmgr, 1901 phm_ppt_v1_voltage_lookup_table *look_up_table, 1902 phm_ppt_v1_voltage_lookup_record *record) 1903 { 1904 uint32_t i; 1905 1906 PP_ASSERT_WITH_CODE((NULL != look_up_table), 1907 "Lookup Table empty.", return -EINVAL); 1908 PP_ASSERT_WITH_CODE((0 != look_up_table->count), 1909 "Lookup Table empty.", return -EINVAL); 1910 1911 i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); 1912 PP_ASSERT_WITH_CODE((i >= look_up_table->count), 1913 "Lookup Table is full.", return -EINVAL); 1914 1915 /* This is to avoid entering duplicate calculated records. */ 1916 for (i = 0; i < look_up_table->count; i++) { 1917 if (look_up_table->entries[i].us_vdd == record->us_vdd) { 1918 if (look_up_table->entries[i].us_calculated == 1) 1919 return 0; 1920 break; 1921 } 1922 } 1923 1924 look_up_table->entries[i].us_calculated = 1; 1925 look_up_table->entries[i].us_vdd = record->us_vdd; 1926 look_up_table->entries[i].us_cac_low = record->us_cac_low; 1927 look_up_table->entries[i].us_cac_mid = record->us_cac_mid; 1928 look_up_table->entries[i].us_cac_high = record->us_cac_high; 1929 /* Only increment the count when we're appending, not replacing duplicate entry. */ 1930 if (i == look_up_table->count) 1931 look_up_table->count++; 1932 1933 return 0; 1934 } 1935 1936 1937 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) 1938 { 1939 uint8_t entry_id; 1940 struct phm_ppt_v1_voltage_lookup_record v_record; 1941 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1942 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 1943 1944 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; 1945 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; 1946 1947 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 1948 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { 1949 if (sclk_table->entries[entry_id].vdd_offset & (1 << 15)) 1950 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + 1951 sclk_table->entries[entry_id].vdd_offset - 0xFFFF; 1952 else 1953 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + 1954 sclk_table->entries[entry_id].vdd_offset; 1955 1956 sclk_table->entries[entry_id].vddc = 1957 v_record.us_cac_low = v_record.us_cac_mid = 1958 v_record.us_cac_high = v_record.us_vdd; 1959 1960 phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); 1961 } 1962 1963 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { 1964 if (mclk_table->entries[entry_id].vdd_offset & (1 << 15)) 1965 v_record.us_vdd = mclk_table->entries[entry_id].vddc + 1966 mclk_table->entries[entry_id].vdd_offset - 0xFFFF; 1967 else 1968 v_record.us_vdd = mclk_table->entries[entry_id].vddc + 1969 mclk_table->entries[entry_id].vdd_offset; 1970 1971 mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low = 1972 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; 1973 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); 1974 } 1975 } 1976 return 0; 1977 } 1978 1979 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) 1980 { 1981 uint8_t entry_id; 1982 struct phm_ppt_v1_voltage_lookup_record v_record; 1983 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1984 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information 
*)(hwmgr->pptable); 1985 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; 1986 1987 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 1988 for (entry_id = 0; entry_id < mm_table->count; entry_id++) { 1989 if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15)) 1990 v_record.us_vdd = mm_table->entries[entry_id].vddc + 1991 mm_table->entries[entry_id].vddgfx_offset - 0xFFFF; 1992 else 1993 v_record.us_vdd = mm_table->entries[entry_id].vddc + 1994 mm_table->entries[entry_id].vddgfx_offset; 1995 1996 /* Add the calculated VDDGFX to the VDDGFX lookup table */ 1997 mm_table->entries[entry_id].vddgfx = v_record.us_cac_low = 1998 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; 1999 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); 2000 } 2001 } 2002 return 0; 2003 } 2004 2005 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, 2006 struct phm_ppt_v1_voltage_lookup_table *lookup_table) 2007 { 2008 uint32_t table_size, i, j; 2009 table_size = lookup_table->count; 2010 2011 PP_ASSERT_WITH_CODE(0 != lookup_table->count, 2012 "Lookup table is empty", return -EINVAL); 2013 2014 /* Sorting voltages */ 2015 for (i = 0; i < table_size - 1; i++) { 2016 for (j = i + 1; j > 0; j--) { 2017 if (lookup_table->entries[j].us_vdd < 2018 lookup_table->entries[j - 1].us_vdd) { 2019 swap(lookup_table->entries[j - 1], 2020 lookup_table->entries[j]); 2021 } 2022 } 2023 } 2024 2025 return 0; 2026 } 2027 2028 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr) 2029 { 2030 int result = 0; 2031 int tmp_result; 2032 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2033 struct phm_ppt_v1_information *table_info = 2034 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2035 2036 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2037 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, 2038 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); 2039 if (tmp_result != 0) 2040 result = tmp_result; 2041 2042 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, 2043 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage)); 2044 } else { 2045 2046 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, 2047 table_info->vddc_lookup_table, &(data->vddc_leakage)); 2048 if (tmp_result) 2049 result = tmp_result; 2050 2051 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, 2052 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); 2053 if (tmp_result) 2054 result = tmp_result; 2055 } 2056 2057 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr); 2058 if (tmp_result) 2059 result = tmp_result; 2060 2061 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr); 2062 if (tmp_result) 2063 result = tmp_result; 2064 2065 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr); 2066 if (tmp_result) 2067 result = tmp_result; 2068 2069 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table); 2070 if (tmp_result) 2071 result = tmp_result; 2072 2073 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); 2074 if (tmp_result) 2075 result = tmp_result; 2076 2077 return result; 2078 } 2079 2080 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) 2081 { 2082 struct phm_ppt_v1_information *table_info = 2083 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2084 2085 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = 2086 table_info->vdd_dep_on_sclk; 2087 struct 
phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = 2088 table_info->vdd_dep_on_mclk; 2089
2090 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, 2091 "VDD dependency on SCLK table is missing.", 2092 return -EINVAL);
2093 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, 2094 "VDD dependency on SCLK table has to have at least one entry.", 2095 return -EINVAL); 2096
2097 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, 2098 "VDD dependency on MCLK table is missing.", 2099 return -EINVAL);
2100 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, 2101 "VDD dependency on MCLK table has to have at least one entry.", 2102 return -EINVAL); 2103
2104 table_info->max_clock_voltage_on_ac.sclk = 2105 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2106 table_info->max_clock_voltage_on_ac.mclk = 2107 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2108 table_info->max_clock_voltage_on_ac.vddc = 2109 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2110 table_info->max_clock_voltage_on_ac.vddci = 2111 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; 2112
2113 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2114 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2115 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2116 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci; 2117
2118 return 0; 2119 } 2120
2121 static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) 2122 {
2123 struct phm_ppt_v1_information *table_info = 2124 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2125 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2126 struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2127 uint32_t i; 2128 uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2129 struct amdgpu_device *adev = hwmgr->adev; 2130
2131 if (table_info != NULL) { 2132 dep_mclk_table = table_info->vdd_dep_on_mclk; 2133 lookup_table = table_info->vddc_lookup_table; 2134 } else 2135 return 0; 2136
2137 hw_revision = adev->pdev->revision; 2138 sub_sys_id = adev->pdev->subsystem_device; 2139 sub_vendor_id = adev->pdev->subsystem_vendor; 2140
2141 if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 && 2142 ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) || 2143 (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) || 2144 (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2145 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) 2146 return 0; 2147
2148 for (i = 0; i < lookup_table->count; i++) { 2149 if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { 2150 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; 2151 return 0; 2152 } 2153 } 2154 } 2155 return 0; 2156 } 2157
2158 static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr) 2159 {
2160 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2161 uint32_t temp_reg;
2162 struct phm_ppt_v1_information *table_info = 2163 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2164 2165
2166 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2167 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
2168 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { 2169 case 0: 2170 temp_reg = PHM_SET_FIELD(temp_reg,
CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); 2171 break; 2172 case 1: 2173 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); 2174 break; 2175 case 2: 2176 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); 2177 break; 2178 case 3: 2179 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); 2180 break; 2181 case 4: 2182 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); 2183 break; 2184 default: 2185 break; 2186 } 2187 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); 2188 } 2189 2190 if (table_info == NULL) 2191 return 0; 2192 2193 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && 2194 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { 2195 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = 2196 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; 2197 2198 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = 2199 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; 2200 2201 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; 2202 2203 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; 2204 2205 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = 2206 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; 2207 2208 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; 2209 2210 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? 2211 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0; 2212 2213 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; 2214 table_info->cac_dtp_table->usOperatingTempStep = 1; 2215 table_info->cac_dtp_table->usOperatingTempHyst = 1; 2216 2217 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = 2218 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; 2219 2220 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = 2221 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; 2222 2223 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = 2224 table_info->cac_dtp_table->usOperatingTempMinLimit; 2225 2226 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = 2227 table_info->cac_dtp_table->usOperatingTempMaxLimit; 2228 2229 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = 2230 table_info->cac_dtp_table->usDefaultTargetOperatingTemp; 2231 2232 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = 2233 table_info->cac_dtp_table->usOperatingTempStep; 2234 2235 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = 2236 table_info->cac_dtp_table->usTargetOperatingTemp; 2237 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK) 2238 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 2239 PHM_PlatformCaps_ODFuzzyFanControlSupport); 2240 } 2241 2242 return 0; 2243 } 2244 2245 /** 2246 * Change virtual leakage voltage to actual value. 2247 * 2248 * @param hwmgr the address of the powerplay hardware manager. 
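 *
 * Same leakage-ID substitution as the pptable v1 variant above, but for
 * the legacy (v0) dyn_state tables, whose entries store voltages as
 * uint32_t rather than uint16_t.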
2249 * @param pointer to changing voltage 2250 * @param pointer to leakage table 2251 */ 2252 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, 2253 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table) 2254 { 2255 uint32_t index; 2256 2257 /* search for leakage voltage ID 0xff01 ~ 0xff08 */ 2258 for (index = 0; index < leakage_table->count; index++) { 2259 /* if this voltage matches a leakage voltage ID */ 2260 /* patch with actual leakage voltage */ 2261 if (leakage_table->leakage_id[index] == *voltage) { 2262 *voltage = leakage_table->actual_voltage[index]; 2263 break; 2264 } 2265 } 2266 2267 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) 2268 pr_err("Voltage value looks like a Leakage ID but it's not patched \n"); 2269 } 2270 2271 2272 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr, 2273 struct phm_clock_voltage_dependency_table *tab) 2274 { 2275 uint16_t i; 2276 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2277 2278 if (tab) 2279 for (i = 0; i < tab->count; i++) 2280 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2281 &data->vddc_leakage); 2282 2283 return 0; 2284 } 2285 2286 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr, 2287 struct phm_clock_voltage_dependency_table *tab) 2288 { 2289 uint16_t i; 2290 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2291 2292 if (tab) 2293 for (i = 0; i < tab->count; i++) 2294 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2295 &data->vddci_leakage); 2296 2297 return 0; 2298 } 2299 2300 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr, 2301 struct phm_vce_clock_voltage_dependency_table *tab) 2302 { 2303 uint16_t i; 2304 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2305 2306 if (tab) 2307 for (i = 0; i < tab->count; i++) 2308 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2309 &data->vddc_leakage); 2310 2311 return 0; 2312 } 2313 2314 2315 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr, 2316 struct phm_uvd_clock_voltage_dependency_table *tab) 2317 { 2318 uint16_t i; 2319 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2320 2321 if (tab) 2322 for (i = 0; i < tab->count; i++) 2323 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2324 &data->vddc_leakage); 2325 2326 return 0; 2327 } 2328 2329 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, 2330 struct phm_phase_shedding_limits_table *tab) 2331 { 2332 uint16_t i; 2333 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2334 2335 if (tab) 2336 for (i = 0; i < tab->count; i++) 2337 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage, 2338 &data->vddc_leakage); 2339 2340 return 0; 2341 } 2342 2343 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr, 2344 struct phm_samu_clock_voltage_dependency_table *tab) 2345 { 2346 uint16_t i; 2347 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2348 2349 if (tab) 2350 for (i = 0; i < tab->count; i++) 2351 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2352 &data->vddc_leakage); 2353 2354 return 0; 2355 } 2356 2357 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, 2358 struct phm_acp_clock_voltage_dependency_table *tab) 2359 { 2360 uint16_t i; 2361 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2362 2363 if (tab) 2364 for (i = 0; i < tab->count; i++) 2365 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2366 &data->vddc_leakage); 2367 2368 return 0; 2369 } 2370 2371 static int 
smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, 2372 struct phm_clock_and_voltage_limits *tab) 2373 { 2374 uint32_t vddc, vddci; 2375 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2376 2377 if (tab) { 2378 vddc = tab->vddc; 2379 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, 2380 &data->vddc_leakage); 2381 tab->vddc = vddc; 2382 vddci = tab->vddci; 2383 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, 2384 &data->vddci_leakage); 2385 tab->vddci = vddci; 2386 } 2387 2388 return 0; 2389 } 2390 2391 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) 2392 { 2393 uint32_t i; 2394 uint32_t vddc; 2395 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2396 2397 if (tab) { 2398 for (i = 0; i < tab->count; i++) { 2399 vddc = (uint32_t)(tab->entries[i].Vddc); 2400 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage); 2401 tab->entries[i].Vddc = (uint16_t)vddc; 2402 } 2403 } 2404 2405 return 0; 2406 } 2407 2408 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) 2409 { 2410 int tmp; 2411 2412 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); 2413 if (tmp) 2414 return -EINVAL; 2415 2416 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); 2417 if (tmp) 2418 return -EINVAL; 2419 2420 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 2421 if (tmp) 2422 return -EINVAL; 2423 2424 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); 2425 if (tmp) 2426 return -EINVAL; 2427 2428 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); 2429 if (tmp) 2430 return -EINVAL; 2431 2432 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); 2433 if (tmp) 2434 return -EINVAL; 2435 2436 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table); 2437 if (tmp) 2438 return -EINVAL; 2439 2440 tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table); 2441 if (tmp) 2442 return -EINVAL; 2443 2444 tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table); 2445 if (tmp) 2446 return -EINVAL; 2447 2448 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac); 2449 if (tmp) 2450 return -EINVAL; 2451 2452 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc); 2453 if (tmp) 2454 return -EINVAL; 2455 2456 tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table); 2457 if (tmp) 2458 return -EINVAL; 2459 2460 return 0; 2461 } 2462 2463 2464 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) 2465 { 2466 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2467 2468 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; 2469 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; 2470 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; 2471 2472 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, 2473 "VDDC dependency on SCLK table is missing. This table is mandatory", 2474 return -EINVAL); 2475 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, 2476 "VDDC dependency on SCLK table has to have is missing. 
This table is mandatory", 2477 return -EINVAL); 2478 2479 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, 2480 "VDDC dependency on MCLK table is missing. This table is mandatory", 2481 return -EINVAL); 2482 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, 2483 "VDD dependency on MCLK table has to have is missing. This table is mandatory", 2484 return -EINVAL); 2485 2486 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; 2487 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 2488 2489 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = 2490 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; 2491 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = 2492 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; 2493 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = 2494 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 2495 2496 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { 2497 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v; 2498 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; 2499 } 2500 2501 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1) 2502 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; 2503 2504 return 0; 2505 } 2506 2507 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 2508 { 2509 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 2510 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; 2511 kfree(hwmgr->backend); 2512 hwmgr->backend = NULL; 2513 2514 return 0; 2515 } 2516 2517 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr) 2518 { 2519 uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id; 2520 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2521 int i; 2522 2523 if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) { 2524 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { 2525 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 2526 if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci, 2527 virtual_voltage_id, 2528 efuse_voltage_id) == 0) { 2529 if (vddc != 0 && vddc != virtual_voltage_id) { 2530 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; 2531 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; 2532 data->vddc_leakage.count++; 2533 } 2534 if (vddci != 0 && vddci != virtual_voltage_id) { 2535 data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci; 2536 data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id; 2537 data->vddci_leakage.count++; 2538 } 2539 } 2540 } 2541 } 2542 return 0; 2543 } 2544 2545 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 2546 { 2547 struct smu7_hwmgr *data; 2548 int result = 0; 2549 2550 data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); 2551 if (data == NULL) 2552 return -ENOMEM; 2553 2554 hwmgr->backend = data; 2555 smu7_patch_voltage_workaround(hwmgr); 2556 smu7_init_dpm_defaults(hwmgr); 2557 2558 /* Get leakage voltage based on leakage ID. */ 2559 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 2560 PHM_PlatformCaps_EVV)) { 2561 result = smu7_get_evv_voltages(hwmgr); 2562 if (result) { 2563 pr_info("Get EVV Voltage Failed. 
Abort Driver loading!\n"); 2564 return -EINVAL; 2565 } 2566 } else { 2567 smu7_get_elb_voltages(hwmgr); 2568 } 2569 2570 if (hwmgr->pp_table_version == PP_TABLE_V1) { 2571 smu7_complete_dependency_tables(hwmgr); 2572 smu7_set_private_data_based_on_pptable_v1(hwmgr); 2573 } else if (hwmgr->pp_table_version == PP_TABLE_V0) { 2574 smu7_patch_dependency_tables_with_leakage(hwmgr); 2575 smu7_set_private_data_based_on_pptable_v0(hwmgr); 2576 } 2577 2578 /* Initalize Dynamic State Adjustment Rule Settings */ 2579 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); 2580 2581 if (0 == result) { 2582 struct amdgpu_device *adev = hwmgr->adev; 2583 2584 data->is_tlu_enabled = false; 2585 2586 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 2587 SMU7_MAX_HARDWARE_POWERLEVELS; 2588 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; 2589 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; 2590 2591 data->pcie_gen_cap = adev->pm.pcie_gen_mask; 2592 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 2593 data->pcie_spc_cap = 20; 2594 data->pcie_lane_cap = adev->pm.pcie_mlw_mask; 2595 2596 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ 2597 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ 2598 hwmgr->platform_descriptor.clockStep.engineClock = 500; 2599 hwmgr->platform_descriptor.clockStep.memoryClock = 500; 2600 smu7_thermal_parameter_init(hwmgr); 2601 } else { 2602 /* Ignore return value in here, we are cleaning up a mess. */ 2603 smu7_hwmgr_backend_fini(hwmgr); 2604 } 2605 2606 return 0; 2607 } 2608 2609 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) 2610 { 2611 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2612 uint32_t level, tmp; 2613 2614 if (!data->pcie_dpm_key_disabled) { 2615 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { 2616 level = 0; 2617 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; 2618 while (tmp >>= 1) 2619 level++; 2620 2621 if (level) 2622 smum_send_msg_to_smc_with_parameter(hwmgr, 2623 PPSMC_MSG_PCIeDPM_ForceLevel, level, 2624 NULL); 2625 } 2626 } 2627 2628 if (!data->sclk_dpm_key_disabled) { 2629 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { 2630 level = 0; 2631 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; 2632 while (tmp >>= 1) 2633 level++; 2634 2635 if (level) 2636 smum_send_msg_to_smc_with_parameter(hwmgr, 2637 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2638 (1 << level), 2639 NULL); 2640 } 2641 } 2642 2643 if (!data->mclk_dpm_key_disabled) { 2644 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { 2645 level = 0; 2646 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; 2647 while (tmp >>= 1) 2648 level++; 2649 2650 if (level) 2651 smum_send_msg_to_smc_with_parameter(hwmgr, 2652 PPSMC_MSG_MCLKDPM_SetEnabledMask, 2653 (1 << level), 2654 NULL); 2655 } 2656 } 2657 2658 return 0; 2659 } 2660 2661 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) 2662 { 2663 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2664 2665 if (hwmgr->pp_table_version == PP_TABLE_V1) 2666 phm_apply_dal_min_voltage_request(hwmgr); 2667 /* TO DO for v0 iceland and Ci*/ 2668 2669 if (!data->sclk_dpm_key_disabled) { 2670 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) 2671 smum_send_msg_to_smc_with_parameter(hwmgr, 2672 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2673 data->dpm_level_enable_mask.sclk_dpm_enable_mask, 2674 NULL); 2675 } 2676 2677 if 
(!data->mclk_dpm_key_disabled) { 2678 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) 2679 smum_send_msg_to_smc_with_parameter(hwmgr, 2680 PPSMC_MSG_MCLKDPM_SetEnabledMask, 2681 data->dpm_level_enable_mask.mclk_dpm_enable_mask, 2682 NULL); 2683 } 2684 2685 return 0; 2686 } 2687 2688 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2689 { 2690 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2691 2692 if (!smum_is_dpm_running(hwmgr)) 2693 return -EINVAL; 2694 2695 if (!data->pcie_dpm_key_disabled) { 2696 smum_send_msg_to_smc(hwmgr, 2697 PPSMC_MSG_PCIeDPM_UnForceLevel, 2698 NULL); 2699 } 2700 2701 return smu7_upload_dpm_level_enable_mask(hwmgr); 2702 } 2703 2704 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) 2705 { 2706 struct smu7_hwmgr *data = 2707 (struct smu7_hwmgr *)(hwmgr->backend); 2708 uint32_t level; 2709 2710 if (!data->sclk_dpm_key_disabled) 2711 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { 2712 level = phm_get_lowest_enabled_level(hwmgr, 2713 data->dpm_level_enable_mask.sclk_dpm_enable_mask); 2714 smum_send_msg_to_smc_with_parameter(hwmgr, 2715 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2716 (1 << level), 2717 NULL); 2718 2719 } 2720 2721 if (!data->mclk_dpm_key_disabled) { 2722 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { 2723 level = phm_get_lowest_enabled_level(hwmgr, 2724 data->dpm_level_enable_mask.mclk_dpm_enable_mask); 2725 smum_send_msg_to_smc_with_parameter(hwmgr, 2726 PPSMC_MSG_MCLKDPM_SetEnabledMask, 2727 (1 << level), 2728 NULL); 2729 } 2730 } 2731 2732 if (!data->pcie_dpm_key_disabled) { 2733 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { 2734 level = phm_get_lowest_enabled_level(hwmgr, 2735 data->dpm_level_enable_mask.pcie_dpm_enable_mask); 2736 smum_send_msg_to_smc_with_parameter(hwmgr, 2737 PPSMC_MSG_PCIeDPM_ForceLevel, 2738 (level), 2739 NULL); 2740 } 2741 } 2742 2743 return 0; 2744 } 2745 2746 static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, 2747 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask) 2748 { 2749 uint32_t percentage; 2750 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2751 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; 2752 int32_t tmp_mclk; 2753 int32_t tmp_sclk; 2754 int32_t count; 2755 2756 if (golden_dpm_table->mclk_table.count < 1) 2757 return -EINVAL; 2758 2759 percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / 2760 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; 2761 2762 if (golden_dpm_table->mclk_table.count == 1) { 2763 percentage = 70; 2764 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; 2765 *mclk_mask = golden_dpm_table->mclk_table.count - 1; 2766 } else { 2767 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; 2768 *mclk_mask = golden_dpm_table->mclk_table.count - 2; 2769 } 2770 2771 tmp_sclk = tmp_mclk * percentage / 100; 2772 2773 if (hwmgr->pp_table_version == PP_TABLE_V0) { 2774 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; 2775 count >= 0; count--) { 2776 if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { 2777 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; 2778 *sclk_mask = count; 2779 break; 2780 } 2781 } 2782 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 2783 *sclk_mask = 0; 2784 tmp_sclk = 
hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; 2785 } 2786 2787 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 2788 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; 2789 } else if (hwmgr->pp_table_version == PP_TABLE_V1) { 2790 struct phm_ppt_v1_information *table_info = 2791 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2792 2793 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { 2794 if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { 2795 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk; 2796 *sclk_mask = count; 2797 break; 2798 } 2799 } 2800 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 2801 *sclk_mask = 0; 2802 tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; 2803 } 2804 2805 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 2806 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; 2807 } 2808 2809 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) 2810 *mclk_mask = 0; 2811 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 2812 *mclk_mask = golden_dpm_table->mclk_table.count - 1; 2813 2814 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; 2815 hwmgr->pstate_sclk = tmp_sclk; 2816 hwmgr->pstate_mclk = tmp_mclk; 2817 2818 return 0; 2819 } 2820 2821 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, 2822 enum amd_dpm_forced_level level) 2823 { 2824 int ret = 0; 2825 uint32_t sclk_mask = 0; 2826 uint32_t mclk_mask = 0; 2827 uint32_t pcie_mask = 0; 2828 2829 if (hwmgr->pstate_sclk == 0) 2830 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); 2831 2832 switch (level) { 2833 case AMD_DPM_FORCED_LEVEL_HIGH: 2834 ret = smu7_force_dpm_highest(hwmgr); 2835 break; 2836 case AMD_DPM_FORCED_LEVEL_LOW: 2837 ret = smu7_force_dpm_lowest(hwmgr); 2838 break; 2839 case AMD_DPM_FORCED_LEVEL_AUTO: 2840 ret = smu7_unforce_dpm_levels(hwmgr); 2841 break; 2842 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 2843 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 2844 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 2845 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 2846 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); 2847 if (ret) 2848 return ret; 2849 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); 2850 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); 2851 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); 2852 break; 2853 case AMD_DPM_FORCED_LEVEL_MANUAL: 2854 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 2855 default: 2856 break; 2857 } 2858 2859 if (!ret) { 2860 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 2861 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); 2862 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 2863 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); 2864 } 2865 return ret; 2866 } 2867 2868 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) 2869 { 2870 return sizeof(struct smu7_power_state); 2871 } 2872 2873 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, 2874 uint32_t vblank_time_us) 2875 { 2876 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2877 uint32_t switch_limit_us; 2878 2879 switch (hwmgr->chip_id) { 2880 case CHIP_POLARIS10: 2881 case CHIP_POLARIS11: 2882 case CHIP_POLARIS12: 2883 if (hwmgr->is_kicker) 2884 switch_limit_us = data->is_memory_gddr5 ? 450 : 150; 2885 else 2886 switch_limit_us = data->is_memory_gddr5 ? 
200 : 150; 2887 break; 2888 case CHIP_VEGAM: 2889 switch_limit_us = 30; 2890 break; 2891 default: 2892 switch_limit_us = data->is_memory_gddr5 ? 450 : 150; 2893 break; 2894 } 2895 2896 if (vblank_time_us < switch_limit_us) 2897 return true; 2898 else 2899 return false; 2900 } 2901 2902 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 2903 struct pp_power_state *request_ps, 2904 const struct pp_power_state *current_ps) 2905 { 2906 struct amdgpu_device *adev = hwmgr->adev; 2907 struct smu7_power_state *smu7_ps = 2908 cast_phw_smu7_power_state(&request_ps->hardware); 2909 uint32_t sclk; 2910 uint32_t mclk; 2911 struct PP_Clocks minimum_clocks = {0}; 2912 bool disable_mclk_switching; 2913 bool disable_mclk_switching_for_frame_lock; 2914 const struct phm_clock_and_voltage_limits *max_limits; 2915 uint32_t i; 2916 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2917 struct phm_ppt_v1_information *table_info = 2918 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2919 int32_t count; 2920 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; 2921 2922 data->battery_state = (PP_StateUILabel_Battery == 2923 request_ps->classification.ui_label); 2924 2925 PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2, 2926 "VI should always have 2 performance levels", 2927 ); 2928 2929 max_limits = adev->pm.ac_power ? 2930 &(hwmgr->dyn_state.max_clock_voltage_on_ac) : 2931 &(hwmgr->dyn_state.max_clock_voltage_on_dc); 2932 2933 /* Cap clock DPM tables at DC MAX if it is in DC. */ 2934 if (!adev->pm.ac_power) { 2935 for (i = 0; i < smu7_ps->performance_level_count; i++) { 2936 if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) 2937 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; 2938 if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk) 2939 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk; 2940 } 2941 } 2942 2943 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; 2944 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; 2945 2946 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 2947 PHM_PlatformCaps_StablePState)) { 2948 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); 2949 stable_pstate_sclk = (max_limits->sclk * 75) / 100; 2950 2951 for (count = table_info->vdd_dep_on_sclk->count - 1; 2952 count >= 0; count--) { 2953 if (stable_pstate_sclk >= 2954 table_info->vdd_dep_on_sclk->entries[count].clk) { 2955 stable_pstate_sclk = 2956 table_info->vdd_dep_on_sclk->entries[count].clk; 2957 break; 2958 } 2959 } 2960 2961 if (count < 0) 2962 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; 2963 2964 stable_pstate_mclk = max_limits->mclk; 2965 2966 minimum_clocks.engineClock = stable_pstate_sclk; 2967 minimum_clocks.memoryClock = stable_pstate_mclk; 2968 } 2969 2970 disable_mclk_switching_for_frame_lock = phm_cap_enabled( 2971 hwmgr->platform_descriptor.platformCaps, 2972 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); 2973 2974 2975 if (hwmgr->display_config->num_display == 0) 2976 disable_mclk_switching = false; 2977 else 2978 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && 2979 !hwmgr->display_config->multi_monitor_in_sync) || 2980 disable_mclk_switching_for_frame_lock || 2981 smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time); 2982 2983 sclk = smu7_ps->performance_levels[0].engine_clock; 2984 mclk = smu7_ps->performance_levels[0].memory_clock; 2985 2986 if (disable_mclk_switching) 2987 mclk = 
smu7_ps->performance_levels 2988 [smu7_ps->performance_level_count - 1].memory_clock; 2989 2990 if (sclk < minimum_clocks.engineClock) 2991 sclk = (minimum_clocks.engineClock > max_limits->sclk) ? 2992 max_limits->sclk : minimum_clocks.engineClock; 2993 2994 if (mclk < minimum_clocks.memoryClock) 2995 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? 2996 max_limits->mclk : minimum_clocks.memoryClock; 2997 2998 smu7_ps->performance_levels[0].engine_clock = sclk; 2999 smu7_ps->performance_levels[0].memory_clock = mclk; 3000 3001 smu7_ps->performance_levels[1].engine_clock = 3002 (smu7_ps->performance_levels[1].engine_clock >= 3003 smu7_ps->performance_levels[0].engine_clock) ? 3004 smu7_ps->performance_levels[1].engine_clock : 3005 smu7_ps->performance_levels[0].engine_clock; 3006 3007 if (disable_mclk_switching) { 3008 if (mclk < smu7_ps->performance_levels[1].memory_clock) 3009 mclk = smu7_ps->performance_levels[1].memory_clock; 3010 3011 smu7_ps->performance_levels[0].memory_clock = mclk; 3012 smu7_ps->performance_levels[1].memory_clock = mclk; 3013 } else { 3014 if (smu7_ps->performance_levels[1].memory_clock < 3015 smu7_ps->performance_levels[0].memory_clock) 3016 smu7_ps->performance_levels[1].memory_clock = 3017 smu7_ps->performance_levels[0].memory_clock; 3018 } 3019 3020 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 3021 PHM_PlatformCaps_StablePState)) { 3022 for (i = 0; i < smu7_ps->performance_level_count; i++) { 3023 smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk; 3024 smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk; 3025 smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; 3026 smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; 3027 } 3028 } 3029 return 0; 3030 } 3031 3032 3033 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 3034 { 3035 struct pp_power_state *ps; 3036 struct smu7_power_state *smu7_ps; 3037 3038 if (hwmgr == NULL) 3039 return -EINVAL; 3040 3041 ps = hwmgr->request_ps; 3042 3043 if (ps == NULL) 3044 return -EINVAL; 3045 3046 smu7_ps = cast_phw_smu7_power_state(&ps->hardware); 3047 3048 if (low) 3049 return smu7_ps->performance_levels[0].memory_clock; 3050 else 3051 return smu7_ps->performance_levels 3052 [smu7_ps->performance_level_count-1].memory_clock; 3053 } 3054 3055 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 3056 { 3057 struct pp_power_state *ps; 3058 struct smu7_power_state *smu7_ps; 3059 3060 if (hwmgr == NULL) 3061 return -EINVAL; 3062 3063 ps = hwmgr->request_ps; 3064 3065 if (ps == NULL) 3066 return -EINVAL; 3067 3068 smu7_ps = cast_phw_smu7_power_state(&ps->hardware); 3069 3070 if (low) 3071 return smu7_ps->performance_levels[0].engine_clock; 3072 else 3073 return smu7_ps->performance_levels 3074 [smu7_ps->performance_level_count-1].engine_clock; 3075 } 3076 3077 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, 3078 struct pp_hw_power_state *hw_ps) 3079 { 3080 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3081 struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps; 3082 ATOM_FIRMWARE_INFO_V2_2 *fw_info; 3083 uint16_t size; 3084 uint8_t frev, crev; 3085 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); 3086 3087 /* First retrieve the Boot clocks and VDDC from the firmware info table. 3088 * We assume here that fw_info is unchanged if this call fails. 
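 * The values read here (default engine/memory clock and the boot-up
 * VDDC/VDDCI/MVDD voltages), together with the currently reported PCIe
 * speed and lane count, become the VBIOS boot state and are then copied
 * into performance level 0 of this power state.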
3089 */ 3090 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index, 3091 &size, &frev, &crev); 3092 if (!fw_info) 3093 /* During a test, there is no firmware info table. */ 3094 return 0; 3095 3096 /* Patch the state. */ 3097 data->vbios_boot_state.sclk_bootup_value = 3098 le32_to_cpu(fw_info->ulDefaultEngineClock); 3099 data->vbios_boot_state.mclk_bootup_value = 3100 le32_to_cpu(fw_info->ulDefaultMemoryClock); 3101 data->vbios_boot_state.mvdd_bootup_value = 3102 le16_to_cpu(fw_info->usBootUpMVDDCVoltage); 3103 data->vbios_boot_state.vddc_bootup_value = 3104 le16_to_cpu(fw_info->usBootUpVDDCVoltage); 3105 data->vbios_boot_state.vddci_bootup_value = 3106 le16_to_cpu(fw_info->usBootUpVDDCIVoltage); 3107 data->vbios_boot_state.pcie_gen_bootup_value = 3108 smu7_get_current_pcie_speed(hwmgr); 3109 3110 data->vbios_boot_state.pcie_lane_bootup_value = 3111 (uint16_t)smu7_get_current_pcie_lane_number(hwmgr); 3112 3113 /* set boot power state */ 3114 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; 3115 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; 3116 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; 3117 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; 3118 3119 return 0; 3120 } 3121 3122 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) 3123 { 3124 int result; 3125 unsigned long ret = 0; 3126 3127 if (hwmgr->pp_table_version == PP_TABLE_V0) { 3128 result = pp_tables_get_num_of_entries(hwmgr, &ret); 3129 return result ? 0 : ret; 3130 } else if (hwmgr->pp_table_version == PP_TABLE_V1) { 3131 result = get_number_of_powerplay_table_entries_v1_0(hwmgr); 3132 return result; 3133 } 3134 return 0; 3135 } 3136 3137 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, 3138 void *state, struct pp_power_state *power_state, 3139 void *pp_table, uint32_t classification_flag) 3140 { 3141 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3142 struct smu7_power_state *smu7_power_state = 3143 (struct smu7_power_state *)(&(power_state->hardware)); 3144 struct smu7_performance_level *performance_level; 3145 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; 3146 ATOM_Tonga_POWERPLAYTABLE *powerplay_table = 3147 (ATOM_Tonga_POWERPLAYTABLE *)pp_table; 3148 PPTable_Generic_SubTable_Header *sclk_dep_table = 3149 (PPTable_Generic_SubTable_Header *) 3150 (((unsigned long)powerplay_table) + 3151 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 3152 3153 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 3154 (ATOM_Tonga_MCLK_Dependency_Table *) 3155 (((unsigned long)powerplay_table) + 3156 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); 3157 3158 /* The following fields are not initialized here: id orderedList allStatesList */ 3159 power_state->classification.ui_label = 3160 (le16_to_cpu(state_entry->usClassification) & 3161 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> 3162 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; 3163 power_state->classification.flags = classification_flag; 3164 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ 3165 3166 power_state->classification.temporary_state = false; 3167 power_state->classification.to_be_deleted = false; 3168 3169 power_state->validation.disallowOnDC = 3170 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & 3171 ATOM_Tonga_DISALLOW_ON_DC)); 3172 3173 power_state->pcie.lanes = 0; 3174 3175 
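/* Default the remaining display/UVD/temperature fields; of these, only
 * enableVariBright is actually derived from the state's ulCapsAndSettings
 * flags below. */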
power_state->display.disableFrameModulation = false; 3176 power_state->display.limitRefreshrate = false; 3177 power_state->display.enableVariBright = 3178 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & 3179 ATOM_Tonga_ENABLE_VARIBRIGHT)); 3180 3181 power_state->validation.supportedPowerLevels = 0; 3182 power_state->uvd_clocks.VCLK = 0; 3183 power_state->uvd_clocks.DCLK = 0; 3184 power_state->temperatures.min = 0; 3185 power_state->temperatures.max = 0; 3186 3187 performance_level = &(smu7_power_state->performance_levels 3188 [smu7_power_state->performance_level_count++]); 3189 3190 PP_ASSERT_WITH_CODE( 3191 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), 3192 "Performance levels exceeds SMC limit!", 3193 return -EINVAL); 3194 3195 PP_ASSERT_WITH_CODE( 3196 (smu7_power_state->performance_level_count <= 3197 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), 3198 "Performance levels exceeds Driver limit!", 3199 return -EINVAL); 3200 3201 /* Performance levels are arranged from low to high. */ 3202 performance_level->memory_clock = mclk_dep_table->entries 3203 [state_entry->ucMemoryClockIndexLow].ulMclk; 3204 if (sclk_dep_table->ucRevId == 0) 3205 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3206 [state_entry->ucEngineClockIndexLow].ulSclk; 3207 else if (sclk_dep_table->ucRevId == 1) 3208 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3209 [state_entry->ucEngineClockIndexLow].ulSclk; 3210 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3211 state_entry->ucPCIEGenLow); 3212 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, 3213 state_entry->ucPCIELaneLow); 3214 3215 performance_level = &(smu7_power_state->performance_levels 3216 [smu7_power_state->performance_level_count++]); 3217 performance_level->memory_clock = mclk_dep_table->entries 3218 [state_entry->ucMemoryClockIndexHigh].ulMclk; 3219 3220 if (sclk_dep_table->ucRevId == 0) 3221 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3222 [state_entry->ucEngineClockIndexHigh].ulSclk; 3223 else if (sclk_dep_table->ucRevId == 1) 3224 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3225 [state_entry->ucEngineClockIndexHigh].ulSclk; 3226 3227 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3228 state_entry->ucPCIEGenHigh); 3229 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, 3230 state_entry->ucPCIELaneHigh); 3231 3232 return 0; 3233 } 3234 3235 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, 3236 unsigned long entry_index, struct pp_power_state *state) 3237 { 3238 int result; 3239 struct smu7_power_state *ps; 3240 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3241 struct phm_ppt_v1_information *table_info = 3242 (struct phm_ppt_v1_information *)(hwmgr->pptable); 3243 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = 3244 table_info->vdd_dep_on_mclk; 3245 3246 state->hardware.magic = PHM_VIslands_Magic; 3247 3248 ps = (struct smu7_power_state *)(&state->hardware); 3249 3250 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, 3251 smu7_get_pp_table_entry_callback_func_v1); 3252 3253 /* This is the earliest time we have all the dependency table and the VBIOS boot state 3254 * as PP_Tables_GetPowerPlayTableEntry retrieves the 
VBIOS boot state 3255 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state 3256 */ 3257 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { 3258 if (dep_mclk_table->entries[0].clk != 3259 data->vbios_boot_state.mclk_bootup_value) 3260 pr_debug("Single MCLK entry VDDCI/MCLK dependency table " 3261 "does not match VBIOS boot MCLK level"); 3262 if (dep_mclk_table->entries[0].vddci != 3263 data->vbios_boot_state.vddci_bootup_value) 3264 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " 3265 "does not match VBIOS boot VDDCI level"); 3266 } 3267 3268 /* set DC compatible flag if this state supports DC */ 3269 if (!state->validation.disallowOnDC) 3270 ps->dc_compatible = true; 3271 3272 if (state->classification.flags & PP_StateClassificationFlag_ACPI) 3273 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; 3274 3275 ps->uvd_clks.vclk = state->uvd_clocks.VCLK; 3276 ps->uvd_clks.dclk = state->uvd_clocks.DCLK; 3277 3278 if (!result) { 3279 uint32_t i; 3280 3281 switch (state->classification.ui_label) { 3282 case PP_StateUILabel_Performance: 3283 data->use_pcie_performance_levels = true; 3284 for (i = 0; i < ps->performance_level_count; i++) { 3285 if (data->pcie_gen_performance.max < 3286 ps->performance_levels[i].pcie_gen) 3287 data->pcie_gen_performance.max = 3288 ps->performance_levels[i].pcie_gen; 3289 3290 if (data->pcie_gen_performance.min > 3291 ps->performance_levels[i].pcie_gen) 3292 data->pcie_gen_performance.min = 3293 ps->performance_levels[i].pcie_gen; 3294 3295 if (data->pcie_lane_performance.max < 3296 ps->performance_levels[i].pcie_lane) 3297 data->pcie_lane_performance.max = 3298 ps->performance_levels[i].pcie_lane; 3299 if (data->pcie_lane_performance.min > 3300 ps->performance_levels[i].pcie_lane) 3301 data->pcie_lane_performance.min = 3302 ps->performance_levels[i].pcie_lane; 3303 } 3304 break; 3305 case PP_StateUILabel_Battery: 3306 data->use_pcie_power_saving_levels = true; 3307 3308 for (i = 0; i < ps->performance_level_count; i++) { 3309 if (data->pcie_gen_power_saving.max < 3310 ps->performance_levels[i].pcie_gen) 3311 data->pcie_gen_power_saving.max = 3312 ps->performance_levels[i].pcie_gen; 3313 3314 if (data->pcie_gen_power_saving.min > 3315 ps->performance_levels[i].pcie_gen) 3316 data->pcie_gen_power_saving.min = 3317 ps->performance_levels[i].pcie_gen; 3318 3319 if (data->pcie_lane_power_saving.max < 3320 ps->performance_levels[i].pcie_lane) 3321 data->pcie_lane_power_saving.max = 3322 ps->performance_levels[i].pcie_lane; 3323 3324 if (data->pcie_lane_power_saving.min > 3325 ps->performance_levels[i].pcie_lane) 3326 data->pcie_lane_power_saving.min = 3327 ps->performance_levels[i].pcie_lane; 3328 } 3329 break; 3330 default: 3331 break; 3332 } 3333 } 3334 return 0; 3335 } 3336 3337 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, 3338 struct pp_hw_power_state *power_state, 3339 unsigned int index, const void *clock_info) 3340 { 3341 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3342 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state); 3343 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; 3344 struct smu7_performance_level *performance_level; 3345 uint32_t engine_clock, memory_clock; 3346 uint16_t pcie_gen_from_bios; 3347 3348 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; 3349 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; 3350 3351 if 
(!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) 3352 data->highest_mclk = memory_clock; 3353 3354 PP_ASSERT_WITH_CODE( 3355 (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), 3356 "Performance levels exceeds SMC limit!", 3357 return -EINVAL); 3358 3359 PP_ASSERT_WITH_CODE( 3360 (ps->performance_level_count < 3361 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), 3362 "Performance levels exceeds Driver limit, Skip!", 3363 return 0); 3364 3365 performance_level = &(ps->performance_levels 3366 [ps->performance_level_count++]); 3367 3368 /* Performance levels are arranged from low to high. */ 3369 performance_level->memory_clock = memory_clock; 3370 performance_level->engine_clock = engine_clock; 3371 3372 pcie_gen_from_bios = visland_clk_info->ucPCIEGen; 3373 3374 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); 3375 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); 3376 3377 return 0; 3378 } 3379 3380 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, 3381 unsigned long entry_index, struct pp_power_state *state) 3382 { 3383 int result; 3384 struct smu7_power_state *ps; 3385 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3386 struct phm_clock_voltage_dependency_table *dep_mclk_table = 3387 hwmgr->dyn_state.vddci_dependency_on_mclk; 3388 3389 memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); 3390 3391 state->hardware.magic = PHM_VIslands_Magic; 3392 3393 ps = (struct smu7_power_state *)(&state->hardware); 3394 3395 result = pp_tables_get_entry(hwmgr, entry_index, state, 3396 smu7_get_pp_table_entry_callback_func_v0); 3397 3398 /* 3399 * This is the earliest time we have all the dependency table 3400 * and the VBIOS boot state as 3401 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot 3402 * state if there is only one VDDCI/MCLK level, check if it's 3403 * the same as VBIOS boot state 3404 */ 3405 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { 3406 if (dep_mclk_table->entries[0].clk != 3407 data->vbios_boot_state.mclk_bootup_value) 3408 pr_debug("Single MCLK entry VDDCI/MCLK dependency table " 3409 "does not match VBIOS boot MCLK level"); 3410 if (dep_mclk_table->entries[0].v != 3411 data->vbios_boot_state.vddci_bootup_value) 3412 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " 3413 "does not match VBIOS boot VDDCI level"); 3414 } 3415 3416 /* set DC compatible flag if this state supports DC */ 3417 if (!state->validation.disallowOnDC) 3418 ps->dc_compatible = true; 3419 3420 if (state->classification.flags & PP_StateClassificationFlag_ACPI) 3421 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; 3422 3423 ps->uvd_clks.vclk = state->uvd_clocks.VCLK; 3424 ps->uvd_clks.dclk = state->uvd_clocks.DCLK; 3425 3426 if (!result) { 3427 uint32_t i; 3428 3429 switch (state->classification.ui_label) { 3430 case PP_StateUILabel_Performance: 3431 data->use_pcie_performance_levels = true; 3432 3433 for (i = 0; i < ps->performance_level_count; i++) { 3434 if (data->pcie_gen_performance.max < 3435 ps->performance_levels[i].pcie_gen) 3436 data->pcie_gen_performance.max = 3437 ps->performance_levels[i].pcie_gen; 3438 3439 if (data->pcie_gen_performance.min > 3440 ps->performance_levels[i].pcie_gen) 3441 data->pcie_gen_performance.min = 3442 ps->performance_levels[i].pcie_gen; 3443 3444 if (data->pcie_lane_performance.max < 3445 
ps->performance_levels[i].pcie_lane) 3446 data->pcie_lane_performance.max = 3447 ps->performance_levels[i].pcie_lane; 3448 3449 if (data->pcie_lane_performance.min > 3450 ps->performance_levels[i].pcie_lane) 3451 data->pcie_lane_performance.min = 3452 ps->performance_levels[i].pcie_lane; 3453 } 3454 break; 3455 case PP_StateUILabel_Battery: 3456 data->use_pcie_power_saving_levels = true; 3457 3458 for (i = 0; i < ps->performance_level_count; i++) { 3459 if (data->pcie_gen_power_saving.max < 3460 ps->performance_levels[i].pcie_gen) 3461 data->pcie_gen_power_saving.max = 3462 ps->performance_levels[i].pcie_gen; 3463 3464 if (data->pcie_gen_power_saving.min > 3465 ps->performance_levels[i].pcie_gen) 3466 data->pcie_gen_power_saving.min = 3467 ps->performance_levels[i].pcie_gen; 3468 3469 if (data->pcie_lane_power_saving.max < 3470 ps->performance_levels[i].pcie_lane) 3471 data->pcie_lane_power_saving.max = 3472 ps->performance_levels[i].pcie_lane; 3473 3474 if (data->pcie_lane_power_saving.min > 3475 ps->performance_levels[i].pcie_lane) 3476 data->pcie_lane_power_saving.min = 3477 ps->performance_levels[i].pcie_lane; 3478 } 3479 break; 3480 default: 3481 break; 3482 } 3483 } 3484 return 0; 3485 } 3486 3487 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, 3488 unsigned long entry_index, struct pp_power_state *state) 3489 { 3490 if (hwmgr->pp_table_version == PP_TABLE_V0) 3491 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state); 3492 else if (hwmgr->pp_table_version == PP_TABLE_V1) 3493 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state); 3494 3495 return 0; 3496 } 3497 3498 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) 3499 { 3500 struct amdgpu_device *adev = hwmgr->adev; 3501 int i; 3502 u32 tmp = 0; 3503 3504 if (!query) 3505 return -EINVAL; 3506 3507 /* 3508 * PPSMC_MSG_GetCurrPkgPwr is not supported on: 3509 * - Hawaii 3510 * - Bonaire 3511 * - Fiji 3512 * - Tonga 3513 */ 3514 if ((adev->asic_type != CHIP_HAWAII) && 3515 (adev->asic_type != CHIP_BONAIRE) && 3516 (adev->asic_type != CHIP_FIJI) && 3517 (adev->asic_type != CHIP_TONGA)) { 3518 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp); 3519 *query = tmp; 3520 3521 if (tmp != 0) 3522 return 0; 3523 } 3524 3525 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL); 3526 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 3527 ixSMU_PM_STATUS_95, 0); 3528 3529 for (i = 0; i < 10; i++) { 3530 msleep(500); 3531 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL); 3532 tmp = cgs_read_ind_register(hwmgr->device, 3533 CGS_IND_REG__SMC, 3534 ixSMU_PM_STATUS_95); 3535 if (tmp != 0) 3536 break; 3537 } 3538 *query = tmp; 3539 3540 return 0; 3541 } 3542 3543 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, 3544 void *value, int *size) 3545 { 3546 uint32_t sclk, mclk, activity_percent; 3547 uint32_t offset, val_vid; 3548 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3549 3550 /* size must be at least 4 bytes for all sensors */ 3551 if (*size < 4) 3552 return -EINVAL; 3553 3554 switch (idx) { 3555 case AMDGPU_PP_SENSOR_GFX_SCLK: 3556 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk); 3557 *((uint32_t *)value) = sclk; 3558 *size = 4; 3559 return 0; 3560 case AMDGPU_PP_SENSOR_GFX_MCLK: 3561 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk); 3562 *((uint32_t *)value) = mclk; 3563 *size = 4; 3564 return 0; 3565 case AMDGPU_PP_SENSOR_GPU_LOAD: 3566 case AMDGPU_PP_SENSOR_MEM_LOAD: 3567 offset = 
data->soft_regs_start + smum_get_offsetof(hwmgr, 3568 SMU_SoftRegisters, 3569 (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ? 3570 AverageGraphicsActivity: 3571 AverageMemoryActivity); 3572 3573 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); 3574 activity_percent += 0x80; 3575 activity_percent >>= 8; 3576 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; 3577 *size = 4; 3578 return 0; 3579 case AMDGPU_PP_SENSOR_GPU_TEMP: 3580 *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr); 3581 *size = 4; 3582 return 0; 3583 case AMDGPU_PP_SENSOR_UVD_POWER: 3584 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; 3585 *size = 4; 3586 return 0; 3587 case AMDGPU_PP_SENSOR_VCE_POWER: 3588 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; 3589 *size = 4; 3590 return 0; 3591 case AMDGPU_PP_SENSOR_GPU_POWER: 3592 return smu7_get_gpu_power(hwmgr, (uint32_t *)value); 3593 case AMDGPU_PP_SENSOR_VDDGFX: 3594 if ((data->vr_config & VRCONF_VDDGFX_MASK) == 3595 (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) 3596 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, 3597 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); 3598 else 3599 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, 3600 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID); 3601 3602 *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid); 3603 return 0; 3604 default: 3605 return -EINVAL; 3606 } 3607 } 3608 3609 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) 3610 { 3611 const struct phm_set_power_state_input *states = 3612 (const struct phm_set_power_state_input *)input; 3613 const struct smu7_power_state *smu7_ps = 3614 cast_const_phw_smu7_power_state(states->pnew_state); 3615 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3616 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 3617 uint32_t sclk = smu7_ps->performance_levels 3618 [smu7_ps->performance_level_count - 1].engine_clock; 3619 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 3620 uint32_t mclk = smu7_ps->performance_levels 3621 [smu7_ps->performance_level_count - 1].memory_clock; 3622 struct PP_Clocks min_clocks = {0}; 3623 uint32_t i; 3624 3625 for (i = 0; i < sclk_table->count; i++) { 3626 if (sclk == sclk_table->dpm_levels[i].value) 3627 break; 3628 } 3629 3630 if (i >= sclk_table->count) { 3631 if (sclk > sclk_table->dpm_levels[i-1].value) { 3632 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 3633 sclk_table->dpm_levels[i-1].value = sclk; 3634 } 3635 } else { 3636 /* TODO: Check SCLK in DAL's minimum clocks 3637 * in case DeepSleep divider update is required. 
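* (As written, min_clocks is left zero-initialized rather than filled in from
* DAL, so the check below simply sets DPMTABLE_UPDATE_SCLK whenever the cached
* display_timing.min_clock_in_sr is at or above SMU7_MINIMUM_ENGINE_CLOCK.)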
3638 */ 3639 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && 3640 (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK || 3641 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) 3642 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; 3643 } 3644 3645 for (i = 0; i < mclk_table->count; i++) { 3646 if (mclk == mclk_table->dpm_levels[i].value) 3647 break; 3648 } 3649 3650 if (i >= mclk_table->count) { 3651 if (mclk > mclk_table->dpm_levels[i-1].value) { 3652 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 3653 mclk_table->dpm_levels[i-1].value = mclk; 3654 } 3655 } 3656 3657 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) 3658 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; 3659 3660 return 0; 3661 } 3662 3663 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr, 3664 const struct smu7_power_state *smu7_ps) 3665 { 3666 uint32_t i; 3667 uint32_t sclk, max_sclk = 0; 3668 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3669 struct smu7_dpm_table *dpm_table = &data->dpm_table; 3670 3671 for (i = 0; i < smu7_ps->performance_level_count; i++) { 3672 sclk = smu7_ps->performance_levels[i].engine_clock; 3673 if (max_sclk < sclk) 3674 max_sclk = sclk; 3675 } 3676 3677 for (i = 0; i < dpm_table->sclk_table.count; i++) { 3678 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) 3679 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? 3680 dpm_table->pcie_speed_table.dpm_levels 3681 [dpm_table->pcie_speed_table.count - 1].value : 3682 dpm_table->pcie_speed_table.dpm_levels[i].value); 3683 } 3684 3685 return 0; 3686 } 3687 3688 static int smu7_request_link_speed_change_before_state_change( 3689 struct pp_hwmgr *hwmgr, const void *input) 3690 { 3691 const struct phm_set_power_state_input *states = 3692 (const struct phm_set_power_state_input *)input; 3693 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3694 const struct smu7_power_state *smu7_nps = 3695 cast_const_phw_smu7_power_state(states->pnew_state); 3696 const struct smu7_power_state *polaris10_cps = 3697 cast_const_phw_smu7_power_state(states->pcurrent_state); 3698 3699 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps); 3700 uint16_t current_link_speed; 3701 3702 if (data->force_pcie_gen == PP_PCIEGenInvalid) 3703 current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps); 3704 else 3705 current_link_speed = data->force_pcie_gen; 3706 3707 data->force_pcie_gen = PP_PCIEGenInvalid; 3708 data->pspp_notify_required = false; 3709 3710 if (target_link_speed > current_link_speed) { 3711 switch (target_link_speed) { 3712 #ifdef CONFIG_ACPI 3713 case PP_PCIEGen3: 3714 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false)) 3715 break; 3716 data->force_pcie_gen = PP_PCIEGen2; 3717 if (current_link_speed == PP_PCIEGen2) 3718 break; 3719 fallthrough; 3720 case PP_PCIEGen2: 3721 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false)) 3722 break; 3723 fallthrough; 3724 #endif 3725 default: 3726 data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); 3727 break; 3728 } 3729 } else { 3730 if (target_link_speed < current_link_speed) 3731 data->pspp_notify_required = true; 3732 } 3733 3734 return 0; 3735 } 3736 3737 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 3738 { 3739 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3740 3741 if (0 == 
data->need_update_smu7_dpm_table) 3742 return 0; 3743 3744 if ((0 == data->sclk_dpm_key_disabled) && 3745 (data->need_update_smu7_dpm_table & 3746 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 3747 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 3748 "Trying to freeze SCLK DPM when DPM is disabled", 3749 ); 3750 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 3751 PPSMC_MSG_SCLKDPM_FreezeLevel, 3752 NULL), 3753 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", 3754 return -EINVAL); 3755 } 3756 3757 if ((0 == data->mclk_dpm_key_disabled) && 3758 (data->need_update_smu7_dpm_table & 3759 DPMTABLE_OD_UPDATE_MCLK)) { 3760 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 3761 "Trying to freeze MCLK DPM when DPM is disabled", 3762 ); 3763 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 3764 PPSMC_MSG_MCLKDPM_FreezeLevel, 3765 NULL), 3766 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", 3767 return -EINVAL); 3768 } 3769 3770 return 0; 3771 } 3772 3773 static int smu7_populate_and_upload_sclk_mclk_dpm_levels( 3774 struct pp_hwmgr *hwmgr, const void *input) 3775 { 3776 int result = 0; 3777 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3778 struct smu7_dpm_table *dpm_table = &data->dpm_table; 3779 uint32_t count; 3780 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); 3781 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); 3782 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); 3783 3784 if (0 == data->need_update_smu7_dpm_table) 3785 return 0; 3786 3787 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { 3788 for (count = 0; count < dpm_table->sclk_table.count; count++) { 3789 dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled; 3790 dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock; 3791 } 3792 } 3793 3794 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { 3795 for (count = 0; count < dpm_table->mclk_table.count; count++) { 3796 dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled; 3797 dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock; 3798 } 3799 } 3800 3801 if (data->need_update_smu7_dpm_table & 3802 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { 3803 result = smum_populate_all_graphic_levels(hwmgr); 3804 PP_ASSERT_WITH_CODE((0 == result), 3805 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", 3806 return result); 3807 } 3808 3809 if (data->need_update_smu7_dpm_table & 3810 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { 3811 /*populate MCLK dpm table to SMU7 */ 3812 result = smum_populate_all_memory_levels(hwmgr); 3813 PP_ASSERT_WITH_CODE((0 == result), 3814 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", 3815 return result); 3816 } 3817 3818 return result; 3819 } 3820 3821 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, 3822 struct smu7_single_dpm_table *dpm_table, 3823 uint32_t low_limit, uint32_t high_limit) 3824 { 3825 uint32_t i; 3826 3827 /* force the trim if mclk_switching is disabled to prevent flicker */ 3828 bool force_trim = (low_limit == high_limit); 3829 for (i = 0; i < dpm_table->count; i++) { 3830 /*skip the trim if od is enabled*/ 3831 if ((!hwmgr->od_enabled || force_trim) 3832 && (dpm_table->dpm_levels[i].value < low_limit 3833 || 
dpm_table->dpm_levels[i].value > high_limit)) 3834 dpm_table->dpm_levels[i].enabled = false; 3835 else 3836 dpm_table->dpm_levels[i].enabled = true; 3837 } 3838 3839 return 0; 3840 } 3841 3842 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, 3843 const struct smu7_power_state *smu7_ps) 3844 { 3845 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3846 uint32_t high_limit_count; 3847 3848 PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1), 3849 "power state did not have any performance level", 3850 return -EINVAL); 3851 3852 high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1; 3853 3854 smu7_trim_single_dpm_states(hwmgr, 3855 &(data->dpm_table.sclk_table), 3856 smu7_ps->performance_levels[0].engine_clock, 3857 smu7_ps->performance_levels[high_limit_count].engine_clock); 3858 3859 smu7_trim_single_dpm_states(hwmgr, 3860 &(data->dpm_table.mclk_table), 3861 smu7_ps->performance_levels[0].memory_clock, 3862 smu7_ps->performance_levels[high_limit_count].memory_clock); 3863 3864 return 0; 3865 } 3866 3867 static int smu7_generate_dpm_level_enable_mask( 3868 struct pp_hwmgr *hwmgr, const void *input) 3869 { 3870 int result = 0; 3871 const struct phm_set_power_state_input *states = 3872 (const struct phm_set_power_state_input *)input; 3873 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3874 const struct smu7_power_state *smu7_ps = 3875 cast_const_phw_smu7_power_state(states->pnew_state); 3876 3877 3878 result = smu7_trim_dpm_states(hwmgr, smu7_ps); 3879 if (result) 3880 return result; 3881 3882 data->dpm_level_enable_mask.sclk_dpm_enable_mask = 3883 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); 3884 data->dpm_level_enable_mask.mclk_dpm_enable_mask = 3885 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); 3886 data->dpm_level_enable_mask.pcie_dpm_enable_mask = 3887 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); 3888 3889 return 0; 3890 } 3891 3892 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 3893 { 3894 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3895 3896 if (0 == data->need_update_smu7_dpm_table) 3897 return 0; 3898 3899 if ((0 == data->sclk_dpm_key_disabled) && 3900 (data->need_update_smu7_dpm_table & 3901 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 3902 3903 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 3904 "Trying to Unfreeze SCLK DPM when DPM is disabled", 3905 ); 3906 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 3907 PPSMC_MSG_SCLKDPM_UnfreezeLevel, 3908 NULL), 3909 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", 3910 return -EINVAL); 3911 } 3912 3913 if ((0 == data->mclk_dpm_key_disabled) && 3914 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 3915 3916 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 3917 "Trying to Unfreeze MCLK DPM when DPM is disabled", 3918 ); 3919 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 3920 PPSMC_MSG_MCLKDPM_UnfreezeLevel, 3921 NULL), 3922 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", 3923 return -EINVAL); 3924 } 3925 3926 data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; 3927 3928 return 0; 3929 } 3930 3931 static int smu7_notify_link_speed_change_after_state_change( 3932 struct pp_hwmgr *hwmgr, const void *input) 3933 { 3934 const struct phm_set_power_state_input *states = 3935 (const struct phm_set_power_state_input *)input; 3936 struct smu7_hwmgr *data = (struct smu7_hwmgr 
*)(hwmgr->backend); 3937 const struct smu7_power_state *smu7_ps = 3938 cast_const_phw_smu7_power_state(states->pnew_state); 3939 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps); 3940 uint8_t request; 3941 3942 if (data->pspp_notify_required) { 3943 if (target_link_speed == PP_PCIEGen3) 3944 request = PCIE_PERF_REQ_GEN3; 3945 else if (target_link_speed == PP_PCIEGen2) 3946 request = PCIE_PERF_REQ_GEN2; 3947 else 3948 request = PCIE_PERF_REQ_GEN1; 3949 3950 if (request == PCIE_PERF_REQ_GEN1 && 3951 smu7_get_current_pcie_speed(hwmgr) > 0) 3952 return 0; 3953 3954 #ifdef CONFIG_ACPI 3955 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) { 3956 if (PP_PCIEGen2 == target_link_speed) 3957 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!"); 3958 else 3959 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!"); 3960 } 3961 #endif 3962 } 3963 3964 return 0; 3965 } 3966 3967 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) 3968 { 3969 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3970 3971 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { 3972 if (hwmgr->chip_id == CHIP_VEGAM) 3973 smum_send_msg_to_smc_with_parameter(hwmgr, 3974 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2, 3975 NULL); 3976 else 3977 smum_send_msg_to_smc_with_parameter(hwmgr, 3978 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2, 3979 NULL); 3980 } 3981 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL; 3982 } 3983 3984 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 3985 { 3986 int tmp_result, result = 0; 3987 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3988 3989 tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input); 3990 PP_ASSERT_WITH_CODE((0 == tmp_result), 3991 "Failed to find DPM states clocks in DPM table!", 3992 result = tmp_result); 3993 3994 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 3995 PHM_PlatformCaps_PCIEPerformanceRequest)) { 3996 tmp_result = 3997 smu7_request_link_speed_change_before_state_change(hwmgr, input); 3998 PP_ASSERT_WITH_CODE((0 == tmp_result), 3999 "Failed to request link speed change before state change!", 4000 result = tmp_result); 4001 } 4002 4003 tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); 4004 PP_ASSERT_WITH_CODE((0 == tmp_result), 4005 "Failed to freeze SCLK MCLK DPM!", result = tmp_result); 4006 4007 tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); 4008 PP_ASSERT_WITH_CODE((0 == tmp_result), 4009 "Failed to populate and upload SCLK MCLK DPM levels!", 4010 result = tmp_result); 4011 4012 /* 4013 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. 4014 * That effectively disables AVFS feature. 
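* (Leaving AVFS active would let the AVFS-computed voltages override the VDDC
* values taken from the custom table; the flag set here is picked up by the
* smu7_update_avfs() call below.)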
4015 */ 4016 if (hwmgr->hardcode_pp_table != NULL) 4017 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 4018 4019 tmp_result = smu7_update_avfs(hwmgr); 4020 PP_ASSERT_WITH_CODE((0 == tmp_result), 4021 "Failed to update avfs voltages!", 4022 result = tmp_result); 4023 4024 tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); 4025 PP_ASSERT_WITH_CODE((0 == tmp_result), 4026 "Failed to generate DPM level enabled mask!", 4027 result = tmp_result); 4028 4029 tmp_result = smum_update_sclk_threshold(hwmgr); 4030 PP_ASSERT_WITH_CODE((0 == tmp_result), 4031 "Failed to update SCLK threshold!", 4032 result = tmp_result); 4033 4034 tmp_result = smu7_notify_smc_display(hwmgr); 4035 PP_ASSERT_WITH_CODE((0 == tmp_result), 4036 "Failed to notify smc display settings!", 4037 result = tmp_result); 4038 4039 tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); 4040 PP_ASSERT_WITH_CODE((0 == tmp_result), 4041 "Failed to unfreeze SCLK MCLK DPM!", 4042 result = tmp_result); 4043 4044 tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr); 4045 PP_ASSERT_WITH_CODE((0 == tmp_result), 4046 "Failed to upload DPM level enabled mask!", 4047 result = tmp_result); 4048 4049 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 4050 PHM_PlatformCaps_PCIEPerformanceRequest)) { 4051 tmp_result = 4052 smu7_notify_link_speed_change_after_state_change(hwmgr, input); 4053 PP_ASSERT_WITH_CODE((0 == tmp_result), 4054 "Failed to notify link speed change after state change!", 4055 result = tmp_result); 4056 } 4057 data->apply_optimized_settings = false; 4058 return result; 4059 } 4060 4061 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) 4062 { 4063 hwmgr->thermal_controller. 4064 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; 4065 4066 return smum_send_msg_to_smc_with_parameter(hwmgr, 4067 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm, 4068 NULL); 4069 } 4070 4071 static int 4072 smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) 4073 { 4074 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; 4075 4076 return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ? 0 : -1; 4077 } 4078 4079 static int 4080 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) 4081 { 4082 if (hwmgr->display_config->num_display > 1 && 4083 !hwmgr->display_config->multi_monitor_in_sync) 4084 smu7_notify_smc_display_change(hwmgr, false); 4085 4086 return 0; 4087 } 4088 4089 /** 4090 * Programs the display gap 4091 * 4092 * @param hwmgr the address of the powerplay hardware manager. 4093 * @return always OK 4094 */ 4095 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) 4096 { 4097 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4098 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); 4099 uint32_t display_gap2; 4100 uint32_t pre_vbi_time_in_us; 4101 uint32_t frame_time_in_us; 4102 uint32_t ref_clock, refresh_rate; 4103 4104 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? 
DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); 4105 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); 4106 4107 ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); 4108 refresh_rate = hwmgr->display_config->vrefresh; 4109 4110 if (0 == refresh_rate) 4111 refresh_rate = 60; 4112 4113 frame_time_in_us = 1000000 / refresh_rate; 4114 4115 pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time; 4116 4117 data->frame_time_x2 = frame_time_in_us * 2 / 100; 4118 4119 if (data->frame_time_x2 < 280) { 4120 pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2); 4121 data->frame_time_x2 = 280; 4122 } 4123 4124 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); 4125 4126 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); 4127 4128 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4129 data->soft_regs_start + smum_get_offsetof(hwmgr, 4130 SMU_SoftRegisters, 4131 PreVBlankGap), 0x64); 4132 4133 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4134 data->soft_regs_start + smum_get_offsetof(hwmgr, 4135 SMU_SoftRegisters, 4136 VBlankTimeout), 4137 (frame_time_in_us - pre_vbi_time_in_us)); 4138 4139 return 0; 4140 } 4141 4142 static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 4143 { 4144 return smu7_program_display_gap(hwmgr); 4145 } 4146 4147 /** 4148 * Set maximum target operating fan output RPM 4149 * 4150 * @param hwmgr the address of the powerplay hardware manager. 4151 * @param us_max_fan_rpm max operating fan RPM value. 4152 * @return The response that came from the SMC. 4153 */ 4154 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) 4155 { 4156 hwmgr->thermal_controller. 
4157 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; 4158 4159 return smum_send_msg_to_smc_with_parameter(hwmgr, 4160 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm, 4161 NULL); 4162 } 4163 4164 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = { 4165 .process = phm_irq_process, 4166 }; 4167 4168 static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr) 4169 { 4170 struct amdgpu_irq_src *source = 4171 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); 4172 4173 if (!source) 4174 return -ENOMEM; 4175 4176 source->funcs = &smu7_irq_funcs; 4177 4178 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4179 AMDGPU_IRQ_CLIENTID_LEGACY, 4180 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, 4181 source); 4182 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4183 AMDGPU_IRQ_CLIENTID_LEGACY, 4184 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, 4185 source); 4186 4187 /* Register CTF(GPIO_19) interrupt */ 4188 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4189 AMDGPU_IRQ_CLIENTID_LEGACY, 4190 VISLANDS30_IV_SRCID_GPIO_19, 4191 source); 4192 4193 return 0; 4194 } 4195 4196 static bool 4197 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) 4198 { 4199 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4200 bool is_update_required = false; 4201 4202 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) 4203 is_update_required = true; 4204 4205 if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh) 4206 is_update_required = true; 4207 4208 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { 4209 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr && 4210 (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || 4211 hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) 4212 is_update_required = true; 4213 } 4214 return is_update_required; 4215 } 4216 4217 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1, 4218 const struct smu7_performance_level *pl2) 4219 { 4220 return ((pl1->memory_clock == pl2->memory_clock) && 4221 (pl1->engine_clock == pl2->engine_clock) && 4222 (pl1->pcie_gen == pl2->pcie_gen) && 4223 (pl1->pcie_lane == pl2->pcie_lane)); 4224 } 4225 4226 static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, 4227 const struct pp_hw_power_state *pstate1, 4228 const struct pp_hw_power_state *pstate2, bool *equal) 4229 { 4230 const struct smu7_power_state *psa; 4231 const struct smu7_power_state *psb; 4232 int i; 4233 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4234 4235 if (pstate1 == NULL || pstate2 == NULL || equal == NULL) 4236 return -EINVAL; 4237 4238 psa = cast_const_phw_smu7_power_state(pstate1); 4239 psb = cast_const_phw_smu7_power_state(pstate2); 4240 /* If the two states don't even have the same number of performance levels they cannot be the same state. */ 4241 if (psa->performance_level_count != psb->performance_level_count) { 4242 *equal = false; 4243 return 0; 4244 } 4245 4246 for (i = 0; i < psa->performance_level_count; i++) { 4247 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { 4248 /* If we have found even one performance level pair that is different the states are different. 
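* (When every level matches, the UVD/VCE clocks and the SCLK threshold below
* act as tie-breakers, and any pending overdrive update also marks the states
* as not equal.)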
*/ 4249 *equal = false; 4250 return 0; 4251 } 4252 } 4253 4254 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ 4255 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); 4256 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); 4257 *equal &= (psa->sclk_threshold == psb->sclk_threshold); 4258 /* For OD call, set value based on flag */ 4259 *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | 4260 DPMTABLE_OD_UPDATE_MCLK | 4261 DPMTABLE_OD_UPDATE_VDDC)); 4262 4263 return 0; 4264 } 4265 4266 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) 4267 { 4268 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4269 4270 uint32_t tmp; 4271 4272 /* Read MC indirect register offset 0x9F bits [3:0] to see 4273 * if VBIOS has already loaded a full version of MC ucode 4274 * or not. 4275 */ 4276 4277 smu7_get_mc_microcode_version(hwmgr); 4278 4279 data->need_long_memory_training = false; 4280 4281 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 4282 ixMC_IO_DEBUG_UP_13); 4283 tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); 4284 4285 if (tmp & (1 << 23)) { 4286 data->mem_latency_high = MEM_LATENCY_HIGH; 4287 data->mem_latency_low = MEM_LATENCY_LOW; 4288 if ((hwmgr->chip_id == CHIP_POLARIS10) || 4289 (hwmgr->chip_id == CHIP_POLARIS11) || 4290 (hwmgr->chip_id == CHIP_POLARIS12)) 4291 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL); 4292 } else { 4293 data->mem_latency_high = 330; 4294 data->mem_latency_low = 330; 4295 if ((hwmgr->chip_id == CHIP_POLARIS10) || 4296 (hwmgr->chip_id == CHIP_POLARIS11) || 4297 (hwmgr->chip_id == CHIP_POLARIS12)) 4298 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL); 4299 } 4300 4301 return 0; 4302 } 4303 4304 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) 4305 { 4306 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4307 4308 data->clock_registers.vCG_SPLL_FUNC_CNTL = 4309 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); 4310 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = 4311 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); 4312 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = 4313 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); 4314 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = 4315 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); 4316 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = 4317 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); 4318 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = 4319 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); 4320 data->clock_registers.vDLL_CNTL = 4321 cgs_read_register(hwmgr->device, mmDLL_CNTL); 4322 data->clock_registers.vMCLK_PWRMGT_CNTL = 4323 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); 4324 data->clock_registers.vMPLL_AD_FUNC_CNTL = 4325 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); 4326 data->clock_registers.vMPLL_DQ_FUNC_CNTL = 4327 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); 4328 data->clock_registers.vMPLL_FUNC_CNTL = 4329 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); 4330 data->clock_registers.vMPLL_FUNC_CNTL_1 = 4331 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); 4332 data->clock_registers.vMPLL_FUNC_CNTL_2 = 4333 cgs_read_register(hwmgr->device, 
mmMPLL_FUNC_CNTL_2); 4334 data->clock_registers.vMPLL_SS1 = 4335 cgs_read_register(hwmgr->device, mmMPLL_SS1); 4336 data->clock_registers.vMPLL_SS2 = 4337 cgs_read_register(hwmgr->device, mmMPLL_SS2); 4338 return 0; 4339 4340 } 4341 4342 /** 4343 * Find out if memory is GDDR5. 4344 * 4345 * @param hwmgr the address of the powerplay hardware manager. 4346 * @return always 0 4347 */ 4348 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) 4349 { 4350 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4351 struct amdgpu_device *adev = hwmgr->adev; 4352 4353 data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5); 4354 4355 return 0; 4356 } 4357 4358 /** 4359 * Enables Dynamic Power Management by SMC 4360 * 4361 * @param hwmgr the address of the powerplay hardware manager. 4362 * @return always 0 4363 */ 4364 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr) 4365 { 4366 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 4367 GENERAL_PWRMGT, STATIC_PM_EN, 1); 4368 4369 return 0; 4370 } 4371 4372 /** 4373 * Initialize PowerGating States for different engines 4374 * 4375 * @param hwmgr the address of the powerplay hardware manager. 4376 * @return always 0 4377 */ 4378 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) 4379 { 4380 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4381 4382 data->uvd_power_gated = false; 4383 data->vce_power_gated = false; 4384 4385 return 0; 4386 } 4387 4388 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) 4389 { 4390 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4391 4392 data->low_sclk_interrupt_threshold = 0; 4393 return 0; 4394 } 4395 4396 static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) 4397 { 4398 int tmp_result, result = 0; 4399 4400 smu7_check_mc_firmware(hwmgr); 4401 4402 tmp_result = smu7_read_clock_registers(hwmgr); 4403 PP_ASSERT_WITH_CODE((0 == tmp_result), 4404 "Failed to read clock registers!", result = tmp_result); 4405 4406 tmp_result = smu7_get_memory_type(hwmgr); 4407 PP_ASSERT_WITH_CODE((0 == tmp_result), 4408 "Failed to get memory type!", result = tmp_result); 4409 4410 tmp_result = smu7_enable_acpi_power_management(hwmgr); 4411 PP_ASSERT_WITH_CODE((0 == tmp_result), 4412 "Failed to enable ACPI power management!", result = tmp_result); 4413 4414 tmp_result = smu7_init_power_gate_state(hwmgr); 4415 PP_ASSERT_WITH_CODE((0 == tmp_result), 4416 "Failed to init power gate state!", result = tmp_result); 4417 4418 tmp_result = smu7_get_mc_microcode_version(hwmgr); 4419 PP_ASSERT_WITH_CODE((0 == tmp_result), 4420 "Failed to get MC microcode version!", result = tmp_result); 4421 4422 tmp_result = smu7_init_sclk_threshold(hwmgr); 4423 PP_ASSERT_WITH_CODE((0 == tmp_result), 4424 "Failed to init sclk threshold!", result = tmp_result); 4425 4426 return result; 4427 } 4428 4429 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, 4430 enum pp_clock_type type, uint32_t mask) 4431 { 4432 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4433 4434 if (mask == 0) 4435 return -EINVAL; 4436 4437 switch (type) { 4438 case PP_SCLK: 4439 if (!data->sclk_dpm_key_disabled) 4440 smum_send_msg_to_smc_with_parameter(hwmgr, 4441 PPSMC_MSG_SCLKDPM_SetEnabledMask, 4442 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask, 4443 NULL); 4444 break; 4445 case PP_MCLK: 4446 if (!data->mclk_dpm_key_disabled) 4447 smum_send_msg_to_smc_with_parameter(hwmgr, 4448 PPSMC_MSG_MCLKDPM_SetEnabledMask, 4449 
data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask, 4450 NULL); 4451 break; 4452 case PP_PCIE: 4453 { 4454 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; 4455 4456 if (!data->pcie_dpm_key_disabled) { 4457 if (fls(tmp) != ffs(tmp)) 4458 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel, 4459 NULL); 4460 else 4461 smum_send_msg_to_smc_with_parameter(hwmgr, 4462 PPSMC_MSG_PCIeDPM_ForceLevel, 4463 fls(tmp) - 1, 4464 NULL); 4465 } 4466 break; 4467 } 4468 default: 4469 break; 4470 } 4471 4472 return 0; 4473 } 4474 4475 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, 4476 enum pp_clock_type type, char *buf) 4477 { 4478 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4479 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 4480 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 4481 struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); 4482 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); 4483 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); 4484 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); 4485 int i, now, size = 0; 4486 uint32_t clock, pcie_speed; 4487 4488 switch (type) { 4489 case PP_SCLK: 4490 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); 4491 4492 for (i = 0; i < sclk_table->count; i++) { 4493 if (clock > sclk_table->dpm_levels[i].value) 4494 continue; 4495 break; 4496 } 4497 now = i; 4498 4499 for (i = 0; i < sclk_table->count; i++) 4500 size += sprintf(buf + size, "%d: %uMhz %s\n", 4501 i, sclk_table->dpm_levels[i].value / 100, 4502 (i == now) ? "*" : ""); 4503 break; 4504 case PP_MCLK: 4505 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock); 4506 4507 for (i = 0; i < mclk_table->count; i++) { 4508 if (clock > mclk_table->dpm_levels[i].value) 4509 continue; 4510 break; 4511 } 4512 now = i; 4513 4514 for (i = 0; i < mclk_table->count; i++) 4515 size += sprintf(buf + size, "%d: %uMhz %s\n", 4516 i, mclk_table->dpm_levels[i].value / 100, 4517 (i == now) ? "*" : ""); 4518 break; 4519 case PP_PCIE: 4520 pcie_speed = smu7_get_current_pcie_speed(hwmgr); 4521 for (i = 0; i < pcie_table->count; i++) { 4522 if (pcie_speed != pcie_table->dpm_levels[i].value) 4523 continue; 4524 break; 4525 } 4526 now = i; 4527 4528 for (i = 0; i < pcie_table->count; i++) 4529 size += sprintf(buf + size, "%d: %s %s\n", i, 4530 (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : 4531 (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : 4532 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "", 4533 (i == now) ? 
"*" : ""); 4534 break; 4535 case OD_SCLK: 4536 if (hwmgr->od_enabled) { 4537 size = sprintf(buf, "%s:\n", "OD_SCLK"); 4538 for (i = 0; i < odn_sclk_table->num_of_pl; i++) 4539 size += sprintf(buf + size, "%d: %10uMHz %10umV\n", 4540 i, odn_sclk_table->entries[i].clock/100, 4541 odn_sclk_table->entries[i].vddc); 4542 } 4543 break; 4544 case OD_MCLK: 4545 if (hwmgr->od_enabled) { 4546 size = sprintf(buf, "%s:\n", "OD_MCLK"); 4547 for (i = 0; i < odn_mclk_table->num_of_pl; i++) 4548 size += sprintf(buf + size, "%d: %10uMHz %10umV\n", 4549 i, odn_mclk_table->entries[i].clock/100, 4550 odn_mclk_table->entries[i].vddc); 4551 } 4552 break; 4553 case OD_RANGE: 4554 if (hwmgr->od_enabled) { 4555 size = sprintf(buf, "%s:\n", "OD_RANGE"); 4556 size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", 4557 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, 4558 hwmgr->platform_descriptor.overdriveLimit.engineClock/100); 4559 size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", 4560 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, 4561 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); 4562 size += sprintf(buf + size, "VDDC: %7umV %11umV\n", 4563 data->odn_dpm_table.min_vddc, 4564 data->odn_dpm_table.max_vddc); 4565 } 4566 break; 4567 default: 4568 break; 4569 } 4570 return size; 4571 } 4572 4573 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 4574 { 4575 switch (mode) { 4576 case AMD_FAN_CTRL_NONE: 4577 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); 4578 break; 4579 case AMD_FAN_CTRL_MANUAL: 4580 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 4581 PHM_PlatformCaps_MicrocodeFanControl)) 4582 smu7_fan_ctrl_stop_smc_fan_control(hwmgr); 4583 break; 4584 case AMD_FAN_CTRL_AUTO: 4585 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode)) 4586 smu7_fan_ctrl_start_smc_fan_control(hwmgr); 4587 break; 4588 default: 4589 break; 4590 } 4591 } 4592 4593 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) 4594 { 4595 return hwmgr->fan_ctrl_enabled ? 
AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL; 4596 } 4597 4598 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr) 4599 { 4600 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4601 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 4602 struct smu7_single_dpm_table *golden_sclk_table = 4603 &(data->golden_dpm_table.sclk_table); 4604 int value = sclk_table->dpm_levels[sclk_table->count - 1].value; 4605 int golden_value = golden_sclk_table->dpm_levels 4606 [golden_sclk_table->count - 1].value; 4607 4608 value -= golden_value; 4609 value = DIV_ROUND_UP(value * 100, golden_value); 4610 4611 return value; 4612 } 4613 4614 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) 4615 { 4616 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4617 struct smu7_single_dpm_table *golden_sclk_table = 4618 &(data->golden_dpm_table.sclk_table); 4619 struct pp_power_state *ps; 4620 struct smu7_power_state *smu7_ps; 4621 4622 if (value > 20) 4623 value = 20; 4624 4625 ps = hwmgr->request_ps; 4626 4627 if (ps == NULL) 4628 return -EINVAL; 4629 4630 smu7_ps = cast_phw_smu7_power_state(&ps->hardware); 4631 4632 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock = 4633 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * 4634 value / 100 + 4635 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; 4636 4637 return 0; 4638 } 4639 4640 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr) 4641 { 4642 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4643 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 4644 struct smu7_single_dpm_table *golden_mclk_table = 4645 &(data->golden_dpm_table.mclk_table); 4646 int value = mclk_table->dpm_levels[mclk_table->count - 1].value; 4647 int golden_value = golden_mclk_table->dpm_levels 4648 [golden_mclk_table->count - 1].value; 4649 4650 value -= golden_value; 4651 value = DIV_ROUND_UP(value * 100, golden_value); 4652 4653 return value; 4654 } 4655 4656 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) 4657 { 4658 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4659 struct smu7_single_dpm_table *golden_mclk_table = 4660 &(data->golden_dpm_table.mclk_table); 4661 struct pp_power_state *ps; 4662 struct smu7_power_state *smu7_ps; 4663 4664 if (value > 20) 4665 value = 20; 4666 4667 ps = hwmgr->request_ps; 4668 4669 if (ps == NULL) 4670 return -EINVAL; 4671 4672 smu7_ps = cast_phw_smu7_power_state(&ps->hardware); 4673 4674 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock = 4675 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * 4676 value / 100 + 4677 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; 4678 4679 return 0; 4680 } 4681 4682 4683 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) 4684 { 4685 struct phm_ppt_v1_information *table_info = 4686 (struct phm_ppt_v1_information *)hwmgr->pptable; 4687 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL; 4688 struct phm_clock_voltage_dependency_table *sclk_table; 4689 int i; 4690 4691 if (hwmgr->pp_table_version == PP_TABLE_V1) { 4692 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) 4693 return -EINVAL; 4694 dep_sclk_table = table_info->vdd_dep_on_sclk; 4695 for (i = 0; i < dep_sclk_table->count; i++) 4696 clocks->clock[i] = dep_sclk_table->entries[i].clk * 10; 4697 clocks->count = dep_sclk_table->count; 4698 } else if 
(hwmgr->pp_table_version == PP_TABLE_V0) { 4699 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; 4700 for (i = 0; i < sclk_table->count; i++) 4701 clocks->clock[i] = sclk_table->entries[i].clk * 10; 4702 clocks->count = sclk_table->count; 4703 } 4704 4705 return 0; 4706 } 4707 4708 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk) 4709 { 4710 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4711 4712 if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY) 4713 return data->mem_latency_high; 4714 else if (clk >= MEM_FREQ_HIGH_LATENCY) 4715 return data->mem_latency_low; 4716 else 4717 return MEM_LATENCY_ERR; 4718 } 4719 4720 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) 4721 { 4722 struct phm_ppt_v1_information *table_info = 4723 (struct phm_ppt_v1_information *)hwmgr->pptable; 4724 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; 4725 int i; 4726 struct phm_clock_voltage_dependency_table *mclk_table; 4727 4728 if (hwmgr->pp_table_version == PP_TABLE_V1) { 4729 if (table_info == NULL) 4730 return -EINVAL; 4731 dep_mclk_table = table_info->vdd_dep_on_mclk; 4732 for (i = 0; i < dep_mclk_table->count; i++) { 4733 clocks->clock[i] = dep_mclk_table->entries[i].clk * 10; 4734 clocks->latency[i] = smu7_get_mem_latency(hwmgr, 4735 dep_mclk_table->entries[i].clk); 4736 } 4737 clocks->count = dep_mclk_table->count; 4738 } else if (hwmgr->pp_table_version == PP_TABLE_V0) { 4739 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; 4740 for (i = 0; i < mclk_table->count; i++) 4741 clocks->clock[i] = mclk_table->entries[i].clk * 10; 4742 clocks->count = mclk_table->count; 4743 } 4744 return 0; 4745 } 4746 4747 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, 4748 struct amd_pp_clocks *clocks) 4749 { 4750 switch (type) { 4751 case amd_pp_sys_clock: 4752 smu7_get_sclks(hwmgr, clocks); 4753 break; 4754 case amd_pp_mem_clock: 4755 smu7_get_mclks(hwmgr, clocks); 4756 break; 4757 default: 4758 return -EINVAL; 4759 } 4760 4761 return 0; 4762 } 4763 4764 static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, 4765 uint32_t virtual_addr_low, 4766 uint32_t virtual_addr_hi, 4767 uint32_t mc_addr_low, 4768 uint32_t mc_addr_hi, 4769 uint32_t size) 4770 { 4771 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4772 4773 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4774 data->soft_regs_start + 4775 smum_get_offsetof(hwmgr, 4776 SMU_SoftRegisters, DRAM_LOG_ADDR_H), 4777 mc_addr_hi); 4778 4779 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4780 data->soft_regs_start + 4781 smum_get_offsetof(hwmgr, 4782 SMU_SoftRegisters, DRAM_LOG_ADDR_L), 4783 mc_addr_low); 4784 4785 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4786 data->soft_regs_start + 4787 smum_get_offsetof(hwmgr, 4788 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H), 4789 virtual_addr_hi); 4790 4791 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4792 data->soft_regs_start + 4793 smum_get_offsetof(hwmgr, 4794 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L), 4795 virtual_addr_low); 4796 4797 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4798 data->soft_regs_start + 4799 smum_get_offsetof(hwmgr, 4800 SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE), 4801 size); 4802 return 0; 4803 } 4804 4805 static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr, 4806 struct amd_pp_simple_clock_info *clocks) 4807 { 4808 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4809 
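/* Report the value of the highest SCLK/MCLK DPM level, or the only level when
 * the table has a single entry. */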
struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 4810 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 4811 4812 if (clocks == NULL) 4813 return -EINVAL; 4814 4815 clocks->memory_max_clock = mclk_table->count > 1 ? 4816 mclk_table->dpm_levels[mclk_table->count-1].value : 4817 mclk_table->dpm_levels[0].value; 4818 clocks->engine_max_clock = sclk_table->count > 1 ? 4819 sclk_table->dpm_levels[sclk_table->count-1].value : 4820 sclk_table->dpm_levels[0].value; 4821 return 0; 4822 } 4823 4824 static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, 4825 struct PP_TemperatureRange *thermal_data) 4826 { 4827 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4828 struct phm_ppt_v1_information *table_info = 4829 (struct phm_ppt_v1_information *)hwmgr->pptable; 4830 4831 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange)); 4832 4833 if (hwmgr->pp_table_version == PP_TABLE_V1) 4834 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp * 4835 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4836 else if (hwmgr->pp_table_version == PP_TABLE_V0) 4837 thermal_data->max = data->thermal_temp_setting.temperature_shutdown * 4838 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 4839 4840 return 0; 4841 } 4842 4843 static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr, 4844 enum PP_OD_DPM_TABLE_COMMAND type, 4845 uint32_t clk, 4846 uint32_t voltage) 4847 { 4848 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4849 4850 if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) { 4851 pr_info("OD voltage is out of range [%d - %d] mV\n", 4852 data->odn_dpm_table.min_vddc, 4853 data->odn_dpm_table.max_vddc); 4854 return false; 4855 } 4856 4857 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { 4858 if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk || 4859 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) { 4860 pr_info("OD engine clock is out of range [%d - %d] MHz\n", 4861 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, 4862 hwmgr->platform_descriptor.overdriveLimit.engineClock/100); 4863 return false; 4864 } 4865 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { 4866 if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk || 4867 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) { 4868 pr_info("OD memory clock is out of range [%d - %d] MHz\n", 4869 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, 4870 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); 4871 return false; 4872 } 4873 } else { 4874 return false; 4875 } 4876 4877 return true; 4878 } 4879 4880 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, 4881 enum PP_OD_DPM_TABLE_COMMAND type, 4882 long *input, uint32_t size) 4883 { 4884 uint32_t i; 4885 struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL; 4886 struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL; 4887 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4888 4889 uint32_t input_clk; 4890 uint32_t input_vol; 4891 uint32_t input_level; 4892 4893 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", 4894 return -EINVAL); 4895 4896 if (!hwmgr->od_enabled) { 4897 pr_info("OverDrive feature not enabled\n"); 4898 return -EINVAL; 4899 } 4900 4901 if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) { 4902 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels; 4903 podn_vdd_dep_in_backend = 
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	uint32_t i;
	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t input_clk;
	uint32_t input_vol;
	uint32_t input_level;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
				return -EINVAL);

	if (!hwmgr->od_enabled) {
		pr_info("OverDrive feature not enabled\n");
		return -EINVAL;
	}

	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN SCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;

		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN MCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
		smu7_odn_initial_default_setting(hwmgr);
		return 0;
	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
		smu7_check_dpm_table_updated(hwmgr);
		return 0;
	} else {
		return -EINVAL;
	}

	for (i = 0; i < size; i += 3) {
		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
			pr_info("invalid clock voltage input\n");
			return 0;
		}
		input_level = input[i];
		input_clk = input[i+1] * 100;
		input_vol = input[i+2];

		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t i, size = 0;
	uint32_t len;

	static const char *profile_name[7] = {"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};

	static const char *title[8] = {"NUM",
			"MODE_NAME",
			"SCLK_UP_HYST",
			"SCLK_DOWN_HYST",
			"SCLK_ACTIVE_LEVEL",
			"MCLK_UP_HYST",
			"MCLK_DOWN_HYST",
			"MCLK_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
			title[0], title[1], title[2], title[3],
			title[4], title[5], title[6], title[7]);

	len = ARRAY_SIZE(smu7_profiling);

	for (i = 0; i < len; i++) {
		if (i == hwmgr->power_profile_mode) {
			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
			i, profile_name[i], "*",
			data->current_profile_setting.sclk_up_hyst,
			data->current_profile_setting.sclk_down_hyst,
			data->current_profile_setting.sclk_activity,
			data->current_profile_setting.mclk_up_hyst,
			data->current_profile_setting.mclk_down_hyst,
			data->current_profile_setting.mclk_activity);
			continue;
		}
		if (smu7_profiling[i].bupdate_sclk)
			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
			i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
			smu7_profiling[i].sclk_down_hyst,
			smu7_profiling[i].sclk_activity);
		else
			size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
			i, profile_name[i], "-", "-", "-");

		if (smu7_profiling[i].bupdate_mclk)
			size += sprintf(buf + size, "%16d %16d %16d\n",
			smu7_profiling[i].mclk_up_hyst,
			smu7_profiling[i].mclk_down_hyst,
			smu7_profiling[i].mclk_activity);
		else
			size += sprintf(buf + size, "%16s %16s %16s\n",
			"-", "-", "-");
	}

	return size;
}
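
/**
 * Adjust the forced SCLK levels when entering or leaving the COMPUTE profile
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param request the power profile mode being applied.
 */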
static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
				enum PP_SMC_POWER_PROFILE request)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t tmp, level;

	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			level = 0;
			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;
			if (level > 0)
				smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
		}
	} else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
		smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
	}
}

static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct profile_mode_setting tmp;
	enum PP_SMC_POWER_PROFILE mode;

	if (input == NULL)
		return -EINVAL;

	mode = input[size];
	switch (mode) {
	case PP_SMC_POWER_PROFILE_CUSTOM:
		if (size < 8 && size != 0)
			return -EINVAL;
		/* If only CUSTOM is passed in, use the saved values. Check
		 * that we actually have a CUSTOM profile by ensuring that
		 * the "use sclk" or the "use mclk" bits are set
		 */
		tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
		if (size == 0) {
			if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
				return -EINVAL;
		} else {
			tmp.bupdate_sclk = input[0];
			tmp.sclk_up_hyst = input[1];
			tmp.sclk_down_hyst = input[2];
			tmp.sclk_activity = input[3];
			tmp.bupdate_mclk = input[4];
			tmp.mclk_up_hyst = input[5];
			tmp.mclk_down_hyst = input[6];
			tmp.mclk_activity = input[7];
			smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
		}
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
			hwmgr->power_profile_mode = mode;
		}
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
	case PP_SMC_POWER_PROFILE_POWERSAVING:
	case PP_SMC_POWER_PROFILE_VIDEO:
	case PP_SMC_POWER_PROFILE_VR:
	case PP_SMC_POWER_PROFILE_COMPUTE:
		if (mode == hwmgr->power_profile_mode)
			return 0;

		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			if (tmp.bupdate_sclk) {
				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
			}
			if (tmp.bupdate_mclk) {
				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
			}
			smu7_patch_compute_profile_mode(hwmgr, mode);
			hwmgr->power_profile_mode = mode;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
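
/**
 * Report the engine and memory clocks of one performance level of a power state
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param state the power state to query.
 * @param designation the performance level designation.
 * @param index the performance level, clamped to the last valid level.
 * @param level the output structure receiving the clocks.
 * @return 0 on success; -EINVAL on a NULL argument
 */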
static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct smu7_power_state *ps;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	ps = cast_const_phw_smu7_power_state(state);

	i = index > ps->performance_level_count - 1 ?
			ps->performance_level_count - 1 : index;

	level->coreClock = ps->performance_levels[i].engine_clock;
	level->memory_clock = ps->performance_levels[i].memory_clock;

	return 0;
}

static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
{
	int result;

	result = smu7_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[disable_dpm_tasks] Failed to disable DPM!",
			);

	return result;
}

static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
	.backend_init = &smu7_hwmgr_backend_init,
	.backend_fini = &smu7_hwmgr_backend_fini,
	.asic_setup = &smu7_setup_asic_task,
	.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
	.force_dpm_level = &smu7_force_dpm_level,
	.power_state_set = smu7_set_power_state_tasks,
	.get_power_state_size = smu7_get_power_state_size,
	.get_mclk = smu7_dpm_get_mclk,
	.get_sclk = smu7_dpm_get_sclk,
	.patch_boot_state = smu7_dpm_patch_boot_state,
	.get_pp_table_entry = smu7_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
	.powerdown_uvd = smu7_powerdown_uvd,
	.powergate_uvd = smu7_powergate_uvd,
	.powergate_vce = smu7_powergate_vce,
	.disable_clock_power_gating = smu7_disable_clock_power_gating,
	.update_clock_gatings = smu7_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = smu7_display_configuration_changed_task,
	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
	.register_irq_handlers = smu7_register_irq_handlers,
	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
	.check_states_equal = smu7_check_states_equal,
	.set_fan_control_mode = smu7_set_fan_control_mode,
	.get_fan_control_mode = smu7_get_fan_control_mode,
	.force_clock_level = smu7_force_clock_level,
	.print_clock_levels = smu7_print_clock_levels,
	.powergate_gfx = smu7_powergate_gfx,
	.get_sclk_od = smu7_get_sclk_od,
	.set_sclk_od = smu7_set_sclk_od,
	.get_mclk_od = smu7_get_mclk_od,
	.set_mclk_od = smu7_set_mclk_od,
	.get_clock_by_type = smu7_get_clock_by_type,
	.read_sensor = smu7_read_sensor,
	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
	.avfs_control = smu7_avfs_control,
	.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
	.start_thermal_controller = smu7_start_thermal_controller,
	.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
	.get_max_high_clocks = smu7_get_max_high_clocks,
	.get_thermal_temperature_range = smu7_get_thermal_temperature_range,
	.odn_edit_dpm_table = smu7_odn_edit_dpm_table,
	.set_power_limit = smu7_set_power_limit,
	.get_power_profile_mode = smu7_get_power_profile_mode,
	.set_power_profile_mode = smu7_set_power_profile_mode,
	.get_performance_level = smu7_get_performance_level,
	.get_asic_baco_capability = smu7_baco_get_capability,
	.get_asic_baco_state = smu7_baco_get_state,
	.set_asic_baco_state = smu7_baco_set_state,
	.power_off_asic = smu7_power_off_asic,
};
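
/**
 * Compute the deep-sleep divider ID for a given engine clock
 *
 * @param clock the engine clock to be divided.
 * @param clock_insr the minimum engine clock required while in deep sleep.
 * @return the largest divider ID that keeps the divided clock at or above the minimum
 */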
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		hwmgr->pptable_func = &pptable_funcs;
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		hwmgr->pptable_func = &pptable_v1_0_funcs;

	return 0;
}