1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include "pp_debug.h" 24 #include <linux/delay.h> 25 #include <linux/fb.h> 26 #include <linux/module.h> 27 #include <linux/pci.h> 28 #include <linux/slab.h> 29 #include <asm/div64.h> 30 #include <drm/amdgpu_drm.h> 31 #include "ppatomctrl.h" 32 #include "atombios.h" 33 #include "pptable_v1_0.h" 34 #include "pppcielanes.h" 35 #include "amd_pcie_helpers.h" 36 #include "hardwaremanager.h" 37 #include "process_pptables_v1_0.h" 38 #include "cgs_common.h" 39 40 #include "smu7_common.h" 41 42 #include "hwmgr.h" 43 #include "smu7_hwmgr.h" 44 #include "smu_ucode_xfer_vi.h" 45 #include "smu7_powertune.h" 46 #include "smu7_dyn_defaults.h" 47 #include "smu7_thermal.h" 48 #include "smu7_clockpowergating.h" 49 #include "processpptables.h" 50 #include "pp_thermal.h" 51 #include "smu7_baco.h" 52 #include "smu7_smumgr.h" 53 #include "polaris10_smumgr.h" 54 55 #include "ivsrcid/ivsrcid_vislands30.h" 56 57 #define MC_CG_ARB_FREQ_F0 0x0a 58 #define MC_CG_ARB_FREQ_F1 0x0b 59 #define MC_CG_ARB_FREQ_F2 0x0c 60 #define MC_CG_ARB_FREQ_F3 0x0d 61 62 #define MC_CG_SEQ_DRAMCONF_S0 0x05 63 #define MC_CG_SEQ_DRAMCONF_S1 0x06 64 #define MC_CG_SEQ_YCLK_SUSPEND 0x04 65 #define MC_CG_SEQ_YCLK_RESUME 0x0a 66 67 #define SMC_CG_IND_START 0xc0030000 68 #define SMC_CG_IND_END 0xc0040000 69 70 #define MEM_FREQ_LOW_LATENCY 25000 71 #define MEM_FREQ_HIGH_LATENCY 80000 72 73 #define MEM_LATENCY_HIGH 45 74 #define MEM_LATENCY_LOW 35 75 #define MEM_LATENCY_ERR 0xFFFF 76 77 #define MC_SEQ_MISC0_GDDR5_SHIFT 28 78 #define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 79 #define MC_SEQ_MISC0_GDDR5_VALUE 5 80 81 #define PCIE_BUS_CLK 10000 82 #define TCLK (PCIE_BUS_CLK / 10) 83 84 static struct profile_mode_setting smu7_profiling[7] = 85 {{0, 0, 0, 0, 0, 0, 0, 0}, 86 {1, 0, 100, 30, 1, 0, 100, 10}, 87 {1, 10, 0, 30, 0, 0, 0, 0}, 88 {0, 0, 0, 0, 1, 10, 16, 31}, 89 {1, 0, 11, 50, 1, 0, 100, 10}, 90 {1, 0, 5, 30, 0, 0, 0, 0}, 91 {0, 0, 0, 0, 0, 0, 0, 0}, 92 }; 93 94 #define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310) 95 96 #define ixPWR_SVI2_PLANE1_LOAD 0xC0200280 97 #define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L 98 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L 99 #define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005 100 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006 101 102 #define STRAP_EVV_REVISION_MSB 2211 103 #define STRAP_EVV_REVISION_LSB 2208 104 105 /** Values for the 
CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ 106 enum DPM_EVENT_SRC { 107 DPM_EVENT_SRC_ANALOG = 0, 108 DPM_EVENT_SRC_EXTERNAL = 1, 109 DPM_EVENT_SRC_DIGITAL = 2, 110 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, 111 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 112 }; 113 114 #define ixDIDT_SQ_EDC_CTRL 0x0013 115 #define ixDIDT_SQ_EDC_THRESHOLD 0x0014 116 #define ixDIDT_SQ_EDC_STALL_PATTERN_1_2 0x0015 117 #define ixDIDT_SQ_EDC_STALL_PATTERN_3_4 0x0016 118 #define ixDIDT_SQ_EDC_STALL_PATTERN_5_6 0x0017 119 #define ixDIDT_SQ_EDC_STALL_PATTERN_7 0x0018 120 121 #define ixDIDT_TD_EDC_CTRL 0x0053 122 #define ixDIDT_TD_EDC_THRESHOLD 0x0054 123 #define ixDIDT_TD_EDC_STALL_PATTERN_1_2 0x0055 124 #define ixDIDT_TD_EDC_STALL_PATTERN_3_4 0x0056 125 #define ixDIDT_TD_EDC_STALL_PATTERN_5_6 0x0057 126 #define ixDIDT_TD_EDC_STALL_PATTERN_7 0x0058 127 128 #define ixDIDT_TCP_EDC_CTRL 0x0073 129 #define ixDIDT_TCP_EDC_THRESHOLD 0x0074 130 #define ixDIDT_TCP_EDC_STALL_PATTERN_1_2 0x0075 131 #define ixDIDT_TCP_EDC_STALL_PATTERN_3_4 0x0076 132 #define ixDIDT_TCP_EDC_STALL_PATTERN_5_6 0x0077 133 #define ixDIDT_TCP_EDC_STALL_PATTERN_7 0x0078 134 135 #define ixDIDT_DB_EDC_CTRL 0x0033 136 #define ixDIDT_DB_EDC_THRESHOLD 0x0034 137 #define ixDIDT_DB_EDC_STALL_PATTERN_1_2 0x0035 138 #define ixDIDT_DB_EDC_STALL_PATTERN_3_4 0x0036 139 #define ixDIDT_DB_EDC_STALL_PATTERN_5_6 0x0037 140 #define ixDIDT_DB_EDC_STALL_PATTERN_7 0x0038 141 142 uint32_t DIDTEDCConfig_P12[] = { 143 ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 144 ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 145 ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 146 ixDIDT_SQ_EDC_STALL_PATTERN_7, 147 ixDIDT_SQ_EDC_THRESHOLD, 148 ixDIDT_SQ_EDC_CTRL, 149 ixDIDT_TD_EDC_STALL_PATTERN_1_2, 150 ixDIDT_TD_EDC_STALL_PATTERN_3_4, 151 ixDIDT_TD_EDC_STALL_PATTERN_5_6, 152 ixDIDT_TD_EDC_STALL_PATTERN_7, 153 ixDIDT_TD_EDC_THRESHOLD, 154 ixDIDT_TD_EDC_CTRL, 155 ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 156 ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 157 ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 158 ixDIDT_TCP_EDC_STALL_PATTERN_7, 159 ixDIDT_TCP_EDC_THRESHOLD, 160 ixDIDT_TCP_EDC_CTRL, 161 ixDIDT_DB_EDC_STALL_PATTERN_1_2, 162 ixDIDT_DB_EDC_STALL_PATTERN_3_4, 163 ixDIDT_DB_EDC_STALL_PATTERN_5_6, 164 ixDIDT_DB_EDC_STALL_PATTERN_7, 165 ixDIDT_DB_EDC_THRESHOLD, 166 ixDIDT_DB_EDC_CTRL, 167 0xFFFFFFFF // End of list 168 }; 169 170 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); 171 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, 172 enum pp_clock_type type, uint32_t mask); 173 static int smu7_notify_has_display(struct pp_hwmgr *hwmgr); 174 175 static struct smu7_power_state *cast_phw_smu7_power_state( 176 struct pp_hw_power_state *hw_ps) 177 { 178 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), 179 "Invalid Powerstate Type!", 180 return NULL); 181 182 return (struct smu7_power_state *)hw_ps; 183 } 184 185 static const struct smu7_power_state *cast_const_phw_smu7_power_state( 186 const struct pp_hw_power_state *hw_ps) 187 { 188 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), 189 "Invalid Powerstate Type!", 190 return NULL); 191 192 return (const struct smu7_power_state *)hw_ps; 193 } 194 195 /** 196 * Find the MC microcode version and store it in the HwMgr struct 197 * 198 * @param hwmgr the address of the powerplay hardware manager. 
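 * The version is read by selecting MC_SEQ_IO_DEBUG index 0x9F and reading MC_SEQ_IO_DEBUG_DATA.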
199 * @return always 0 200 */ 201 static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr) 202 { 203 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); 204 205 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); 206 207 return 0; 208 } 209 210 static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) 211 { 212 uint32_t speedCntl = 0; 213 214 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ 215 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE, 216 ixPCIE_LC_SPEED_CNTL); 217 return((uint16_t)PHM_GET_FIELD(speedCntl, 218 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); 219 } 220 221 static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) 222 { 223 uint32_t link_width; 224 225 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */ 226 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, 227 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD); 228 229 PP_ASSERT_WITH_CODE((7 >= link_width), 230 "Invalid PCIe lane width!", return 0); 231 232 return decode_pcie_lane_width(link_width); 233 } 234 235 /** 236 * Enable voltage control 237 * 238 * @param pHwMgr the address of the powerplay hardware manager. 239 * @return always PP_Result_OK 240 */ 241 static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) 242 { 243 if (hwmgr->chip_id >= CHIP_POLARIS10 && 244 hwmgr->chip_id <= CHIP_VEGAM) { 245 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, 246 CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0); 247 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, 248 CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0); 249 } 250 251 if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) 252 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL); 253 254 return 0; 255 } 256 257 /** 258 * Checks if we want to support voltage control 259 * 260 * @param hwmgr the address of the powerplay hardware manager. 261 */ 262 static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr) 263 { 264 const struct smu7_hwmgr *data = 265 (const struct smu7_hwmgr *)(hwmgr->backend); 266 267 return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control); 268 } 269 270 /** 271 * Enable voltage control 272 * 273 * @param hwmgr the address of the powerplay hardware manager. 274 * @return always 0 275 */ 276 static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr) 277 { 278 /* enable voltage control */ 279 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 280 GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); 281 282 return 0; 283 } 284 285 static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table, 286 struct phm_clock_voltage_dependency_table *voltage_dependency_table 287 ) 288 { 289 uint32_t i; 290 291 PP_ASSERT_WITH_CODE((NULL != voltage_table), 292 "Voltage Dependency Table empty.", return -EINVAL;); 293 294 voltage_table->mask_low = 0; 295 voltage_table->phase_delay = 0; 296 voltage_table->count = voltage_dependency_table->count; 297 298 for (i = 0; i < voltage_dependency_table->count; i++) { 299 voltage_table->entries[i].value = 300 voltage_dependency_table->entries[i].v; 301 voltage_table->entries[i].smio_low = 0; 302 } 303 304 return 0; 305 } 306 307 308 /** 309 * Create Voltage Tables. 310 * 311 * @param hwmgr the address of the powerplay hardware manager. 
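 * Builds the MVDD, VDDCI, VDDGFX and VDDC tables from either GPIO LUTs or SVI2
 * dependency/lookup tables, then trims each to the SMC's maximum level count.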
312 * @return always 0 313 */ 314 static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr) 315 { 316 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 317 struct phm_ppt_v1_information *table_info = 318 (struct phm_ppt_v1_information *)hwmgr->pptable; 319 int result = 0; 320 uint32_t tmp; 321 322 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { 323 result = atomctrl_get_voltage_table_v3(hwmgr, 324 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, 325 &(data->mvdd_voltage_table)); 326 PP_ASSERT_WITH_CODE((0 == result), 327 "Failed to retrieve MVDD table.", 328 return result); 329 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { 330 if (hwmgr->pp_table_version == PP_TABLE_V1) 331 result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table), 332 table_info->vdd_dep_on_mclk); 333 else if (hwmgr->pp_table_version == PP_TABLE_V0) 334 result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table), 335 hwmgr->dyn_state.mvdd_dependency_on_mclk); 336 337 PP_ASSERT_WITH_CODE((0 == result), 338 "Failed to retrieve SVI2 MVDD table from dependency table.", 339 return result;); 340 } 341 342 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { 343 result = atomctrl_get_voltage_table_v3(hwmgr, 344 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, 345 &(data->vddci_voltage_table)); 346 PP_ASSERT_WITH_CODE((0 == result), 347 "Failed to retrieve VDDCI table.", 348 return result); 349 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { 350 if (hwmgr->pp_table_version == PP_TABLE_V1) 351 result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table), 352 table_info->vdd_dep_on_mclk); 353 else if (hwmgr->pp_table_version == PP_TABLE_V0) 354 result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table), 355 hwmgr->dyn_state.vddci_dependency_on_mclk); 356 PP_ASSERT_WITH_CODE((0 == result), 357 "Failed to retrieve SVI2 VDDCI table from dependency table.", 358 return result); 359 } 360 361 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) { 362 /* VDDGFX has only SVI2 voltage control */ 363 result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table), 364 table_info->vddgfx_lookup_table); 365 PP_ASSERT_WITH_CODE((0 == result), 366 "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;); 367 } 368 369 370 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) { 371 result = atomctrl_get_voltage_table_v3(hwmgr, 372 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT, 373 &data->vddc_voltage_table); 374 PP_ASSERT_WITH_CODE((0 == result), 375 "Failed to retrieve VDDC table.", return result;); 376 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { 377 378 if (hwmgr->pp_table_version == PP_TABLE_V0) 379 result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table, 380 hwmgr->dyn_state.vddc_dependency_on_mclk); 381 else if (hwmgr->pp_table_version == PP_TABLE_V1) 382 result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table), 383 table_info->vddc_lookup_table); 384 385 PP_ASSERT_WITH_CODE((0 == result), 386 "Failed to retrieve SVI2 VDDC table from dependency table.", return result;); 387 } 388 389 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC); 390 PP_ASSERT_WITH_CODE( 391 (data->vddc_voltage_table.count <= tmp), 392 "Too many voltage values for VDDC. 
Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddc_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE(
			(data->vddgfx_voltage_table.count <= tmp),
			"Too many voltage values for VDDGFX. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddgfx_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= tmp),
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddci_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= tmp),
			"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->mvdd_voltage_table)));

	return 0;
}

/**
 * Programs static screen detection parameters
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_program_static_screen_threshold_parameters(
		struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}

/**
 * Setup display gap for glitch-free memory clock switching.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
	uint32_t display_gap =
			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_DISPLAY_GAP_CNTL);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, DISPLAY_GAP_IGNORE);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	return 0;
}

/**
 * Programs activity state transition voting clients
 *
 * @param hwmgr the address of the powerplay hardware manager.
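 * Releases the SCLK/BUSY counter resets and writes the eight default
 * voting-rights clients to CG_FREQ_TRAN_VOTING_0..7.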
471 * @return always 0 472 */ 473 static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr) 474 { 475 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 476 int i; 477 478 /* Clear reset for voting clients before enabling DPM */ 479 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 480 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); 481 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 482 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); 483 484 for (i = 0; i < 8; i++) 485 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 486 ixCG_FREQ_TRAN_VOTING_0 + i * 4, 487 data->voting_rights_clients[i]); 488 return 0; 489 } 490 491 static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr) 492 { 493 int i; 494 495 /* Reset voting clients before disabling DPM */ 496 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 497 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); 498 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 499 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); 500 501 for (i = 0; i < 8; i++) 502 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 503 ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0); 504 505 return 0; 506 } 507 508 /* Copy one arb setting to another and then switch the active set. 509 * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants. 510 */ 511 static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, 512 uint32_t arb_src, uint32_t arb_dest) 513 { 514 uint32_t mc_arb_dram_timing; 515 uint32_t mc_arb_dram_timing2; 516 uint32_t burst_time; 517 uint32_t mc_cg_config; 518 519 switch (arb_src) { 520 case MC_CG_ARB_FREQ_F0: 521 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); 522 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); 523 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); 524 break; 525 case MC_CG_ARB_FREQ_F1: 526 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); 527 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); 528 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); 529 break; 530 default: 531 return -EINVAL; 532 } 533 534 switch (arb_dest) { 535 case MC_CG_ARB_FREQ_F0: 536 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); 537 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); 538 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); 539 break; 540 case MC_CG_ARB_FREQ_F1: 541 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); 542 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); 543 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); 544 break; 545 default: 546 return -EINVAL; 547 } 548 549 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); 550 mc_cg_config |= 0x0000000F; 551 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); 552 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest); 553 554 return 0; 555 } 556 557 static int smu7_reset_to_default(struct pp_hwmgr *hwmgr) 558 { 559 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL); 560 } 561 562 /** 563 * Initial switch from ARB F0->F1 564 * 565 * @param hwmgr the address of the powerplay hardware manager. 566 * @return always 0 567 * This function is to be called from the SetPowerState table. 
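 * Copies the MC ARB F0 register set into F1 and makes F1 the active set.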
568 */ 569 static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) 570 { 571 return smu7_copy_and_switch_arb_sets(hwmgr, 572 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 573 } 574 575 static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) 576 { 577 uint32_t tmp; 578 579 tmp = (cgs_read_ind_register(hwmgr->device, 580 CGS_IND_REG__SMC, ixSMC_SCRATCH9) & 581 0x0000ff00) >> 8; 582 583 if (tmp == MC_CG_ARB_FREQ_F0) 584 return 0; 585 586 return smu7_copy_and_switch_arb_sets(hwmgr, 587 tmp, MC_CG_ARB_FREQ_F0); 588 } 589 590 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) 591 { 592 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 593 594 struct phm_ppt_v1_information *table_info = 595 (struct phm_ppt_v1_information *)(hwmgr->pptable); 596 struct phm_ppt_v1_pcie_table *pcie_table = NULL; 597 598 uint32_t i, max_entry; 599 uint32_t tmp; 600 601 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || 602 data->use_pcie_power_saving_levels), "No pcie performance levels!", 603 return -EINVAL); 604 605 if (table_info != NULL) 606 pcie_table = table_info->pcie_table; 607 608 if (data->use_pcie_performance_levels && 609 !data->use_pcie_power_saving_levels) { 610 data->pcie_gen_power_saving = data->pcie_gen_performance; 611 data->pcie_lane_power_saving = data->pcie_lane_performance; 612 } else if (!data->use_pcie_performance_levels && 613 data->use_pcie_power_saving_levels) { 614 data->pcie_gen_performance = data->pcie_gen_power_saving; 615 data->pcie_lane_performance = data->pcie_lane_power_saving; 616 } 617 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK); 618 phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table, 619 tmp, 620 MAX_REGULAR_DPM_NUMBER); 621 622 if (pcie_table != NULL) { 623 /* max_entry is used to make sure we reserve one PCIE level 624 * for boot level (fix for A+A PSPP issue). 625 * If PCIE table from PPTable have ULV entry + 8 entries, 626 * then ignore the last entry.*/ 627 max_entry = (tmp < pcie_table->count) ? 
tmp : pcie_table->count; 628 for (i = 1; i < max_entry; i++) { 629 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1, 630 get_pcie_gen_support(data->pcie_gen_cap, 631 pcie_table->entries[i].gen_speed), 632 get_pcie_lane_support(data->pcie_lane_cap, 633 pcie_table->entries[i].lane_width)); 634 } 635 data->dpm_table.pcie_speed_table.count = max_entry - 1; 636 smum_update_smc_table(hwmgr, SMU_BIF_TABLE); 637 } else { 638 /* Hardcode Pcie Table */ 639 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, 640 get_pcie_gen_support(data->pcie_gen_cap, 641 PP_Min_PCIEGen), 642 get_pcie_lane_support(data->pcie_lane_cap, 643 PP_Max_PCIELane)); 644 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, 645 get_pcie_gen_support(data->pcie_gen_cap, 646 PP_Min_PCIEGen), 647 get_pcie_lane_support(data->pcie_lane_cap, 648 PP_Max_PCIELane)); 649 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, 650 get_pcie_gen_support(data->pcie_gen_cap, 651 PP_Max_PCIEGen), 652 get_pcie_lane_support(data->pcie_lane_cap, 653 PP_Max_PCIELane)); 654 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, 655 get_pcie_gen_support(data->pcie_gen_cap, 656 PP_Max_PCIEGen), 657 get_pcie_lane_support(data->pcie_lane_cap, 658 PP_Max_PCIELane)); 659 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, 660 get_pcie_gen_support(data->pcie_gen_cap, 661 PP_Max_PCIEGen), 662 get_pcie_lane_support(data->pcie_lane_cap, 663 PP_Max_PCIELane)); 664 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, 665 get_pcie_gen_support(data->pcie_gen_cap, 666 PP_Max_PCIEGen), 667 get_pcie_lane_support(data->pcie_lane_cap, 668 PP_Max_PCIELane)); 669 670 data->dpm_table.pcie_speed_table.count = 6; 671 } 672 /* Populate last level for boot PCIE level, but do not increment count. */ 673 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { 674 for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) 675 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i, 676 get_pcie_gen_support(data->pcie_gen_cap, 677 PP_Max_PCIEGen), 678 data->vbios_boot_state.pcie_lane_bootup_value); 679 } else { 680 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 681 data->dpm_table.pcie_speed_table.count, 682 get_pcie_gen_support(data->pcie_gen_cap, 683 PP_Min_PCIEGen), 684 get_pcie_lane_support(data->pcie_lane_cap, 685 PP_Max_PCIELane)); 686 } 687 return 0; 688 } 689 690 static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr) 691 { 692 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 693 694 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); 695 696 phm_reset_single_dpm_table( 697 &data->dpm_table.sclk_table, 698 smum_get_mac_definition(hwmgr, 699 SMU_MAX_LEVELS_GRAPHICS), 700 MAX_REGULAR_DPM_NUMBER); 701 phm_reset_single_dpm_table( 702 &data->dpm_table.mclk_table, 703 smum_get_mac_definition(hwmgr, 704 SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER); 705 706 phm_reset_single_dpm_table( 707 &data->dpm_table.vddc_table, 708 smum_get_mac_definition(hwmgr, 709 SMU_MAX_LEVELS_VDDC), 710 MAX_REGULAR_DPM_NUMBER); 711 phm_reset_single_dpm_table( 712 &data->dpm_table.vddci_table, 713 smum_get_mac_definition(hwmgr, 714 SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER); 715 716 phm_reset_single_dpm_table( 717 &data->dpm_table.mvdd_table, 718 smum_get_mac_definition(hwmgr, 719 SMU_MAX_LEVELS_MVDD), 720 MAX_REGULAR_DPM_NUMBER); 721 return 0; 722 } 723 /* 724 * This function is to initialize all DPM state tables 725 * for SMU7 based on the dependency table. 
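 * Adjacent duplicate clock values in the dependency tables are collapsed
 * into a single DPM level.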
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */

static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"MCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);


	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;

	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.sclk_table.count++;
		}
	}

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
				allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.mclk_table.count++;
		}
	}

	/* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values.
*/ 782 for (i = 0; i < allowed_vdd_sclk_table->count; i++) { 783 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; 784 data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; 785 /* param1 is for corresponding std voltage */ 786 data->dpm_table.vddc_table.dpm_levels[i].enabled = true; 787 } 788 789 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; 790 allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk; 791 792 if (NULL != allowed_vdd_mclk_table) { 793 /* Initialize Vddci DPM table based on allow Mclk values */ 794 for (i = 0; i < allowed_vdd_mclk_table->count; i++) { 795 data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; 796 data->dpm_table.vddci_table.dpm_levels[i].enabled = true; 797 } 798 data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count; 799 } 800 801 allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk; 802 803 if (NULL != allowed_vdd_mclk_table) { 804 /* 805 * Initialize MVDD DPM table based on allow Mclk 806 * values 807 */ 808 for (i = 0; i < allowed_vdd_mclk_table->count; i++) { 809 data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; 810 data->dpm_table.mvdd_table.dpm_levels[i].enabled = true; 811 } 812 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; 813 } 814 815 return 0; 816 } 817 818 static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) 819 { 820 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 821 struct phm_ppt_v1_information *table_info = 822 (struct phm_ppt_v1_information *)(hwmgr->pptable); 823 uint32_t i; 824 825 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; 826 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; 827 828 if (table_info == NULL) 829 return -EINVAL; 830 831 dep_sclk_table = table_info->vdd_dep_on_sclk; 832 dep_mclk_table = table_info->vdd_dep_on_mclk; 833 834 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL, 835 "SCLK dependency table is missing.", 836 return -EINVAL); 837 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1, 838 "SCLK dependency table count is 0.", 839 return -EINVAL); 840 841 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL, 842 "MCLK dependency table is missing.", 843 return -EINVAL); 844 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, 845 "MCLK dependency table count is 0", 846 return -EINVAL); 847 848 /* Initialize Sclk DPM table based on allow Sclk values */ 849 data->dpm_table.sclk_table.count = 0; 850 for (i = 0; i < dep_sclk_table->count; i++) { 851 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value != 852 dep_sclk_table->entries[i].clk) { 853 854 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = 855 dep_sclk_table->entries[i].clk; 856 857 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 858 (i == 0) ? 
true : false; 859 data->dpm_table.sclk_table.count++; 860 } 861 } 862 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0) 863 hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk; 864 /* Initialize Mclk DPM table based on allow Mclk values */ 865 data->dpm_table.mclk_table.count = 0; 866 for (i = 0; i < dep_mclk_table->count; i++) { 867 if (i == 0 || data->dpm_table.mclk_table.dpm_levels 868 [data->dpm_table.mclk_table.count - 1].value != 869 dep_mclk_table->entries[i].clk) { 870 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = 871 dep_mclk_table->entries[i].clk; 872 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 873 (i == 0) ? true : false; 874 data->dpm_table.mclk_table.count++; 875 } 876 } 877 878 if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) 879 hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk; 880 return 0; 881 } 882 883 static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr) 884 { 885 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 886 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); 887 struct phm_ppt_v1_information *table_info = 888 (struct phm_ppt_v1_information *)(hwmgr->pptable); 889 uint32_t i; 890 891 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; 892 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; 893 struct phm_odn_performance_level *entries; 894 895 if (table_info == NULL) 896 return -EINVAL; 897 898 dep_sclk_table = table_info->vdd_dep_on_sclk; 899 dep_mclk_table = table_info->vdd_dep_on_mclk; 900 901 odn_table->odn_core_clock_dpm_levels.num_of_pl = 902 data->golden_dpm_table.sclk_table.count; 903 entries = odn_table->odn_core_clock_dpm_levels.entries; 904 for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) { 905 entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value; 906 entries[i].enabled = true; 907 entries[i].vddc = dep_sclk_table->entries[i].vddc; 908 } 909 910 smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table, 911 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk)); 912 913 odn_table->odn_memory_clock_dpm_levels.num_of_pl = 914 data->golden_dpm_table.mclk_table.count; 915 entries = odn_table->odn_memory_clock_dpm_levels.entries; 916 for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) { 917 entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value; 918 entries[i].enabled = true; 919 entries[i].vddc = dep_mclk_table->entries[i].vddc; 920 } 921 922 smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table, 923 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk)); 924 925 return 0; 926 } 927 928 static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr) 929 { 930 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 931 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; 932 struct phm_ppt_v1_information *table_info = 933 (struct phm_ppt_v1_information *)(hwmgr->pptable); 934 uint32_t min_vddc = 0; 935 uint32_t max_vddc = 0; 936 937 if (!table_info) 938 return; 939 940 dep_sclk_table = table_info->vdd_dep_on_sclk; 941 942 atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc); 943 944 if (min_vddc == 0 || min_vddc > 2000 945 || min_vddc > dep_sclk_table->entries[0].vddc) 946 min_vddc = dep_sclk_table->entries[0].vddc; 947 948 if (max_vddc == 0 || max_vddc > 2000 949 
|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc) 950 max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc; 951 952 data->odn_dpm_table.min_vddc = min_vddc; 953 data->odn_dpm_table.max_vddc = max_vddc; 954 } 955 956 static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) 957 { 958 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 959 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); 960 struct phm_ppt_v1_information *table_info = 961 (struct phm_ppt_v1_information *)(hwmgr->pptable); 962 uint32_t i; 963 964 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; 965 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; 966 967 if (table_info == NULL) 968 return; 969 970 for (i = 0; i < data->dpm_table.sclk_table.count; i++) { 971 if (odn_table->odn_core_clock_dpm_levels.entries[i].clock != 972 data->dpm_table.sclk_table.dpm_levels[i].value) { 973 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 974 break; 975 } 976 } 977 978 for (i = 0; i < data->dpm_table.mclk_table.count; i++) { 979 if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock != 980 data->dpm_table.mclk_table.dpm_levels[i].value) { 981 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 982 break; 983 } 984 } 985 986 dep_table = table_info->vdd_dep_on_mclk; 987 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk); 988 989 for (i = 0; i < dep_table->count; i++) { 990 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { 991 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; 992 return; 993 } 994 } 995 996 dep_table = table_info->vdd_dep_on_sclk; 997 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); 998 for (i = 0; i < dep_table->count; i++) { 999 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { 1000 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; 1001 return; 1002 } 1003 } 1004 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { 1005 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; 1006 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; 1007 } 1008 } 1009 1010 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) 1011 { 1012 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1013 1014 smu7_reset_dpm_tables(hwmgr); 1015 1016 if (hwmgr->pp_table_version == PP_TABLE_V1) 1017 smu7_setup_dpm_tables_v1(hwmgr); 1018 else if (hwmgr->pp_table_version == PP_TABLE_V0) 1019 smu7_setup_dpm_tables_v0(hwmgr); 1020 1021 smu7_setup_default_pcie_table(hwmgr); 1022 1023 /* save a copy of the default DPM table */ 1024 memcpy(&(data->golden_dpm_table), &(data->dpm_table), 1025 sizeof(struct smu7_dpm_table)); 1026 1027 /* initialize ODN table */ 1028 if (hwmgr->od_enabled) { 1029 if (data->odn_dpm_table.max_vddc) { 1030 smu7_check_dpm_table_updated(hwmgr); 1031 } else { 1032 smu7_setup_voltage_range_from_vbios(hwmgr); 1033 smu7_odn_initial_default_setting(hwmgr); 1034 } 1035 } 1036 return 0; 1037 } 1038 1039 static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr) 1040 { 1041 1042 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1043 PHM_PlatformCaps_RegulatorHot)) 1044 return smum_send_msg_to_smc(hwmgr, 1045 PPSMC_MSG_EnableVRHotGPIOInterrupt, 1046 NULL); 1047 1048 return 0; 1049 } 1050 1051 static int 
smu7_enable_sclk_control(struct pp_hwmgr *hwmgr) 1052 { 1053 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, 1054 SCLK_PWRMGT_OFF, 0); 1055 return 0; 1056 } 1057 1058 static int smu7_enable_ulv(struct pp_hwmgr *hwmgr) 1059 { 1060 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1061 1062 if (data->ulv_supported) 1063 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL); 1064 1065 return 0; 1066 } 1067 1068 static int smu7_disable_ulv(struct pp_hwmgr *hwmgr) 1069 { 1070 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1071 1072 if (data->ulv_supported) 1073 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL); 1074 1075 return 0; 1076 } 1077 1078 static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) 1079 { 1080 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1081 PHM_PlatformCaps_SclkDeepSleep)) { 1082 if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL)) 1083 PP_ASSERT_WITH_CODE(false, 1084 "Attempt to enable Master Deep Sleep switch failed!", 1085 return -EINVAL); 1086 } else { 1087 if (smum_send_msg_to_smc(hwmgr, 1088 PPSMC_MSG_MASTER_DeepSleep_OFF, 1089 NULL)) { 1090 PP_ASSERT_WITH_CODE(false, 1091 "Attempt to disable Master Deep Sleep switch failed!", 1092 return -EINVAL); 1093 } 1094 } 1095 1096 return 0; 1097 } 1098 1099 static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) 1100 { 1101 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1102 PHM_PlatformCaps_SclkDeepSleep)) { 1103 if (smum_send_msg_to_smc(hwmgr, 1104 PPSMC_MSG_MASTER_DeepSleep_OFF, 1105 NULL)) { 1106 PP_ASSERT_WITH_CODE(false, 1107 "Attempt to disable Master Deep Sleep switch failed!", 1108 return -EINVAL); 1109 } 1110 } 1111 1112 return 0; 1113 } 1114 1115 static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr) 1116 { 1117 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1118 uint32_t soft_register_value = 0; 1119 uint32_t handshake_disables_offset = data->soft_regs_start 1120 + smum_get_offsetof(hwmgr, 1121 SMU_SoftRegisters, HandshakeDisables); 1122 1123 soft_register_value = cgs_read_ind_register(hwmgr->device, 1124 CGS_IND_REG__SMC, handshake_disables_offset); 1125 soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE; 1126 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 1127 handshake_disables_offset, soft_register_value); 1128 return 0; 1129 } 1130 1131 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr) 1132 { 1133 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1134 uint32_t soft_register_value = 0; 1135 uint32_t handshake_disables_offset = data->soft_regs_start 1136 + smum_get_offsetof(hwmgr, 1137 SMU_SoftRegisters, HandshakeDisables); 1138 1139 soft_register_value = cgs_read_ind_register(hwmgr->device, 1140 CGS_IND_REG__SMC, handshake_disables_offset); 1141 soft_register_value |= smum_get_mac_definition(hwmgr, 1142 SMU_UVD_MCLK_HANDSHAKE_DISABLE); 1143 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 1144 handshake_disables_offset, soft_register_value); 1145 return 0; 1146 } 1147 1148 static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 1149 { 1150 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1151 1152 /* enable SCLK dpm */ 1153 if (!data->sclk_dpm_key_disabled) { 1154 if (hwmgr->chip_id >= CHIP_POLARIS10 && 1155 hwmgr->chip_id <= CHIP_VEGAM) 1156 smu7_disable_sclk_vce_handshake(hwmgr); 1157 1158 PP_ASSERT_WITH_CODE( 1159 (0 == smum_send_msg_to_smc(hwmgr, 
PPSMC_MSG_DPM_Enable, NULL)), 1160 "Failed to enable SCLK DPM during DPM Start Function!", 1161 return -EINVAL); 1162 } 1163 1164 /* enable MCLK dpm */ 1165 if (0 == data->mclk_dpm_key_disabled) { 1166 if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK)) 1167 smu7_disable_handshake_uvd(hwmgr); 1168 1169 PP_ASSERT_WITH_CODE( 1170 (0 == smum_send_msg_to_smc(hwmgr, 1171 PPSMC_MSG_MCLKDPM_Enable, 1172 NULL)), 1173 "Failed to enable MCLK DPM during DPM Start Function!", 1174 return -EINVAL); 1175 1176 if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) || 1177 (hwmgr->chip_id == CHIP_POLARIS10) || 1178 (hwmgr->chip_id == CHIP_POLARIS11) || 1179 (hwmgr->chip_id == CHIP_POLARIS12) || 1180 (hwmgr->chip_id == CHIP_TONGA)) 1181 PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); 1182 1183 1184 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { 1185 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5); 1186 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5); 1187 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005); 1188 udelay(10); 1189 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005); 1190 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005); 1191 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005); 1192 } else { 1193 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); 1194 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); 1195 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); 1196 udelay(10); 1197 if (hwmgr->chip_id == CHIP_VEGAM) { 1198 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009); 1199 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009); 1200 } else { 1201 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); 1202 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); 1203 } 1204 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); 1205 } 1206 } 1207 1208 return 0; 1209 } 1210 1211 static int smu7_start_dpm(struct pp_hwmgr *hwmgr) 1212 { 1213 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1214 1215 /*enable general power management */ 1216 1217 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, 1218 GLOBAL_PWRMGT_EN, 1); 1219 1220 /* enable sclk deep sleep */ 1221 1222 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, 1223 DYNAMIC_PM_EN, 1); 1224 1225 /* prepare for PCIE DPM */ 1226 1227 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 1228 data->soft_regs_start + 1229 smum_get_offsetof(hwmgr, SMU_SoftRegisters, 1230 VoltageChangeTimeout), 0x1000); 1231 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, 1232 SWRST_COMMAND_1, RESETLC, 0x0); 1233 1234 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) 1235 cgs_write_register(hwmgr->device, 0x1488, 1236 (cgs_read_register(hwmgr->device, 0x1488) & ~0x1)); 1237 1238 if (smu7_enable_sclk_mclk_dpm(hwmgr)) { 1239 pr_err("Failed to enable Sclk DPM and Mclk DPM!"); 1240 return -EINVAL; 1241 } 1242 1243 /* enable PCIE dpm */ 1244 if (0 == data->pcie_dpm_key_disabled) { 1245 PP_ASSERT_WITH_CODE( 1246 (0 == smum_send_msg_to_smc(hwmgr, 1247 PPSMC_MSG_PCIeDPM_Enable, 1248 NULL)), 1249 "Failed to enable pcie DPM during DPM Start Function!", 1250 return -EINVAL); 1251 } 1252 1253 if 
(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1254 PHM_PlatformCaps_Falcon_QuickTransition)) { 1255 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr, 1256 PPSMC_MSG_EnableACDCGPIOInterrupt, 1257 NULL)), 1258 "Failed to enable AC DC GPIO Interrupt!", 1259 ); 1260 } 1261 1262 return 0; 1263 } 1264 1265 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 1266 { 1267 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1268 1269 /* disable SCLK dpm */ 1270 if (!data->sclk_dpm_key_disabled) { 1271 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 1272 "Trying to disable SCLK DPM when DPM is disabled", 1273 return 0); 1274 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL); 1275 } 1276 1277 /* disable MCLK dpm */ 1278 if (!data->mclk_dpm_key_disabled) { 1279 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 1280 "Trying to disable MCLK DPM when DPM is disabled", 1281 return 0); 1282 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL); 1283 } 1284 1285 return 0; 1286 } 1287 1288 static int smu7_stop_dpm(struct pp_hwmgr *hwmgr) 1289 { 1290 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1291 1292 /* disable general power management */ 1293 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, 1294 GLOBAL_PWRMGT_EN, 0); 1295 /* disable sclk deep sleep */ 1296 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, 1297 DYNAMIC_PM_EN, 0); 1298 1299 /* disable PCIE dpm */ 1300 if (!data->pcie_dpm_key_disabled) { 1301 PP_ASSERT_WITH_CODE( 1302 (smum_send_msg_to_smc(hwmgr, 1303 PPSMC_MSG_PCIeDPM_Disable, 1304 NULL) == 0), 1305 "Failed to disable pcie DPM during DPM Stop Function!", 1306 return -EINVAL); 1307 } 1308 1309 smu7_disable_sclk_mclk_dpm(hwmgr); 1310 1311 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 1312 "Trying to disable voltage DPM when DPM is disabled", 1313 return 0); 1314 1315 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL); 1316 1317 return 0; 1318 } 1319 1320 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) 1321 { 1322 bool protection; 1323 enum DPM_EVENT_SRC src; 1324 1325 switch (sources) { 1326 default: 1327 pr_err("Unknown throttling event sources."); 1328 fallthrough; 1329 case 0: 1330 protection = false; 1331 /* src is unused */ 1332 break; 1333 case (1 << PHM_AutoThrottleSource_Thermal): 1334 protection = true; 1335 src = DPM_EVENT_SRC_DIGITAL; 1336 break; 1337 case (1 << PHM_AutoThrottleSource_External): 1338 protection = true; 1339 src = DPM_EVENT_SRC_EXTERNAL; 1340 break; 1341 case (1 << PHM_AutoThrottleSource_External) | 1342 (1 << PHM_AutoThrottleSource_Thermal): 1343 protection = true; 1344 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; 1345 break; 1346 } 1347 /* Order matters - don't enable thermal protection for the wrong source. 
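 * Program CG_THERMAL_CTRL.DPM_EVENT_SRC first, then clear THERMAL_PROTECTION_DIS.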
*/ 1348 if (protection) { 1349 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, 1350 DPM_EVENT_SRC, src); 1351 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, 1352 THERMAL_PROTECTION_DIS, 1353 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1354 PHM_PlatformCaps_ThermalController)); 1355 } else 1356 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, 1357 THERMAL_PROTECTION_DIS, 1); 1358 } 1359 1360 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, 1361 PHM_AutoThrottleSource source) 1362 { 1363 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1364 1365 if (!(data->active_auto_throttle_sources & (1 << source))) { 1366 data->active_auto_throttle_sources |= 1 << source; 1367 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); 1368 } 1369 return 0; 1370 } 1371 1372 static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) 1373 { 1374 return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); 1375 } 1376 1377 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, 1378 PHM_AutoThrottleSource source) 1379 { 1380 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1381 1382 if (data->active_auto_throttle_sources & (1 << source)) { 1383 data->active_auto_throttle_sources &= ~(1 << source); 1384 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); 1385 } 1386 return 0; 1387 } 1388 1389 static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) 1390 { 1391 return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); 1392 } 1393 1394 static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) 1395 { 1396 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1397 data->pcie_performance_request = true; 1398 1399 return 0; 1400 } 1401 1402 static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr, 1403 uint32_t *cac_config_regs, 1404 AtomCtrl_EDCLeakgeTable *edc_leakage_table) 1405 { 1406 uint32_t data, i = 0; 1407 1408 while (cac_config_regs[i] != 0xFFFFFFFF) { 1409 data = edc_leakage_table->DIDT_REG[i]; 1410 cgs_write_ind_register(hwmgr->device, 1411 CGS_IND_REG__DIDT, 1412 cac_config_regs[i], 1413 data); 1414 i++; 1415 } 1416 1417 return 0; 1418 } 1419 1420 static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr) 1421 { 1422 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1423 int ret = 0; 1424 1425 if (!data->disable_edc_leakage_controller && 1426 data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset && 1427 data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) { 1428 ret = smu7_program_edc_didt_registers(hwmgr, 1429 DIDTEDCConfig_P12, 1430 &data->edc_leakage_table); 1431 if (ret) 1432 return ret; 1433 1434 ret = smum_send_msg_to_smc(hwmgr, 1435 (PPSMC_Msg)PPSMC_MSG_EnableEDCController, 1436 NULL); 1437 } else { 1438 ret = smum_send_msg_to_smc(hwmgr, 1439 (PPSMC_Msg)PPSMC_MSG_DisableEDCController, 1440 NULL); 1441 } 1442 1443 return ret; 1444 } 1445 1446 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1447 { 1448 int tmp_result = 0; 1449 int result = 0; 1450 1451 if (smu7_voltage_control(hwmgr)) { 1452 tmp_result = smu7_enable_voltage_control(hwmgr); 1453 PP_ASSERT_WITH_CODE(tmp_result == 0, 1454 "Failed to enable voltage control!", 1455 result = tmp_result); 1456 1457 tmp_result = smu7_construct_voltage_tables(hwmgr); 1458 PP_ASSERT_WITH_CODE((0 == tmp_result), 1459 
"Failed to construct voltage tables!", 1460 result = tmp_result); 1461 } 1462 smum_initialize_mc_reg_table(hwmgr); 1463 1464 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1465 PHM_PlatformCaps_EngineSpreadSpectrumSupport)) 1466 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 1467 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); 1468 1469 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1470 PHM_PlatformCaps_ThermalController)) 1471 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 1472 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); 1473 1474 tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr); 1475 PP_ASSERT_WITH_CODE((0 == tmp_result), 1476 "Failed to program static screen threshold parameters!", 1477 result = tmp_result); 1478 1479 tmp_result = smu7_enable_display_gap(hwmgr); 1480 PP_ASSERT_WITH_CODE((0 == tmp_result), 1481 "Failed to enable display gap!", result = tmp_result); 1482 1483 tmp_result = smu7_program_voting_clients(hwmgr); 1484 PP_ASSERT_WITH_CODE((0 == tmp_result), 1485 "Failed to program voting clients!", result = tmp_result); 1486 1487 tmp_result = smum_process_firmware_header(hwmgr); 1488 PP_ASSERT_WITH_CODE((0 == tmp_result), 1489 "Failed to process firmware header!", result = tmp_result); 1490 1491 if (hwmgr->chip_id != CHIP_VEGAM) { 1492 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr); 1493 PP_ASSERT_WITH_CODE((0 == tmp_result), 1494 "Failed to initialize switch from ArbF0 to F1!", 1495 result = tmp_result); 1496 } 1497 1498 result = smu7_setup_default_dpm_tables(hwmgr); 1499 PP_ASSERT_WITH_CODE(0 == result, 1500 "Failed to setup default DPM tables!", return result); 1501 1502 tmp_result = smum_init_smc_table(hwmgr); 1503 PP_ASSERT_WITH_CODE((0 == tmp_result), 1504 "Failed to initialize SMC table!", result = tmp_result); 1505 1506 tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr); 1507 PP_ASSERT_WITH_CODE((0 == tmp_result), 1508 "Failed to enable VR hot GPIO interrupt!", result = tmp_result); 1509 1510 if (hwmgr->chip_id >= CHIP_POLARIS10 && 1511 hwmgr->chip_id <= CHIP_VEGAM) { 1512 tmp_result = smu7_notify_has_display(hwmgr); 1513 PP_ASSERT_WITH_CODE((0 == tmp_result), 1514 "Failed to enable display setting!", result = tmp_result); 1515 } else { 1516 smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL); 1517 } 1518 1519 if (hwmgr->chip_id >= CHIP_POLARIS10 && 1520 hwmgr->chip_id <= CHIP_VEGAM) { 1521 tmp_result = smu7_populate_edc_leakage_registers(hwmgr); 1522 PP_ASSERT_WITH_CODE((0 == tmp_result), 1523 "Failed to populate edc leakage registers!", result = tmp_result); 1524 } 1525 1526 tmp_result = smu7_enable_sclk_control(hwmgr); 1527 PP_ASSERT_WITH_CODE((0 == tmp_result), 1528 "Failed to enable SCLK control!", result = tmp_result); 1529 1530 tmp_result = smu7_enable_smc_voltage_controller(hwmgr); 1531 PP_ASSERT_WITH_CODE((0 == tmp_result), 1532 "Failed to enable voltage control!", result = tmp_result); 1533 1534 tmp_result = smu7_enable_ulv(hwmgr); 1535 PP_ASSERT_WITH_CODE((0 == tmp_result), 1536 "Failed to enable ULV!", result = tmp_result); 1537 1538 tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr); 1539 PP_ASSERT_WITH_CODE((0 == tmp_result), 1540 "Failed to enable deep sleep master switch!", result = tmp_result); 1541 1542 tmp_result = smu7_enable_didt_config(hwmgr); 1543 PP_ASSERT_WITH_CODE((tmp_result == 0), 1544 "Failed to enable deep sleep master switch!", result = tmp_result); 1545 1546 tmp_result = smu7_start_dpm(hwmgr); 1547 PP_ASSERT_WITH_CODE((0 == tmp_result), 1548 
"Failed to start DPM!", result = tmp_result); 1549 1550 tmp_result = smu7_enable_smc_cac(hwmgr); 1551 PP_ASSERT_WITH_CODE((0 == tmp_result), 1552 "Failed to enable SMC CAC!", result = tmp_result); 1553 1554 tmp_result = smu7_enable_power_containment(hwmgr); 1555 PP_ASSERT_WITH_CODE((0 == tmp_result), 1556 "Failed to enable power containment!", result = tmp_result); 1557 1558 tmp_result = smu7_power_control_set_level(hwmgr); 1559 PP_ASSERT_WITH_CODE((0 == tmp_result), 1560 "Failed to power control set level!", result = tmp_result); 1561 1562 tmp_result = smu7_enable_thermal_auto_throttle(hwmgr); 1563 PP_ASSERT_WITH_CODE((0 == tmp_result), 1564 "Failed to enable thermal auto throttle!", result = tmp_result); 1565 1566 tmp_result = smu7_pcie_performance_request(hwmgr); 1567 PP_ASSERT_WITH_CODE((0 == tmp_result), 1568 "pcie performance request failed!", result = tmp_result); 1569 1570 return 0; 1571 } 1572 1573 static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) 1574 { 1575 if (!hwmgr->avfs_supported) 1576 return 0; 1577 1578 if (enable) { 1579 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, 1580 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { 1581 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( 1582 hwmgr, PPSMC_MSG_EnableAvfs, NULL), 1583 "Failed to enable AVFS!", 1584 return -EINVAL); 1585 } 1586 } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, 1587 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { 1588 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( 1589 hwmgr, PPSMC_MSG_DisableAvfs, NULL), 1590 "Failed to disable AVFS!", 1591 return -EINVAL); 1592 } 1593 1594 return 0; 1595 } 1596 1597 static int smu7_update_avfs(struct pp_hwmgr *hwmgr) 1598 { 1599 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1600 1601 if (!hwmgr->avfs_supported) 1602 return 0; 1603 1604 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { 1605 smu7_avfs_control(hwmgr, false); 1606 } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { 1607 smu7_avfs_control(hwmgr, false); 1608 smu7_avfs_control(hwmgr, true); 1609 } else { 1610 smu7_avfs_control(hwmgr, true); 1611 } 1612 1613 return 0; 1614 } 1615 1616 static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 1617 { 1618 int tmp_result, result = 0; 1619 1620 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1621 PHM_PlatformCaps_ThermalController)) 1622 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 1623 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); 1624 1625 tmp_result = smu7_disable_power_containment(hwmgr); 1626 PP_ASSERT_WITH_CODE((tmp_result == 0), 1627 "Failed to disable power containment!", result = tmp_result); 1628 1629 tmp_result = smu7_disable_smc_cac(hwmgr); 1630 PP_ASSERT_WITH_CODE((tmp_result == 0), 1631 "Failed to disable SMC CAC!", result = tmp_result); 1632 1633 tmp_result = smu7_disable_didt_config(hwmgr); 1634 PP_ASSERT_WITH_CODE((tmp_result == 0), 1635 "Failed to disable DIDT!", result = tmp_result); 1636 1637 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 1638 CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); 1639 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 1640 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); 1641 1642 tmp_result = smu7_disable_thermal_auto_throttle(hwmgr); 1643 PP_ASSERT_WITH_CODE((tmp_result == 0), 1644 "Failed to disable thermal auto throttle!", result = tmp_result); 1645 1646 tmp_result = smu7_avfs_control(hwmgr, false); 1647 PP_ASSERT_WITH_CODE((tmp_result == 0), 1648 "Failed to disable AVFS!", result = tmp_result); 1649 1650 tmp_result = 
smu7_stop_dpm(hwmgr); 1651 PP_ASSERT_WITH_CODE((tmp_result == 0), 1652 "Failed to stop DPM!", result = tmp_result); 1653 1654 tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr); 1655 PP_ASSERT_WITH_CODE((tmp_result == 0), 1656 "Failed to disable deep sleep master switch!", result = tmp_result); 1657 1658 tmp_result = smu7_disable_ulv(hwmgr); 1659 PP_ASSERT_WITH_CODE((tmp_result == 0), 1660 "Failed to disable ULV!", result = tmp_result); 1661 1662 tmp_result = smu7_clear_voting_clients(hwmgr); 1663 PP_ASSERT_WITH_CODE((tmp_result == 0), 1664 "Failed to clear voting clients!", result = tmp_result); 1665 1666 tmp_result = smu7_reset_to_default(hwmgr); 1667 PP_ASSERT_WITH_CODE((tmp_result == 0), 1668 "Failed to reset to default!", result = tmp_result); 1669 1670 tmp_result = smu7_force_switch_to_arbf0(hwmgr); 1671 PP_ASSERT_WITH_CODE((tmp_result == 0), 1672 "Failed to force to switch arbf0!", result = tmp_result); 1673 1674 return result; 1675 } 1676 1677 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) 1678 { 1679 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1680 struct phm_ppt_v1_information *table_info = 1681 (struct phm_ppt_v1_information *)(hwmgr->pptable); 1682 struct amdgpu_device *adev = hwmgr->adev; 1683 uint8_t tmp1, tmp2; 1684 uint16_t tmp3 = 0; 1685 1686 data->dll_default_on = false; 1687 data->mclk_dpm0_activity_target = 0xa; 1688 data->vddc_vddgfx_delta = 300; 1689 data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; 1690 data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; 1691 data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; 1692 data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1; 1693 data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; 1694 data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3; 1695 data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4; 1696 data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5; 1697 data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6; 1698 data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7; 1699 1700 data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; 1701 data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; 1702 data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true; 1703 /* need to set voltage control types before EVV patching */ 1704 data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; 1705 data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; 1706 data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE; 1707 data->enable_tdc_limit_feature = true; 1708 data->enable_pkg_pwr_tracking_feature = true; 1709 data->force_pcie_gen = PP_PCIEGenInvalid; 1710 data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? 
true : false; 1711 data->current_profile_setting.bupdate_sclk = 1; 1712 data->current_profile_setting.sclk_up_hyst = 0; 1713 data->current_profile_setting.sclk_down_hyst = 100; 1714 data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT; 1715 data->current_profile_setting.bupdate_mclk = 1; 1716 if (adev->gmc.vram_width == 256) { 1717 data->current_profile_setting.mclk_up_hyst = 10; 1718 data->current_profile_setting.mclk_down_hyst = 60; 1719 data->current_profile_setting.mclk_activity = 25; 1720 } else if (adev->gmc.vram_width == 128) { 1721 data->current_profile_setting.mclk_up_hyst = 5; 1722 data->current_profile_setting.mclk_down_hyst = 16; 1723 data->current_profile_setting.mclk_activity = 20; 1724 } else if (adev->gmc.vram_width == 64) { 1725 data->current_profile_setting.mclk_up_hyst = 3; 1726 data->current_profile_setting.mclk_down_hyst = 16; 1727 data->current_profile_setting.mclk_activity = 20; 1728 } 1729 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; 1730 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 1731 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 1732 1733 if (hwmgr->chip_id == CHIP_HAWAII) { 1734 data->thermal_temp_setting.temperature_low = 94500; 1735 data->thermal_temp_setting.temperature_high = 95000; 1736 data->thermal_temp_setting.temperature_shutdown = 104000; 1737 } else { 1738 data->thermal_temp_setting.temperature_low = 99500; 1739 data->thermal_temp_setting.temperature_high = 100000; 1740 data->thermal_temp_setting.temperature_shutdown = 104000; 1741 } 1742 1743 data->fast_watermark_threshold = 100; 1744 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1745 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) 1746 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1747 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1748 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) 1749 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; 1750 1751 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1752 PHM_PlatformCaps_ControlVDDGFX)) { 1753 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1754 VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { 1755 data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1756 } 1757 } 1758 1759 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1760 PHM_PlatformCaps_EnableMVDDControl)) { 1761 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1762 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) 1763 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; 1764 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1765 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) 1766 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1767 } 1768 1769 if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) 1770 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1771 PHM_PlatformCaps_ControlVDDGFX); 1772 1773 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1774 PHM_PlatformCaps_ControlVDDCI)) { 1775 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1776 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) 1777 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; 1778 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1779 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) 1780 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1781 } 1782 1783 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) 1784 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1785 PHM_PlatformCaps_EnableMVDDControl); 1786 1787 if (data->vddci_control == 
SMU7_VOLTAGE_CONTROL_NONE) 1788 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1789 PHM_PlatformCaps_ControlVDDCI); 1790 1791 data->vddc_phase_shed_control = 1; 1792 if ((hwmgr->chip_id == CHIP_POLARIS12) || 1793 ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) || 1794 ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) || 1795 ASICID_IS_P30(adev->pdev->device, adev->pdev->revision) || 1796 ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) { 1797 if (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 1798 atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2, 1799 &tmp3); 1800 tmp3 = (tmp3 >> 5) & 0x3; 1801 data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; 1802 } 1803 } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { 1804 data->vddc_phase_shed_control = 1; 1805 } 1806 1807 if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK) 1808 && (table_info->cac_dtp_table->usClockStretchAmount != 0)) 1809 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1810 PHM_PlatformCaps_ClockStretcher); 1811 1812 data->pcie_gen_performance.max = PP_PCIEGen1; 1813 data->pcie_gen_performance.min = PP_PCIEGen3; 1814 data->pcie_gen_power_saving.max = PP_PCIEGen1; 1815 data->pcie_gen_power_saving.min = PP_PCIEGen3; 1816 data->pcie_lane_performance.max = 0; 1817 data->pcie_lane_performance.min = 16; 1818 data->pcie_lane_power_saving.max = 0; 1819 data->pcie_lane_power_saving.min = 16; 1820 1821 1822 if (adev->pg_flags & AMD_PG_SUPPORT_UVD) 1823 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1824 PHM_PlatformCaps_UVDPowerGating); 1825 if (adev->pg_flags & AMD_PG_SUPPORT_VCE) 1826 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1827 PHM_PlatformCaps_VCEPowerGating); 1828 1829 data->disable_edc_leakage_controller = true; 1830 if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) || 1831 ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) || 1832 (adev->asic_type == CHIP_POLARIS12) || 1833 (adev->asic_type == CHIP_VEGAM)) 1834 data->disable_edc_leakage_controller = false; 1835 1836 if (!atomctrl_is_asic_internal_ss_supported(hwmgr)) { 1837 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1838 PHM_PlatformCaps_MemorySpreadSpectrumSupport); 1839 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1840 PHM_PlatformCaps_EngineSpreadSpectrumSupport); 1841 } 1842 1843 if ((adev->pdev->device == 0x699F) && 1844 (adev->pdev->revision == 0xCF)) { 1845 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1846 PHM_PlatformCaps_PowerContainment); 1847 data->enable_tdc_limit_feature = false; 1848 data->enable_pkg_pwr_tracking_feature = false; 1849 data->disable_edc_leakage_controller = true; 1850 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1851 PHM_PlatformCaps_ClockStretcher); 1852 } 1853 } 1854 1855 static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr) 1856 { 1857 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1858 struct amdgpu_device *adev = hwmgr->adev; 1859 uint32_t asicrev1, evv_revision, max, min; 1860 1861 atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB, 1862 &evv_revision); 1863 1864 atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1); 1865 1866 if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) || 1867 ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) { 1868 min = 1200; 1869 max = 2500; 1870 } else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) || 1871 ASICID_IS_P31(adev->pdev->device, 
adev->pdev->revision)) { 1872 min = 900; 1873 max= 2100; 1874 } else if (hwmgr->chip_id == CHIP_POLARIS10) { 1875 if (adev->pdev->subsystem_vendor == 0x106B) { 1876 min = 1000; 1877 max = 2300; 1878 } else { 1879 if (evv_revision == 0) { 1880 min = 1000; 1881 max = 2300; 1882 } else if (evv_revision == 1) { 1883 if (asicrev1 == 326) { 1884 min = 1200; 1885 max = 2500; 1886 /* TODO: PATCH RO in VBIOS */ 1887 } else { 1888 min = 1200; 1889 max = 2000; 1890 } 1891 } else if (evv_revision == 2) { 1892 min = 1200; 1893 max = 2500; 1894 } 1895 } 1896 } else if ((hwmgr->chip_id == CHIP_POLARIS11) || 1897 (hwmgr->chip_id == CHIP_POLARIS12)) { 1898 min = 1100; 1899 max = 2100; 1900 } 1901 1902 data->ro_range_minimum = min; 1903 data->ro_range_maximum = max; 1904 1905 /* TODO: PATCH RO in VBIOS here */ 1906 1907 return 0; 1908 } 1909 1910 /** 1911 * Get Leakage VDDC based on leakage ID. 1912 * 1913 * @param hwmgr the address of the powerplay hardware manager. 1914 * @return always 0 1915 */ 1916 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) 1917 { 1918 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1919 uint16_t vv_id; 1920 uint16_t vddc = 0; 1921 uint16_t vddgfx = 0; 1922 uint16_t i, j; 1923 uint32_t sclk = 0; 1924 struct phm_ppt_v1_information *table_info = 1925 (struct phm_ppt_v1_information *)hwmgr->pptable; 1926 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; 1927 1928 if (hwmgr->chip_id == CHIP_POLARIS10 || 1929 hwmgr->chip_id == CHIP_POLARIS11 || 1930 hwmgr->chip_id == CHIP_POLARIS12) 1931 smu7_calculate_ro_range(hwmgr); 1932 1933 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { 1934 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 1935 1936 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 1937 if ((hwmgr->pp_table_version == PP_TABLE_V1) 1938 && !phm_get_sclk_for_voltage_evv(hwmgr, 1939 table_info->vddgfx_lookup_table, vv_id, &sclk)) { 1940 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1941 PHM_PlatformCaps_ClockStretcher)) { 1942 sclk_table = table_info->vdd_dep_on_sclk; 1943 1944 for (j = 1; j < sclk_table->count; j++) { 1945 if (sclk_table->entries[j].clk == sclk && 1946 sclk_table->entries[j].cks_enable == 0) { 1947 sclk += 5000; 1948 break; 1949 } 1950 } 1951 } 1952 if (0 == atomctrl_get_voltage_evv_on_sclk 1953 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, 1954 vv_id, &vddgfx)) { 1955 /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. 
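	 * Voltages here are in mV, so the check below rejects a result of zero
	 * or anything at or above 2000 mV.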
*/ 1956 PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL); 1957 1958 /* the voltage should not be zero nor equal to leakage ID */ 1959 if (vddgfx != 0 && vddgfx != vv_id) { 1960 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; 1961 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id; 1962 data->vddcgfx_leakage.count++; 1963 } 1964 } else { 1965 pr_info("Error retrieving EVV voltage value!\n"); 1966 } 1967 } 1968 } else { 1969 if ((hwmgr->pp_table_version == PP_TABLE_V0) 1970 || !phm_get_sclk_for_voltage_evv(hwmgr, 1971 table_info->vddc_lookup_table, vv_id, &sclk)) { 1972 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1973 PHM_PlatformCaps_ClockStretcher)) { 1974 if (table_info == NULL) 1975 return -EINVAL; 1976 sclk_table = table_info->vdd_dep_on_sclk; 1977 1978 for (j = 1; j < sclk_table->count; j++) { 1979 if (sclk_table->entries[j].clk == sclk && 1980 sclk_table->entries[j].cks_enable == 0) { 1981 sclk += 5000; 1982 break; 1983 } 1984 } 1985 } 1986 1987 if (phm_get_voltage_evv_on_sclk(hwmgr, 1988 VOLTAGE_TYPE_VDDC, 1989 sclk, vv_id, &vddc) == 0) { 1990 if (vddc >= 2000 || vddc == 0) 1991 return -EINVAL; 1992 } else { 1993 pr_debug("failed to retrieving EVV voltage!\n"); 1994 continue; 1995 } 1996 1997 /* the voltage should not be zero nor equal to leakage ID */ 1998 if (vddc != 0 && vddc != vv_id) { 1999 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc); 2000 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; 2001 data->vddc_leakage.count++; 2002 } 2003 } 2004 } 2005 } 2006 2007 return 0; 2008 } 2009 2010 /** 2011 * Change virtual leakage voltage to actual value. 2012 * 2013 * @param hwmgr the address of the powerplay hardware manager. 2014 * @param pointer to changing voltage 2015 * @param pointer to leakage table 2016 */ 2017 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr, 2018 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table) 2019 { 2020 uint32_t index; 2021 2022 /* search for leakage voltage ID 0xff01 ~ 0xff08 */ 2023 for (index = 0; index < leakage_table->count; index++) { 2024 /* if this voltage matches a leakage voltage ID */ 2025 /* patch with actual leakage voltage */ 2026 if (leakage_table->leakage_id[index] == *voltage) { 2027 *voltage = leakage_table->actual_voltage[index]; 2028 break; 2029 } 2030 } 2031 2032 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) 2033 pr_err("Voltage value looks like a Leakage ID but it's not patched \n"); 2034 } 2035 2036 /** 2037 * Patch voltage lookup table by EVV leakages. 2038 * 2039 * @param hwmgr the address of the powerplay hardware manager. 
2040 * @param pointer to voltage lookup table 2041 * @param pointer to leakage table 2042 * @return always 0 2043 */ 2044 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, 2045 phm_ppt_v1_voltage_lookup_table *lookup_table, 2046 struct smu7_leakage_voltage *leakage_table) 2047 { 2048 uint32_t i; 2049 2050 for (i = 0; i < lookup_table->count; i++) 2051 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, 2052 &lookup_table->entries[i].us_vdd, leakage_table); 2053 2054 return 0; 2055 } 2056 2057 static int smu7_patch_clock_voltage_limits_with_vddc_leakage( 2058 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table, 2059 uint16_t *vddc) 2060 { 2061 struct phm_ppt_v1_information *table_info = 2062 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2063 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); 2064 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = 2065 table_info->max_clock_voltage_on_dc.vddc; 2066 return 0; 2067 } 2068 2069 static int smu7_patch_voltage_dependency_tables_with_lookup_table( 2070 struct pp_hwmgr *hwmgr) 2071 { 2072 uint8_t entry_id; 2073 uint8_t voltage_id; 2074 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2075 struct phm_ppt_v1_information *table_info = 2076 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2077 2078 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = 2079 table_info->vdd_dep_on_sclk; 2080 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = 2081 table_info->vdd_dep_on_mclk; 2082 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = 2083 table_info->mm_dep_table; 2084 2085 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2086 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { 2087 voltage_id = sclk_table->entries[entry_id].vddInd; 2088 sclk_table->entries[entry_id].vddgfx = 2089 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd; 2090 } 2091 } else { 2092 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { 2093 voltage_id = sclk_table->entries[entry_id].vddInd; 2094 sclk_table->entries[entry_id].vddc = 2095 table_info->vddc_lookup_table->entries[voltage_id].us_vdd; 2096 } 2097 } 2098 2099 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { 2100 voltage_id = mclk_table->entries[entry_id].vddInd; 2101 mclk_table->entries[entry_id].vddc = 2102 table_info->vddc_lookup_table->entries[voltage_id].us_vdd; 2103 } 2104 2105 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { 2106 voltage_id = mm_table->entries[entry_id].vddcInd; 2107 mm_table->entries[entry_id].vddc = 2108 table_info->vddc_lookup_table->entries[voltage_id].us_vdd; 2109 } 2110 2111 return 0; 2112 2113 } 2114 2115 static int phm_add_voltage(struct pp_hwmgr *hwmgr, 2116 phm_ppt_v1_voltage_lookup_table *look_up_table, 2117 phm_ppt_v1_voltage_lookup_record *record) 2118 { 2119 uint32_t i; 2120 2121 PP_ASSERT_WITH_CODE((NULL != look_up_table), 2122 "Lookup Table empty.", return -EINVAL); 2123 PP_ASSERT_WITH_CODE((0 != look_up_table->count), 2124 "Lookup Table empty.", return -EINVAL); 2125 2126 i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); 2127 PP_ASSERT_WITH_CODE((i >= look_up_table->count), 2128 "Lookup Table is full.", return -EINVAL); 2129 2130 /* This is to avoid entering duplicate calculated records. 
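	 * An entry that already exists with the same us_vdd and is marked as
	 * calculated is reused as-is; otherwise the matching slot (or the first
	 * free slot at the end) is filled in below, and count is only bumped
	 * when a new entry is appended.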
*/ 2131 for (i = 0; i < look_up_table->count; i++) { 2132 if (look_up_table->entries[i].us_vdd == record->us_vdd) { 2133 if (look_up_table->entries[i].us_calculated == 1) 2134 return 0; 2135 break; 2136 } 2137 } 2138 2139 look_up_table->entries[i].us_calculated = 1; 2140 look_up_table->entries[i].us_vdd = record->us_vdd; 2141 look_up_table->entries[i].us_cac_low = record->us_cac_low; 2142 look_up_table->entries[i].us_cac_mid = record->us_cac_mid; 2143 look_up_table->entries[i].us_cac_high = record->us_cac_high; 2144 /* Only increment the count when we're appending, not replacing duplicate entry. */ 2145 if (i == look_up_table->count) 2146 look_up_table->count++; 2147 2148 return 0; 2149 } 2150 2151 2152 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) 2153 { 2154 uint8_t entry_id; 2155 struct phm_ppt_v1_voltage_lookup_record v_record; 2156 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2157 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 2158 2159 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; 2160 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; 2161 2162 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2163 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { 2164 if (sclk_table->entries[entry_id].vdd_offset & (1 << 15)) 2165 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + 2166 sclk_table->entries[entry_id].vdd_offset - 0xFFFF; 2167 else 2168 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + 2169 sclk_table->entries[entry_id].vdd_offset; 2170 2171 sclk_table->entries[entry_id].vddc = 2172 v_record.us_cac_low = v_record.us_cac_mid = 2173 v_record.us_cac_high = v_record.us_vdd; 2174 2175 phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); 2176 } 2177 2178 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { 2179 if (mclk_table->entries[entry_id].vdd_offset & (1 << 15)) 2180 v_record.us_vdd = mclk_table->entries[entry_id].vddc + 2181 mclk_table->entries[entry_id].vdd_offset - 0xFFFF; 2182 else 2183 v_record.us_vdd = mclk_table->entries[entry_id].vddc + 2184 mclk_table->entries[entry_id].vdd_offset; 2185 2186 mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low = 2187 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; 2188 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); 2189 } 2190 } 2191 return 0; 2192 } 2193 2194 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) 2195 { 2196 uint8_t entry_id; 2197 struct phm_ppt_v1_voltage_lookup_record v_record; 2198 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2199 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 2200 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; 2201 2202 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2203 for (entry_id = 0; entry_id < mm_table->count; entry_id++) { 2204 if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15)) 2205 v_record.us_vdd = mm_table->entries[entry_id].vddc + 2206 mm_table->entries[entry_id].vddgfx_offset - 0xFFFF; 2207 else 2208 v_record.us_vdd = mm_table->entries[entry_id].vddc + 2209 mm_table->entries[entry_id].vddgfx_offset; 2210 2211 /* Add the calculated VDDGFX to the VDDGFX lookup table */ 2212 mm_table->entries[entry_id].vddgfx = v_record.us_cac_low = 2213 v_record.us_cac_mid = 
v_record.us_cac_high = v_record.us_vdd; 2214 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); 2215 } 2216 } 2217 return 0; 2218 } 2219 2220 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, 2221 struct phm_ppt_v1_voltage_lookup_table *lookup_table) 2222 { 2223 uint32_t table_size, i, j; 2224 table_size = lookup_table->count; 2225 2226 PP_ASSERT_WITH_CODE(0 != lookup_table->count, 2227 "Lookup table is empty", return -EINVAL); 2228 2229 /* Sorting voltages */ 2230 for (i = 0; i < table_size - 1; i++) { 2231 for (j = i + 1; j > 0; j--) { 2232 if (lookup_table->entries[j].us_vdd < 2233 lookup_table->entries[j - 1].us_vdd) { 2234 swap(lookup_table->entries[j - 1], 2235 lookup_table->entries[j]); 2236 } 2237 } 2238 } 2239 2240 return 0; 2241 } 2242 2243 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr) 2244 { 2245 int result = 0; 2246 int tmp_result; 2247 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2248 struct phm_ppt_v1_information *table_info = 2249 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2250 2251 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2252 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, 2253 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); 2254 if (tmp_result != 0) 2255 result = tmp_result; 2256 2257 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, 2258 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage)); 2259 } else { 2260 2261 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, 2262 table_info->vddc_lookup_table, &(data->vddc_leakage)); 2263 if (tmp_result) 2264 result = tmp_result; 2265 2266 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, 2267 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); 2268 if (tmp_result) 2269 result = tmp_result; 2270 } 2271 2272 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr); 2273 if (tmp_result) 2274 result = tmp_result; 2275 2276 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr); 2277 if (tmp_result) 2278 result = tmp_result; 2279 2280 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr); 2281 if (tmp_result) 2282 result = tmp_result; 2283 2284 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table); 2285 if (tmp_result) 2286 result = tmp_result; 2287 2288 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); 2289 if (tmp_result) 2290 result = tmp_result; 2291 2292 return result; 2293 } 2294 2295 static int smu7_find_highest_vddc(struct pp_hwmgr *hwmgr) 2296 { 2297 struct phm_ppt_v1_information *table_info = 2298 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2299 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = 2300 table_info->vdd_dep_on_sclk; 2301 struct phm_ppt_v1_voltage_lookup_table *lookup_table = 2302 table_info->vddc_lookup_table; 2303 uint16_t highest_voltage; 2304 uint32_t i; 2305 2306 highest_voltage = allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; 2307 2308 for (i = 0; i < lookup_table->count; i++) { 2309 if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 && 2310 lookup_table->entries[i].us_vdd > highest_voltage) 2311 highest_voltage = lookup_table->entries[i].us_vdd; 2312 } 2313 2314 return highest_voltage; 2315 } 2316 2317 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) 2318 { 2319 struct phm_ppt_v1_information *table_info = 2320 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2321 2322 struct 
phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
		table_info->vdd_dep_on_sclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
		table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
		"VDD dependency on SCLK table is missing.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
		"VDD dependency on SCLK table has to have at least one entry.",
		return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
		"VDD dependency on MCLK table is missing.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
		"VDD dependency on MCLK table has to have at least one entry.",
		return -EINVAL);

	table_info->max_clock_voltage_on_ac.sclk =
		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
	table_info->max_clock_voltage_on_ac.mclk =
		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
	if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
		table_info->max_clock_voltage_on_ac.vddc =
			smu7_find_highest_vddc(hwmgr);
	else
		table_info->max_clock_voltage_on_ac.vddc =
			allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
	table_info->max_clock_voltage_on_ac.vddci =
		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;

	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;

	return 0;
}

static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
	uint32_t i;
	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
	struct amdgpu_device *adev = hwmgr->adev;

	if (table_info != NULL) {
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		lookup_table = table_info->vddc_lookup_table;
	} else
		return 0;

	hw_revision = adev->pdev->revision;
	sub_sys_id = adev->pdev->subsystem_device;
	sub_vendor_id = adev->pdev->subsystem_vendor;

	if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 &&
	    ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
	     (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
	     (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {

		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
					      CGS_IND_REG__SMC,
					      PWR_CKS_CNTL,
					      CKS_STRETCH_AMOUNT,
					      0x3);

		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
			return 0;

		for (i = 0; i < lookup_table->count; i++) {
			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
				return 0;
			}
		}
	}
	return 0;
}

static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
{
	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	uint32_t
temp_reg; 2410 struct phm_ppt_v1_information *table_info = 2411 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2412 2413 2414 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { 2415 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); 2416 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { 2417 case 0: 2418 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); 2419 break; 2420 case 1: 2421 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); 2422 break; 2423 case 2: 2424 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); 2425 break; 2426 case 3: 2427 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); 2428 break; 2429 case 4: 2430 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); 2431 break; 2432 default: 2433 break; 2434 } 2435 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); 2436 } 2437 2438 if (table_info == NULL) 2439 return 0; 2440 2441 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && 2442 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { 2443 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = 2444 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; 2445 2446 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = 2447 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; 2448 2449 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; 2450 2451 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; 2452 2453 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = 2454 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; 2455 2456 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; 2457 2458 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? 
2459 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0; 2460 2461 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; 2462 table_info->cac_dtp_table->usOperatingTempStep = 1; 2463 table_info->cac_dtp_table->usOperatingTempHyst = 1; 2464 2465 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = 2466 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; 2467 2468 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = 2469 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; 2470 2471 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = 2472 table_info->cac_dtp_table->usOperatingTempMinLimit; 2473 2474 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = 2475 table_info->cac_dtp_table->usOperatingTempMaxLimit; 2476 2477 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = 2478 table_info->cac_dtp_table->usDefaultTargetOperatingTemp; 2479 2480 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = 2481 table_info->cac_dtp_table->usOperatingTempStep; 2482 2483 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = 2484 table_info->cac_dtp_table->usTargetOperatingTemp; 2485 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK) 2486 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 2487 PHM_PlatformCaps_ODFuzzyFanControlSupport); 2488 } 2489 2490 return 0; 2491 } 2492 2493 /** 2494 * Change virtual leakage voltage to actual value. 2495 * 2496 * @param hwmgr the address of the powerplay hardware manager. 2497 * @param pointer to changing voltage 2498 * @param pointer to leakage table 2499 */ 2500 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, 2501 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table) 2502 { 2503 uint32_t index; 2504 2505 /* search for leakage voltage ID 0xff01 ~ 0xff08 */ 2506 for (index = 0; index < leakage_table->count; index++) { 2507 /* if this voltage matches a leakage voltage ID */ 2508 /* patch with actual leakage voltage */ 2509 if (leakage_table->leakage_id[index] == *voltage) { 2510 *voltage = leakage_table->actual_voltage[index]; 2511 break; 2512 } 2513 } 2514 2515 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) 2516 pr_err("Voltage value looks like a Leakage ID but it's not patched \n"); 2517 } 2518 2519 2520 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr, 2521 struct phm_clock_voltage_dependency_table *tab) 2522 { 2523 uint16_t i; 2524 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2525 2526 if (tab) 2527 for (i = 0; i < tab->count; i++) 2528 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2529 &data->vddc_leakage); 2530 2531 return 0; 2532 } 2533 2534 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr, 2535 struct phm_clock_voltage_dependency_table *tab) 2536 { 2537 uint16_t i; 2538 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2539 2540 if (tab) 2541 for (i = 0; i < tab->count; i++) 2542 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2543 &data->vddci_leakage); 2544 2545 return 0; 2546 } 2547 2548 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr, 2549 struct phm_vce_clock_voltage_dependency_table *tab) 2550 { 2551 uint16_t i; 2552 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2553 2554 if (tab) 2555 for (i = 0; i < tab->count; i++) 2556 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2557 &data->vddc_leakage); 2558 2559 return 0; 2560 } 2561 2562 2563 static int 
smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr, 2564 struct phm_uvd_clock_voltage_dependency_table *tab) 2565 { 2566 uint16_t i; 2567 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2568 2569 if (tab) 2570 for (i = 0; i < tab->count; i++) 2571 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2572 &data->vddc_leakage); 2573 2574 return 0; 2575 } 2576 2577 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, 2578 struct phm_phase_shedding_limits_table *tab) 2579 { 2580 uint16_t i; 2581 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2582 2583 if (tab) 2584 for (i = 0; i < tab->count; i++) 2585 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage, 2586 &data->vddc_leakage); 2587 2588 return 0; 2589 } 2590 2591 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr, 2592 struct phm_samu_clock_voltage_dependency_table *tab) 2593 { 2594 uint16_t i; 2595 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2596 2597 if (tab) 2598 for (i = 0; i < tab->count; i++) 2599 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2600 &data->vddc_leakage); 2601 2602 return 0; 2603 } 2604 2605 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, 2606 struct phm_acp_clock_voltage_dependency_table *tab) 2607 { 2608 uint16_t i; 2609 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2610 2611 if (tab) 2612 for (i = 0; i < tab->count; i++) 2613 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2614 &data->vddc_leakage); 2615 2616 return 0; 2617 } 2618 2619 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, 2620 struct phm_clock_and_voltage_limits *tab) 2621 { 2622 uint32_t vddc, vddci; 2623 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2624 2625 if (tab) { 2626 vddc = tab->vddc; 2627 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, 2628 &data->vddc_leakage); 2629 tab->vddc = vddc; 2630 vddci = tab->vddci; 2631 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, 2632 &data->vddci_leakage); 2633 tab->vddci = vddci; 2634 } 2635 2636 return 0; 2637 } 2638 2639 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) 2640 { 2641 uint32_t i; 2642 uint32_t vddc; 2643 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2644 2645 if (tab) { 2646 for (i = 0; i < tab->count; i++) { 2647 vddc = (uint32_t)(tab->entries[i].Vddc); 2648 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage); 2649 tab->entries[i].Vddc = (uint16_t)vddc; 2650 } 2651 } 2652 2653 return 0; 2654 } 2655 2656 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) 2657 { 2658 int tmp; 2659 2660 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); 2661 if (tmp) 2662 return -EINVAL; 2663 2664 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); 2665 if (tmp) 2666 return -EINVAL; 2667 2668 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 2669 if (tmp) 2670 return -EINVAL; 2671 2672 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); 2673 if (tmp) 2674 return -EINVAL; 2675 2676 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); 2677 if (tmp) 2678 return -EINVAL; 2679 2680 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); 2681 if (tmp) 2682 return -EINVAL; 2683 2684 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table); 2685 if (tmp) 2686 return -EINVAL; 
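	/* The remaining pptable v0 tables (ACP, phase shedding limits, AC/DC
	 * max limits and CAC leakage) are patched with the same VDDC/VDDCI
	 * leakage maps below.
	 */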

	tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
	if (tmp)
		return -EINVAL;

	return 0;
}


static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
		"VDDC dependency on SCLK table is missing. This table is mandatory",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
		"VDDC dependency on SCLK table has to have at least one entry. This table is mandatory",
		return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
		"VDDC dependency on MCLK table is missing. This table is mandatory",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
		"VDDC dependency on MCLK table has to have at least one entry.
This table is mandatory", 2732 return -EINVAL); 2733 2734 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; 2735 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 2736 2737 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = 2738 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; 2739 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = 2740 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; 2741 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = 2742 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 2743 2744 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { 2745 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v; 2746 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; 2747 } 2748 2749 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1) 2750 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; 2751 2752 return 0; 2753 } 2754 2755 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 2756 { 2757 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 2758 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; 2759 kfree(hwmgr->backend); 2760 hwmgr->backend = NULL; 2761 2762 return 0; 2763 } 2764 2765 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr) 2766 { 2767 uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id; 2768 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2769 int i; 2770 2771 if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) { 2772 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { 2773 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 2774 if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci, 2775 virtual_voltage_id, 2776 efuse_voltage_id) == 0) { 2777 if (vddc != 0 && vddc != virtual_voltage_id) { 2778 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; 2779 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; 2780 data->vddc_leakage.count++; 2781 } 2782 if (vddci != 0 && vddci != virtual_voltage_id) { 2783 data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci; 2784 data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id; 2785 data->vddci_leakage.count++; 2786 } 2787 } 2788 } 2789 } 2790 return 0; 2791 } 2792 2793 #define LEAKAGE_ID_MSB 463 2794 #define LEAKAGE_ID_LSB 454 2795 2796 static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr) 2797 { 2798 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2799 uint32_t efuse; 2800 uint16_t offset; 2801 int ret = 0; 2802 2803 if (data->disable_edc_leakage_controller) 2804 return 0; 2805 2806 ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr, 2807 &data->edc_hilo_leakage_offset_from_vbios); 2808 if (ret) 2809 return ret; 2810 2811 if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset && 2812 data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) { 2813 atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse); 2814 if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold) 2815 offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset; 2816 else 2817 offset = 
data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset; 2818 2819 ret = atomctrl_get_edc_leakage_table(hwmgr, 2820 &data->edc_leakage_table, 2821 offset); 2822 if (ret) 2823 return ret; 2824 } 2825 2826 return ret; 2827 } 2828 2829 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 2830 { 2831 struct smu7_hwmgr *data; 2832 int result = 0; 2833 2834 data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); 2835 if (data == NULL) 2836 return -ENOMEM; 2837 2838 hwmgr->backend = data; 2839 smu7_patch_voltage_workaround(hwmgr); 2840 smu7_init_dpm_defaults(hwmgr); 2841 2842 /* Get leakage voltage based on leakage ID. */ 2843 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 2844 PHM_PlatformCaps_EVV)) { 2845 result = smu7_get_evv_voltages(hwmgr); 2846 if (result) { 2847 pr_info("Get EVV Voltage Failed. Abort Driver loading!\n"); 2848 return -EINVAL; 2849 } 2850 } else { 2851 smu7_get_elb_voltages(hwmgr); 2852 } 2853 2854 if (hwmgr->pp_table_version == PP_TABLE_V1) { 2855 smu7_complete_dependency_tables(hwmgr); 2856 smu7_set_private_data_based_on_pptable_v1(hwmgr); 2857 } else if (hwmgr->pp_table_version == PP_TABLE_V0) { 2858 smu7_patch_dependency_tables_with_leakage(hwmgr); 2859 smu7_set_private_data_based_on_pptable_v0(hwmgr); 2860 } 2861 2862 /* Initalize Dynamic State Adjustment Rule Settings */ 2863 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); 2864 2865 if (0 == result) { 2866 struct amdgpu_device *adev = hwmgr->adev; 2867 2868 data->is_tlu_enabled = false; 2869 2870 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 2871 SMU7_MAX_HARDWARE_POWERLEVELS; 2872 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; 2873 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; 2874 2875 data->pcie_gen_cap = adev->pm.pcie_gen_mask; 2876 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 2877 data->pcie_spc_cap = 20; 2878 else 2879 data->pcie_spc_cap = 16; 2880 data->pcie_lane_cap = adev->pm.pcie_mlw_mask; 2881 2882 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ 2883 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ 2884 hwmgr->platform_descriptor.clockStep.engineClock = 500; 2885 hwmgr->platform_descriptor.clockStep.memoryClock = 500; 2886 smu7_thermal_parameter_init(hwmgr); 2887 } else { 2888 /* Ignore return value in here, we are cleaning up a mess. 
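	 * smu7_hwmgr_backend_fini() only frees dyn_state.vddc_dep_on_dal_pwrl
	 * and the backend itself, so there is nothing useful to report.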
*/ 2889 smu7_hwmgr_backend_fini(hwmgr); 2890 } 2891 2892 result = smu7_update_edc_leakage_table(hwmgr); 2893 if (result) 2894 return result; 2895 2896 return 0; 2897 } 2898 2899 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) 2900 { 2901 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2902 uint32_t level, tmp; 2903 2904 if (!data->pcie_dpm_key_disabled) { 2905 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { 2906 level = 0; 2907 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; 2908 while (tmp >>= 1) 2909 level++; 2910 2911 if (level) 2912 smum_send_msg_to_smc_with_parameter(hwmgr, 2913 PPSMC_MSG_PCIeDPM_ForceLevel, level, 2914 NULL); 2915 } 2916 } 2917 2918 if (!data->sclk_dpm_key_disabled) { 2919 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { 2920 level = 0; 2921 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; 2922 while (tmp >>= 1) 2923 level++; 2924 2925 if (level) 2926 smum_send_msg_to_smc_with_parameter(hwmgr, 2927 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2928 (1 << level), 2929 NULL); 2930 } 2931 } 2932 2933 if (!data->mclk_dpm_key_disabled) { 2934 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { 2935 level = 0; 2936 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; 2937 while (tmp >>= 1) 2938 level++; 2939 2940 if (level) 2941 smum_send_msg_to_smc_with_parameter(hwmgr, 2942 PPSMC_MSG_MCLKDPM_SetEnabledMask, 2943 (1 << level), 2944 NULL); 2945 } 2946 } 2947 2948 return 0; 2949 } 2950 2951 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) 2952 { 2953 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2954 2955 if (hwmgr->pp_table_version == PP_TABLE_V1) 2956 phm_apply_dal_min_voltage_request(hwmgr); 2957 /* TO DO for v0 iceland and Ci*/ 2958 2959 if (!data->sclk_dpm_key_disabled) { 2960 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) 2961 smum_send_msg_to_smc_with_parameter(hwmgr, 2962 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2963 data->dpm_level_enable_mask.sclk_dpm_enable_mask, 2964 NULL); 2965 } 2966 2967 if (!data->mclk_dpm_key_disabled) { 2968 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) 2969 smum_send_msg_to_smc_with_parameter(hwmgr, 2970 PPSMC_MSG_MCLKDPM_SetEnabledMask, 2971 data->dpm_level_enable_mask.mclk_dpm_enable_mask, 2972 NULL); 2973 } 2974 2975 return 0; 2976 } 2977 2978 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2979 { 2980 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2981 2982 if (!smum_is_dpm_running(hwmgr)) 2983 return -EINVAL; 2984 2985 if (!data->pcie_dpm_key_disabled) { 2986 smum_send_msg_to_smc(hwmgr, 2987 PPSMC_MSG_PCIeDPM_UnForceLevel, 2988 NULL); 2989 } 2990 2991 return smu7_upload_dpm_level_enable_mask(hwmgr); 2992 } 2993 2994 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) 2995 { 2996 struct smu7_hwmgr *data = 2997 (struct smu7_hwmgr *)(hwmgr->backend); 2998 uint32_t level; 2999 3000 if (!data->sclk_dpm_key_disabled) 3001 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { 3002 level = phm_get_lowest_enabled_level(hwmgr, 3003 data->dpm_level_enable_mask.sclk_dpm_enable_mask); 3004 smum_send_msg_to_smc_with_parameter(hwmgr, 3005 PPSMC_MSG_SCLKDPM_SetEnabledMask, 3006 (1 << level), 3007 NULL); 3008 3009 } 3010 3011 if (!data->mclk_dpm_key_disabled) { 3012 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { 3013 level = phm_get_lowest_enabled_level(hwmgr, 3014 data->dpm_level_enable_mask.mclk_dpm_enable_mask); 3015 smum_send_msg_to_smc_with_parameter(hwmgr, 3016 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3017 (1 
<< level), 3018 NULL); 3019 } 3020 } 3021 3022 if (!data->pcie_dpm_key_disabled) { 3023 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { 3024 level = phm_get_lowest_enabled_level(hwmgr, 3025 data->dpm_level_enable_mask.pcie_dpm_enable_mask); 3026 smum_send_msg_to_smc_with_parameter(hwmgr, 3027 PPSMC_MSG_PCIeDPM_ForceLevel, 3028 (level), 3029 NULL); 3030 } 3031 } 3032 3033 return 0; 3034 } 3035 3036 static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, 3037 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask) 3038 { 3039 uint32_t percentage; 3040 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3041 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; 3042 int32_t tmp_mclk; 3043 int32_t tmp_sclk; 3044 int32_t count; 3045 3046 if (golden_dpm_table->mclk_table.count < 1) 3047 return -EINVAL; 3048 3049 percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / 3050 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; 3051 3052 if (golden_dpm_table->mclk_table.count == 1) { 3053 percentage = 70; 3054 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; 3055 *mclk_mask = golden_dpm_table->mclk_table.count - 1; 3056 } else { 3057 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; 3058 *mclk_mask = golden_dpm_table->mclk_table.count - 2; 3059 } 3060 3061 tmp_sclk = tmp_mclk * percentage / 100; 3062 3063 if (hwmgr->pp_table_version == PP_TABLE_V0) { 3064 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; 3065 count >= 0; count--) { 3066 if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { 3067 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; 3068 *sclk_mask = count; 3069 break; 3070 } 3071 } 3072 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 3073 *sclk_mask = 0; 3074 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; 3075 } 3076 3077 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3078 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; 3079 } else if (hwmgr->pp_table_version == PP_TABLE_V1) { 3080 struct phm_ppt_v1_information *table_info = 3081 (struct phm_ppt_v1_information *)(hwmgr->pptable); 3082 3083 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { 3084 if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { 3085 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk; 3086 *sclk_mask = count; 3087 break; 3088 } 3089 } 3090 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 3091 *sclk_mask = 0; 3092 tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; 3093 } 3094 3095 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3096 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; 3097 } 3098 3099 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) 3100 *mclk_mask = 0; 3101 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3102 *mclk_mask = golden_dpm_table->mclk_table.count - 1; 3103 3104 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; 3105 hwmgr->pstate_sclk = tmp_sclk; 3106 hwmgr->pstate_mclk = tmp_mclk; 3107 3108 return 0; 3109 } 3110 3111 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, 3112 enum amd_dpm_forced_level level) 3113 { 3114 int ret = 0; 3115 uint32_t sclk_mask = 0; 3116 uint32_t mclk_mask = 0; 3117 uint32_t pcie_mask = 0; 3118 3119 if 
(hwmgr->pstate_sclk == 0) 3120 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); 3121 3122 switch (level) { 3123 case AMD_DPM_FORCED_LEVEL_HIGH: 3124 ret = smu7_force_dpm_highest(hwmgr); 3125 break; 3126 case AMD_DPM_FORCED_LEVEL_LOW: 3127 ret = smu7_force_dpm_lowest(hwmgr); 3128 break; 3129 case AMD_DPM_FORCED_LEVEL_AUTO: 3130 ret = smu7_unforce_dpm_levels(hwmgr); 3131 break; 3132 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 3133 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 3134 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 3135 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 3136 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); 3137 if (ret) 3138 return ret; 3139 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); 3140 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); 3141 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); 3142 break; 3143 case AMD_DPM_FORCED_LEVEL_MANUAL: 3144 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 3145 default: 3146 break; 3147 } 3148 3149 if (!ret) { 3150 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3151 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); 3152 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3153 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); 3154 } 3155 return ret; 3156 } 3157 3158 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) 3159 { 3160 return sizeof(struct smu7_power_state); 3161 } 3162 3163 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, 3164 uint32_t vblank_time_us) 3165 { 3166 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3167 uint32_t switch_limit_us; 3168 3169 switch (hwmgr->chip_id) { 3170 case CHIP_POLARIS10: 3171 case CHIP_POLARIS11: 3172 case CHIP_POLARIS12: 3173 if (hwmgr->is_kicker || (hwmgr->chip_id == CHIP_POLARIS12)) 3174 switch_limit_us = data->is_memory_gddr5 ? 450 : 150; 3175 else 3176 switch_limit_us = data->is_memory_gddr5 ? 200 : 150; 3177 break; 3178 case CHIP_VEGAM: 3179 switch_limit_us = 30; 3180 break; 3181 default: 3182 switch_limit_us = data->is_memory_gddr5 ? 450 : 150; 3183 break; 3184 } 3185 3186 if (vblank_time_us < switch_limit_us) 3187 return true; 3188 else 3189 return false; 3190 } 3191 3192 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 3193 struct pp_power_state *request_ps, 3194 const struct pp_power_state *current_ps) 3195 { 3196 struct amdgpu_device *adev = hwmgr->adev; 3197 struct smu7_power_state *smu7_ps = 3198 cast_phw_smu7_power_state(&request_ps->hardware); 3199 uint32_t sclk; 3200 uint32_t mclk; 3201 struct PP_Clocks minimum_clocks = {0}; 3202 bool disable_mclk_switching; 3203 bool disable_mclk_switching_for_frame_lock; 3204 bool disable_mclk_switching_for_display; 3205 const struct phm_clock_and_voltage_limits *max_limits; 3206 uint32_t i; 3207 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3208 struct phm_ppt_v1_information *table_info = 3209 (struct phm_ppt_v1_information *)(hwmgr->pptable); 3210 int32_t count; 3211 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; 3212 uint32_t latency; 3213 bool latency_allowed = false; 3214 3215 data->battery_state = (PP_StateUILabel_Battery == 3216 request_ps->classification.ui_label); 3217 data->mclk_ignore_signal = false; 3218 3219 PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2, 3220 "VI should always have 2 performance levels", 3221 ); 3222 3223 max_limits = adev->pm.ac_power ? 
3224 &(hwmgr->dyn_state.max_clock_voltage_on_ac) : 3225 &(hwmgr->dyn_state.max_clock_voltage_on_dc); 3226 3227 /* Cap clock DPM tables at DC MAX if it is in DC. */ 3228 if (!adev->pm.ac_power) { 3229 for (i = 0; i < smu7_ps->performance_level_count; i++) { 3230 if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) 3231 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; 3232 if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk) 3233 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk; 3234 } 3235 } 3236 3237 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; 3238 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; 3239 3240 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 3241 PHM_PlatformCaps_StablePState)) { 3242 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); 3243 stable_pstate_sclk = (max_limits->sclk * 75) / 100; 3244 3245 for (count = table_info->vdd_dep_on_sclk->count - 1; 3246 count >= 0; count--) { 3247 if (stable_pstate_sclk >= 3248 table_info->vdd_dep_on_sclk->entries[count].clk) { 3249 stable_pstate_sclk = 3250 table_info->vdd_dep_on_sclk->entries[count].clk; 3251 break; 3252 } 3253 } 3254 3255 if (count < 0) 3256 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; 3257 3258 stable_pstate_mclk = max_limits->mclk; 3259 3260 minimum_clocks.engineClock = stable_pstate_sclk; 3261 minimum_clocks.memoryClock = stable_pstate_mclk; 3262 } 3263 3264 disable_mclk_switching_for_frame_lock = phm_cap_enabled( 3265 hwmgr->platform_descriptor.platformCaps, 3266 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); 3267 3268 disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) && 3269 !hwmgr->display_config->multi_monitor_in_sync) || 3270 smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time); 3271 3272 disable_mclk_switching = disable_mclk_switching_for_frame_lock || 3273 disable_mclk_switching_for_display; 3274 3275 if (hwmgr->display_config->num_display == 0) { 3276 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) 3277 data->mclk_ignore_signal = true; 3278 else 3279 disable_mclk_switching = false; 3280 } 3281 3282 sclk = smu7_ps->performance_levels[0].engine_clock; 3283 mclk = smu7_ps->performance_levels[0].memory_clock; 3284 3285 if (disable_mclk_switching && 3286 (!(hwmgr->chip_id >= CHIP_POLARIS10 && 3287 hwmgr->chip_id <= CHIP_VEGAM))) 3288 mclk = smu7_ps->performance_levels 3289 [smu7_ps->performance_level_count - 1].memory_clock; 3290 3291 if (sclk < minimum_clocks.engineClock) 3292 sclk = (minimum_clocks.engineClock > max_limits->sclk) ? 3293 max_limits->sclk : minimum_clocks.engineClock; 3294 3295 if (mclk < minimum_clocks.memoryClock) 3296 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? 3297 max_limits->mclk : minimum_clocks.memoryClock; 3298 3299 smu7_ps->performance_levels[0].engine_clock = sclk; 3300 smu7_ps->performance_levels[0].memory_clock = mclk; 3301 3302 smu7_ps->performance_levels[1].engine_clock = 3303 (smu7_ps->performance_levels[1].engine_clock >= 3304 smu7_ps->performance_levels[0].engine_clock) ? 
3305 smu7_ps->performance_levels[1].engine_clock :
3306 smu7_ps->performance_levels[0].engine_clock;
3307
3308 if (disable_mclk_switching) {
3309 if (mclk < smu7_ps->performance_levels[1].memory_clock)
3310 mclk = smu7_ps->performance_levels[1].memory_clock;
3311
3312 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) {
3313 if (disable_mclk_switching_for_display) {
3314 /* Find the lowest MCLK frequency that is within
3315 * the tolerable latency defined in DAL
3316 */
3317 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3318 for (i = 0; i < data->mclk_latency_table.count; i++) {
3319 if (data->mclk_latency_table.entries[i].latency <= latency) {
3320 latency_allowed = true;
3321
3322 if ((data->mclk_latency_table.entries[i].frequency >=
3323 smu7_ps->performance_levels[0].memory_clock) &&
3324 (data->mclk_latency_table.entries[i].frequency <=
3325 smu7_ps->performance_levels[1].memory_clock)) {
3326 mclk = data->mclk_latency_table.entries[i].frequency;
3327 break;
3328 }
3329 }
3330 }
3331 if ((i >= data->mclk_latency_table.count - 1) && !latency_allowed) {
3332 data->mclk_ignore_signal = true;
3333 } else {
3334 data->mclk_ignore_signal = false;
3335 }
3336 }
3337
3338 if (disable_mclk_switching_for_frame_lock)
3339 mclk = smu7_ps->performance_levels[1].memory_clock;
3340 }
3341
3342 smu7_ps->performance_levels[0].memory_clock = mclk;
3343
3344 if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3345 hwmgr->chip_id <= CHIP_VEGAM))
3346 smu7_ps->performance_levels[1].memory_clock = mclk;
3347 } else {
3348 if (smu7_ps->performance_levels[1].memory_clock <
3349 smu7_ps->performance_levels[0].memory_clock)
3350 smu7_ps->performance_levels[1].memory_clock =
3351 smu7_ps->performance_levels[0].memory_clock;
3352 }
3353
3354 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3355 PHM_PlatformCaps_StablePState)) {
3356 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3357 smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3358 smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3359 smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3360 smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
3361 }
3362 }
3363 return 0;
3364 }
3365
3366
3367 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3368 {
3369 struct pp_power_state *ps;
3370 struct smu7_power_state *smu7_ps;
3371
3372 if (hwmgr == NULL)
3373 return -EINVAL;
3374
3375 ps = hwmgr->request_ps;
3376
3377 if (ps == NULL)
3378 return -EINVAL;
3379
3380 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3381
3382 if (low)
3383 return smu7_ps->performance_levels[0].memory_clock;
3384 else
3385 return smu7_ps->performance_levels
3386 [smu7_ps->performance_level_count-1].memory_clock;
3387 }
3388
3389 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3390 {
3391 struct pp_power_state *ps;
3392 struct smu7_power_state *smu7_ps;
3393
3394 if (hwmgr == NULL)
3395 return -EINVAL;
3396
3397 ps = hwmgr->request_ps;
3398
3399 if (ps == NULL)
3400 return -EINVAL;
3401
3402 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3403
3404 if (low)
3405 return smu7_ps->performance_levels[0].engine_clock;
3406 else
3407 return smu7_ps->performance_levels
3408 [smu7_ps->performance_level_count-1].engine_clock;
3409 }
3410
3411 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3412 struct pp_hw_power_state *hw_ps)
3413 {
3414 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3415
struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps; 3416 ATOM_FIRMWARE_INFO_V2_2 *fw_info; 3417 uint16_t size; 3418 uint8_t frev, crev; 3419 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); 3420 3421 /* First retrieve the Boot clocks and VDDC from the firmware info table. 3422 * We assume here that fw_info is unchanged if this call fails. 3423 */ 3424 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index, 3425 &size, &frev, &crev); 3426 if (!fw_info) 3427 /* During a test, there is no firmware info table. */ 3428 return 0; 3429 3430 /* Patch the state. */ 3431 data->vbios_boot_state.sclk_bootup_value = 3432 le32_to_cpu(fw_info->ulDefaultEngineClock); 3433 data->vbios_boot_state.mclk_bootup_value = 3434 le32_to_cpu(fw_info->ulDefaultMemoryClock); 3435 data->vbios_boot_state.mvdd_bootup_value = 3436 le16_to_cpu(fw_info->usBootUpMVDDCVoltage); 3437 data->vbios_boot_state.vddc_bootup_value = 3438 le16_to_cpu(fw_info->usBootUpVDDCVoltage); 3439 data->vbios_boot_state.vddci_bootup_value = 3440 le16_to_cpu(fw_info->usBootUpVDDCIVoltage); 3441 data->vbios_boot_state.pcie_gen_bootup_value = 3442 smu7_get_current_pcie_speed(hwmgr); 3443 3444 data->vbios_boot_state.pcie_lane_bootup_value = 3445 (uint16_t)smu7_get_current_pcie_lane_number(hwmgr); 3446 3447 /* set boot power state */ 3448 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; 3449 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; 3450 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; 3451 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; 3452 3453 return 0; 3454 } 3455 3456 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) 3457 { 3458 int result; 3459 unsigned long ret = 0; 3460 3461 if (hwmgr->pp_table_version == PP_TABLE_V0) { 3462 result = pp_tables_get_num_of_entries(hwmgr, &ret); 3463 return result ? 
0 : ret; 3464 } else if (hwmgr->pp_table_version == PP_TABLE_V1) { 3465 result = get_number_of_powerplay_table_entries_v1_0(hwmgr); 3466 return result; 3467 } 3468 return 0; 3469 } 3470 3471 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, 3472 void *state, struct pp_power_state *power_state, 3473 void *pp_table, uint32_t classification_flag) 3474 { 3475 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3476 struct smu7_power_state *smu7_power_state = 3477 (struct smu7_power_state *)(&(power_state->hardware)); 3478 struct smu7_performance_level *performance_level; 3479 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; 3480 ATOM_Tonga_POWERPLAYTABLE *powerplay_table = 3481 (ATOM_Tonga_POWERPLAYTABLE *)pp_table; 3482 PPTable_Generic_SubTable_Header *sclk_dep_table = 3483 (PPTable_Generic_SubTable_Header *) 3484 (((unsigned long)powerplay_table) + 3485 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 3486 3487 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 3488 (ATOM_Tonga_MCLK_Dependency_Table *) 3489 (((unsigned long)powerplay_table) + 3490 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); 3491 3492 /* The following fields are not initialized here: id orderedList allStatesList */ 3493 power_state->classification.ui_label = 3494 (le16_to_cpu(state_entry->usClassification) & 3495 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> 3496 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; 3497 power_state->classification.flags = classification_flag; 3498 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ 3499 3500 power_state->classification.temporary_state = false; 3501 power_state->classification.to_be_deleted = false; 3502 3503 power_state->validation.disallowOnDC = 3504 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & 3505 ATOM_Tonga_DISALLOW_ON_DC)); 3506 3507 power_state->pcie.lanes = 0; 3508 3509 power_state->display.disableFrameModulation = false; 3510 power_state->display.limitRefreshrate = false; 3511 power_state->display.enableVariBright = 3512 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & 3513 ATOM_Tonga_ENABLE_VARIBRIGHT)); 3514 3515 power_state->validation.supportedPowerLevels = 0; 3516 power_state->uvd_clocks.VCLK = 0; 3517 power_state->uvd_clocks.DCLK = 0; 3518 power_state->temperatures.min = 0; 3519 power_state->temperatures.max = 0; 3520 3521 performance_level = &(smu7_power_state->performance_levels 3522 [smu7_power_state->performance_level_count++]); 3523 3524 PP_ASSERT_WITH_CODE( 3525 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), 3526 "Performance levels exceeds SMC limit!", 3527 return -EINVAL); 3528 3529 PP_ASSERT_WITH_CODE( 3530 (smu7_power_state->performance_level_count <= 3531 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), 3532 "Performance levels exceeds Driver limit!", 3533 return -EINVAL); 3534 3535 /* Performance levels are arranged from low to high. 
*/ 3536 performance_level->memory_clock = mclk_dep_table->entries 3537 [state_entry->ucMemoryClockIndexLow].ulMclk; 3538 if (sclk_dep_table->ucRevId == 0) 3539 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3540 [state_entry->ucEngineClockIndexLow].ulSclk; 3541 else if (sclk_dep_table->ucRevId == 1) 3542 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3543 [state_entry->ucEngineClockIndexLow].ulSclk; 3544 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3545 state_entry->ucPCIEGenLow); 3546 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, 3547 state_entry->ucPCIELaneLow); 3548 3549 performance_level = &(smu7_power_state->performance_levels 3550 [smu7_power_state->performance_level_count++]); 3551 performance_level->memory_clock = mclk_dep_table->entries 3552 [state_entry->ucMemoryClockIndexHigh].ulMclk; 3553 3554 if (sclk_dep_table->ucRevId == 0) 3555 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3556 [state_entry->ucEngineClockIndexHigh].ulSclk; 3557 else if (sclk_dep_table->ucRevId == 1) 3558 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3559 [state_entry->ucEngineClockIndexHigh].ulSclk; 3560 3561 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3562 state_entry->ucPCIEGenHigh); 3563 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, 3564 state_entry->ucPCIELaneHigh); 3565 3566 return 0; 3567 } 3568 3569 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, 3570 unsigned long entry_index, struct pp_power_state *state) 3571 { 3572 int result; 3573 struct smu7_power_state *ps; 3574 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3575 struct phm_ppt_v1_information *table_info = 3576 (struct phm_ppt_v1_information *)(hwmgr->pptable); 3577 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = 3578 table_info->vdd_dep_on_mclk; 3579 3580 state->hardware.magic = PHM_VIslands_Magic; 3581 3582 ps = (struct smu7_power_state *)(&state->hardware); 3583 3584 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, 3585 smu7_get_pp_table_entry_callback_func_v1); 3586 3587 /* This is the earliest time we have all the dependency table and the VBIOS boot state 3588 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state 3589 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state 3590 */ 3591 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { 3592 if (dep_mclk_table->entries[0].clk != 3593 data->vbios_boot_state.mclk_bootup_value) 3594 pr_debug("Single MCLK entry VDDCI/MCLK dependency table " 3595 "does not match VBIOS boot MCLK level"); 3596 if (dep_mclk_table->entries[0].vddci != 3597 data->vbios_boot_state.vddci_bootup_value) 3598 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " 3599 "does not match VBIOS boot VDDCI level"); 3600 } 3601 3602 /* set DC compatible flag if this state supports DC */ 3603 if (!state->validation.disallowOnDC) 3604 ps->dc_compatible = true; 3605 3606 if (state->classification.flags & PP_StateClassificationFlag_ACPI) 3607 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; 3608 3609 ps->uvd_clks.vclk = state->uvd_clocks.VCLK; 3610 ps->uvd_clks.dclk = state->uvd_clocks.DCLK; 3611 3612 if (!result) { 3613 uint32_t i; 3614 3615 switch (state->classification.ui_label) { 3616 case 
PP_StateUILabel_Performance: 3617 data->use_pcie_performance_levels = true; 3618 for (i = 0; i < ps->performance_level_count; i++) { 3619 if (data->pcie_gen_performance.max < 3620 ps->performance_levels[i].pcie_gen) 3621 data->pcie_gen_performance.max = 3622 ps->performance_levels[i].pcie_gen; 3623 3624 if (data->pcie_gen_performance.min > 3625 ps->performance_levels[i].pcie_gen) 3626 data->pcie_gen_performance.min = 3627 ps->performance_levels[i].pcie_gen; 3628 3629 if (data->pcie_lane_performance.max < 3630 ps->performance_levels[i].pcie_lane) 3631 data->pcie_lane_performance.max = 3632 ps->performance_levels[i].pcie_lane; 3633 if (data->pcie_lane_performance.min > 3634 ps->performance_levels[i].pcie_lane) 3635 data->pcie_lane_performance.min = 3636 ps->performance_levels[i].pcie_lane; 3637 } 3638 break; 3639 case PP_StateUILabel_Battery: 3640 data->use_pcie_power_saving_levels = true; 3641 3642 for (i = 0; i < ps->performance_level_count; i++) { 3643 if (data->pcie_gen_power_saving.max < 3644 ps->performance_levels[i].pcie_gen) 3645 data->pcie_gen_power_saving.max = 3646 ps->performance_levels[i].pcie_gen; 3647 3648 if (data->pcie_gen_power_saving.min > 3649 ps->performance_levels[i].pcie_gen) 3650 data->pcie_gen_power_saving.min = 3651 ps->performance_levels[i].pcie_gen; 3652 3653 if (data->pcie_lane_power_saving.max < 3654 ps->performance_levels[i].pcie_lane) 3655 data->pcie_lane_power_saving.max = 3656 ps->performance_levels[i].pcie_lane; 3657 3658 if (data->pcie_lane_power_saving.min > 3659 ps->performance_levels[i].pcie_lane) 3660 data->pcie_lane_power_saving.min = 3661 ps->performance_levels[i].pcie_lane; 3662 } 3663 break; 3664 default: 3665 break; 3666 } 3667 } 3668 return 0; 3669 } 3670 3671 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, 3672 struct pp_hw_power_state *power_state, 3673 unsigned int index, const void *clock_info) 3674 { 3675 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3676 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state); 3677 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; 3678 struct smu7_performance_level *performance_level; 3679 uint32_t engine_clock, memory_clock; 3680 uint16_t pcie_gen_from_bios; 3681 3682 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; 3683 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; 3684 3685 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) 3686 data->highest_mclk = memory_clock; 3687 3688 PP_ASSERT_WITH_CODE( 3689 (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), 3690 "Performance levels exceeds SMC limit!", 3691 return -EINVAL); 3692 3693 PP_ASSERT_WITH_CODE( 3694 (ps->performance_level_count < 3695 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), 3696 "Performance levels exceeds Driver limit, Skip!", 3697 return 0); 3698 3699 performance_level = &(ps->performance_levels 3700 [ps->performance_level_count++]); 3701 3702 /* Performance levels are arranged from low to high. 
*/ 3703 performance_level->memory_clock = memory_clock; 3704 performance_level->engine_clock = engine_clock; 3705 3706 pcie_gen_from_bios = visland_clk_info->ucPCIEGen; 3707 3708 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); 3709 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); 3710 3711 return 0; 3712 } 3713 3714 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, 3715 unsigned long entry_index, struct pp_power_state *state) 3716 { 3717 int result; 3718 struct smu7_power_state *ps; 3719 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3720 struct phm_clock_voltage_dependency_table *dep_mclk_table = 3721 hwmgr->dyn_state.vddci_dependency_on_mclk; 3722 3723 memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); 3724 3725 state->hardware.magic = PHM_VIslands_Magic; 3726 3727 ps = (struct smu7_power_state *)(&state->hardware); 3728 3729 result = pp_tables_get_entry(hwmgr, entry_index, state, 3730 smu7_get_pp_table_entry_callback_func_v0); 3731 3732 /* 3733 * This is the earliest time we have all the dependency table 3734 * and the VBIOS boot state as 3735 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot 3736 * state if there is only one VDDCI/MCLK level, check if it's 3737 * the same as VBIOS boot state 3738 */ 3739 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { 3740 if (dep_mclk_table->entries[0].clk != 3741 data->vbios_boot_state.mclk_bootup_value) 3742 pr_debug("Single MCLK entry VDDCI/MCLK dependency table " 3743 "does not match VBIOS boot MCLK level"); 3744 if (dep_mclk_table->entries[0].v != 3745 data->vbios_boot_state.vddci_bootup_value) 3746 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " 3747 "does not match VBIOS boot VDDCI level"); 3748 } 3749 3750 /* set DC compatible flag if this state supports DC */ 3751 if (!state->validation.disallowOnDC) 3752 ps->dc_compatible = true; 3753 3754 if (state->classification.flags & PP_StateClassificationFlag_ACPI) 3755 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; 3756 3757 ps->uvd_clks.vclk = state->uvd_clocks.VCLK; 3758 ps->uvd_clks.dclk = state->uvd_clocks.DCLK; 3759 3760 if (!result) { 3761 uint32_t i; 3762 3763 switch (state->classification.ui_label) { 3764 case PP_StateUILabel_Performance: 3765 data->use_pcie_performance_levels = true; 3766 3767 for (i = 0; i < ps->performance_level_count; i++) { 3768 if (data->pcie_gen_performance.max < 3769 ps->performance_levels[i].pcie_gen) 3770 data->pcie_gen_performance.max = 3771 ps->performance_levels[i].pcie_gen; 3772 3773 if (data->pcie_gen_performance.min > 3774 ps->performance_levels[i].pcie_gen) 3775 data->pcie_gen_performance.min = 3776 ps->performance_levels[i].pcie_gen; 3777 3778 if (data->pcie_lane_performance.max < 3779 ps->performance_levels[i].pcie_lane) 3780 data->pcie_lane_performance.max = 3781 ps->performance_levels[i].pcie_lane; 3782 3783 if (data->pcie_lane_performance.min > 3784 ps->performance_levels[i].pcie_lane) 3785 data->pcie_lane_performance.min = 3786 ps->performance_levels[i].pcie_lane; 3787 } 3788 break; 3789 case PP_StateUILabel_Battery: 3790 data->use_pcie_power_saving_levels = true; 3791 3792 for (i = 0; i < ps->performance_level_count; i++) { 3793 if (data->pcie_gen_power_saving.max < 3794 ps->performance_levels[i].pcie_gen) 3795 data->pcie_gen_power_saving.max = 3796 ps->performance_levels[i].pcie_gen; 3797 3798 if (data->pcie_gen_power_saving.min > 3799 
ps->performance_levels[i].pcie_gen) 3800 data->pcie_gen_power_saving.min = 3801 ps->performance_levels[i].pcie_gen; 3802 3803 if (data->pcie_lane_power_saving.max < 3804 ps->performance_levels[i].pcie_lane) 3805 data->pcie_lane_power_saving.max = 3806 ps->performance_levels[i].pcie_lane; 3807 3808 if (data->pcie_lane_power_saving.min > 3809 ps->performance_levels[i].pcie_lane) 3810 data->pcie_lane_power_saving.min = 3811 ps->performance_levels[i].pcie_lane; 3812 } 3813 break; 3814 default: 3815 break; 3816 } 3817 } 3818 return 0; 3819 } 3820 3821 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, 3822 unsigned long entry_index, struct pp_power_state *state) 3823 { 3824 if (hwmgr->pp_table_version == PP_TABLE_V0) 3825 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state); 3826 else if (hwmgr->pp_table_version == PP_TABLE_V1) 3827 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state); 3828 3829 return 0; 3830 } 3831 3832 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) 3833 { 3834 struct amdgpu_device *adev = hwmgr->adev; 3835 int i; 3836 u32 tmp = 0; 3837 3838 if (!query) 3839 return -EINVAL; 3840 3841 /* 3842 * PPSMC_MSG_GetCurrPkgPwr is not supported on: 3843 * - Hawaii 3844 * - Bonaire 3845 * - Fiji 3846 * - Tonga 3847 */ 3848 if ((adev->asic_type != CHIP_HAWAII) && 3849 (adev->asic_type != CHIP_BONAIRE) && 3850 (adev->asic_type != CHIP_FIJI) && 3851 (adev->asic_type != CHIP_TONGA)) { 3852 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp); 3853 *query = tmp; 3854 3855 if (tmp != 0) 3856 return 0; 3857 } 3858 3859 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL); 3860 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 3861 ixSMU_PM_STATUS_95, 0); 3862 3863 for (i = 0; i < 10; i++) { 3864 msleep(500); 3865 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL); 3866 tmp = cgs_read_ind_register(hwmgr->device, 3867 CGS_IND_REG__SMC, 3868 ixSMU_PM_STATUS_95); 3869 if (tmp != 0) 3870 break; 3871 } 3872 *query = tmp; 3873 3874 return 0; 3875 } 3876 3877 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, 3878 void *value, int *size) 3879 { 3880 uint32_t sclk, mclk, activity_percent; 3881 uint32_t offset, val_vid; 3882 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3883 3884 /* size must be at least 4 bytes for all sensors */ 3885 if (*size < 4) 3886 return -EINVAL; 3887 3888 switch (idx) { 3889 case AMDGPU_PP_SENSOR_GFX_SCLK: 3890 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk); 3891 *((uint32_t *)value) = sclk; 3892 *size = 4; 3893 return 0; 3894 case AMDGPU_PP_SENSOR_GFX_MCLK: 3895 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk); 3896 *((uint32_t *)value) = mclk; 3897 *size = 4; 3898 return 0; 3899 case AMDGPU_PP_SENSOR_GPU_LOAD: 3900 case AMDGPU_PP_SENSOR_MEM_LOAD: 3901 offset = data->soft_regs_start + smum_get_offsetof(hwmgr, 3902 SMU_SoftRegisters, 3903 (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ? 3904 AverageGraphicsActivity: 3905 AverageMemoryActivity); 3906 3907 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); 3908 activity_percent += 0x80; 3909 activity_percent >>= 8; 3910 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; 3911 *size = 4; 3912 return 0; 3913 case AMDGPU_PP_SENSOR_GPU_TEMP: 3914 *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr); 3915 *size = 4; 3916 return 0; 3917 case AMDGPU_PP_SENSOR_UVD_POWER: 3918 *((uint32_t *)value) = data->uvd_power_gated ? 
0 : 1; 3919 *size = 4; 3920 return 0; 3921 case AMDGPU_PP_SENSOR_VCE_POWER: 3922 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; 3923 *size = 4; 3924 return 0; 3925 case AMDGPU_PP_SENSOR_GPU_POWER: 3926 return smu7_get_gpu_power(hwmgr, (uint32_t *)value); 3927 case AMDGPU_PP_SENSOR_VDDGFX: 3928 if ((data->vr_config & VRCONF_VDDGFX_MASK) == 3929 (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) 3930 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, 3931 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); 3932 else 3933 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, 3934 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID); 3935 3936 *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid); 3937 return 0; 3938 default: 3939 return -EINVAL; 3940 } 3941 } 3942 3943 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) 3944 { 3945 const struct phm_set_power_state_input *states = 3946 (const struct phm_set_power_state_input *)input; 3947 const struct smu7_power_state *smu7_ps = 3948 cast_const_phw_smu7_power_state(states->pnew_state); 3949 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3950 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 3951 uint32_t sclk = smu7_ps->performance_levels 3952 [smu7_ps->performance_level_count - 1].engine_clock; 3953 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 3954 uint32_t mclk = smu7_ps->performance_levels 3955 [smu7_ps->performance_level_count - 1].memory_clock; 3956 struct PP_Clocks min_clocks = {0}; 3957 uint32_t i; 3958 3959 for (i = 0; i < sclk_table->count; i++) { 3960 if (sclk == sclk_table->dpm_levels[i].value) 3961 break; 3962 } 3963 3964 if (i >= sclk_table->count) { 3965 if (sclk > sclk_table->dpm_levels[i-1].value) { 3966 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 3967 sclk_table->dpm_levels[i-1].value = sclk; 3968 } 3969 } else { 3970 /* TODO: Check SCLK in DAL's minimum clocks 3971 * in case DeepSleep divider update is required. 3972 */ 3973 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && 3974 (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK || 3975 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) 3976 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; 3977 } 3978 3979 for (i = 0; i < mclk_table->count; i++) { 3980 if (mclk == mclk_table->dpm_levels[i].value) 3981 break; 3982 } 3983 3984 if (i >= mclk_table->count) { 3985 if (mclk > mclk_table->dpm_levels[i-1].value) { 3986 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 3987 mclk_table->dpm_levels[i-1].value = mclk; 3988 } 3989 } 3990 3991 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) 3992 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; 3993 3994 return 0; 3995 } 3996 3997 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr, 3998 const struct smu7_power_state *smu7_ps) 3999 { 4000 uint32_t i; 4001 uint32_t sclk, max_sclk = 0; 4002 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4003 struct smu7_dpm_table *dpm_table = &data->dpm_table; 4004 4005 for (i = 0; i < smu7_ps->performance_level_count; i++) { 4006 sclk = smu7_ps->performance_levels[i].engine_clock; 4007 if (max_sclk < sclk) 4008 max_sclk = sclk; 4009 } 4010 4011 for (i = 0; i < dpm_table->sclk_table.count; i++) { 4012 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) 4013 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? 
4014 dpm_table->pcie_speed_table.dpm_levels 4015 [dpm_table->pcie_speed_table.count - 1].value : 4016 dpm_table->pcie_speed_table.dpm_levels[i].value); 4017 } 4018 4019 return 0; 4020 } 4021 4022 static int smu7_request_link_speed_change_before_state_change( 4023 struct pp_hwmgr *hwmgr, const void *input) 4024 { 4025 const struct phm_set_power_state_input *states = 4026 (const struct phm_set_power_state_input *)input; 4027 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4028 const struct smu7_power_state *smu7_nps = 4029 cast_const_phw_smu7_power_state(states->pnew_state); 4030 const struct smu7_power_state *polaris10_cps = 4031 cast_const_phw_smu7_power_state(states->pcurrent_state); 4032 4033 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps); 4034 uint16_t current_link_speed; 4035 4036 if (data->force_pcie_gen == PP_PCIEGenInvalid) 4037 current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps); 4038 else 4039 current_link_speed = data->force_pcie_gen; 4040 4041 data->force_pcie_gen = PP_PCIEGenInvalid; 4042 data->pspp_notify_required = false; 4043 4044 if (target_link_speed > current_link_speed) { 4045 switch (target_link_speed) { 4046 #ifdef CONFIG_ACPI 4047 case PP_PCIEGen3: 4048 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false)) 4049 break; 4050 data->force_pcie_gen = PP_PCIEGen2; 4051 if (current_link_speed == PP_PCIEGen2) 4052 break; 4053 fallthrough; 4054 case PP_PCIEGen2: 4055 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false)) 4056 break; 4057 fallthrough; 4058 #endif 4059 default: 4060 data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); 4061 break; 4062 } 4063 } else { 4064 if (target_link_speed < current_link_speed) 4065 data->pspp_notify_required = true; 4066 } 4067 4068 return 0; 4069 } 4070 4071 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 4072 { 4073 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4074 4075 if (0 == data->need_update_smu7_dpm_table) 4076 return 0; 4077 4078 if ((0 == data->sclk_dpm_key_disabled) && 4079 (data->need_update_smu7_dpm_table & 4080 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 4081 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 4082 "Trying to freeze SCLK DPM when DPM is disabled", 4083 ); 4084 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 4085 PPSMC_MSG_SCLKDPM_FreezeLevel, 4086 NULL), 4087 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", 4088 return -EINVAL); 4089 } 4090 4091 if ((0 == data->mclk_dpm_key_disabled) && 4092 !data->mclk_ignore_signal && 4093 (data->need_update_smu7_dpm_table & 4094 DPMTABLE_OD_UPDATE_MCLK)) { 4095 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 4096 "Trying to freeze MCLK DPM when DPM is disabled", 4097 ); 4098 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 4099 PPSMC_MSG_MCLKDPM_FreezeLevel, 4100 NULL), 4101 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", 4102 return -EINVAL); 4103 } 4104 4105 return 0; 4106 } 4107 4108 static int smu7_populate_and_upload_sclk_mclk_dpm_levels( 4109 struct pp_hwmgr *hwmgr, const void *input) 4110 { 4111 int result = 0; 4112 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4113 struct smu7_dpm_table *dpm_table = &data->dpm_table; 4114 uint32_t count; 4115 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); 4116 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); 4117 struct 
phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); 4118 4119 if (0 == data->need_update_smu7_dpm_table) 4120 return 0; 4121 4122 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { 4123 for (count = 0; count < dpm_table->sclk_table.count; count++) { 4124 dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled; 4125 dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock; 4126 } 4127 } 4128 4129 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { 4130 for (count = 0; count < dpm_table->mclk_table.count; count++) { 4131 dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled; 4132 dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock; 4133 } 4134 } 4135 4136 if (data->need_update_smu7_dpm_table & 4137 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { 4138 result = smum_populate_all_graphic_levels(hwmgr); 4139 PP_ASSERT_WITH_CODE((0 == result), 4140 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", 4141 return result); 4142 } 4143 4144 if (data->need_update_smu7_dpm_table & 4145 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { 4146 /*populate MCLK dpm table to SMU7 */ 4147 result = smum_populate_all_memory_levels(hwmgr); 4148 PP_ASSERT_WITH_CODE((0 == result), 4149 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", 4150 return result); 4151 } 4152 4153 return result; 4154 } 4155 4156 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, 4157 struct smu7_single_dpm_table *dpm_table, 4158 uint32_t low_limit, uint32_t high_limit) 4159 { 4160 uint32_t i; 4161 4162 /* force the trim if mclk_switching is disabled to prevent flicker */ 4163 bool force_trim = (low_limit == high_limit); 4164 for (i = 0; i < dpm_table->count; i++) { 4165 /*skip the trim if od is enabled*/ 4166 if ((!hwmgr->od_enabled || force_trim) 4167 && (dpm_table->dpm_levels[i].value < low_limit 4168 || dpm_table->dpm_levels[i].value > high_limit)) 4169 dpm_table->dpm_levels[i].enabled = false; 4170 else 4171 dpm_table->dpm_levels[i].enabled = true; 4172 } 4173 4174 return 0; 4175 } 4176 4177 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, 4178 const struct smu7_power_state *smu7_ps) 4179 { 4180 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4181 uint32_t high_limit_count; 4182 4183 PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1), 4184 "power state did not have any performance level", 4185 return -EINVAL); 4186 4187 high_limit_count = (1 == smu7_ps->performance_level_count) ? 
0 : 1; 4188 4189 smu7_trim_single_dpm_states(hwmgr, 4190 &(data->dpm_table.sclk_table), 4191 smu7_ps->performance_levels[0].engine_clock, 4192 smu7_ps->performance_levels[high_limit_count].engine_clock); 4193 4194 smu7_trim_single_dpm_states(hwmgr, 4195 &(data->dpm_table.mclk_table), 4196 smu7_ps->performance_levels[0].memory_clock, 4197 smu7_ps->performance_levels[high_limit_count].memory_clock); 4198 4199 return 0; 4200 } 4201 4202 static int smu7_generate_dpm_level_enable_mask( 4203 struct pp_hwmgr *hwmgr, const void *input) 4204 { 4205 int result = 0; 4206 const struct phm_set_power_state_input *states = 4207 (const struct phm_set_power_state_input *)input; 4208 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4209 const struct smu7_power_state *smu7_ps = 4210 cast_const_phw_smu7_power_state(states->pnew_state); 4211 4212 4213 result = smu7_trim_dpm_states(hwmgr, smu7_ps); 4214 if (result) 4215 return result; 4216 4217 data->dpm_level_enable_mask.sclk_dpm_enable_mask = 4218 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); 4219 data->dpm_level_enable_mask.mclk_dpm_enable_mask = 4220 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); 4221 data->dpm_level_enable_mask.pcie_dpm_enable_mask = 4222 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); 4223 4224 return 0; 4225 } 4226 4227 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 4228 { 4229 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4230 4231 if (0 == data->need_update_smu7_dpm_table) 4232 return 0; 4233 4234 if ((0 == data->sclk_dpm_key_disabled) && 4235 (data->need_update_smu7_dpm_table & 4236 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 4237 4238 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 4239 "Trying to Unfreeze SCLK DPM when DPM is disabled", 4240 ); 4241 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 4242 PPSMC_MSG_SCLKDPM_UnfreezeLevel, 4243 NULL), 4244 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", 4245 return -EINVAL); 4246 } 4247 4248 if ((0 == data->mclk_dpm_key_disabled) && 4249 !data->mclk_ignore_signal && 4250 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 4251 4252 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 4253 "Trying to Unfreeze MCLK DPM when DPM is disabled", 4254 ); 4255 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 4256 PPSMC_MSG_MCLKDPM_UnfreezeLevel, 4257 NULL), 4258 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", 4259 return -EINVAL); 4260 } 4261 4262 data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; 4263 4264 return 0; 4265 } 4266 4267 static int smu7_notify_link_speed_change_after_state_change( 4268 struct pp_hwmgr *hwmgr, const void *input) 4269 { 4270 const struct phm_set_power_state_input *states = 4271 (const struct phm_set_power_state_input *)input; 4272 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4273 const struct smu7_power_state *smu7_ps = 4274 cast_const_phw_smu7_power_state(states->pnew_state); 4275 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps); 4276 uint8_t request; 4277 4278 if (data->pspp_notify_required) { 4279 if (target_link_speed == PP_PCIEGen3) 4280 request = PCIE_PERF_REQ_GEN3; 4281 else if (target_link_speed == PP_PCIEGen2) 4282 request = PCIE_PERF_REQ_GEN2; 4283 else 4284 request = PCIE_PERF_REQ_GEN1; 4285 4286 if (request == PCIE_PERF_REQ_GEN1 && 4287 smu7_get_current_pcie_speed(hwmgr) > 0) 4288 return 0; 4289 4290 #ifdef 
CONFIG_ACPI 4291 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) { 4292 if (PP_PCIEGen2 == target_link_speed) 4293 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!"); 4294 else 4295 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!"); 4296 } 4297 #endif 4298 } 4299 4300 return 0; 4301 } 4302 4303 static int smu7_notify_no_display(struct pp_hwmgr *hwmgr) 4304 { 4305 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL) == 0) ? 0 : -EINVAL; 4306 } 4307 4308 static int smu7_notify_has_display(struct pp_hwmgr *hwmgr) 4309 { 4310 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4311 4312 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { 4313 if (hwmgr->chip_id == CHIP_VEGAM) 4314 smum_send_msg_to_smc_with_parameter(hwmgr, 4315 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2, 4316 NULL); 4317 else 4318 smum_send_msg_to_smc_with_parameter(hwmgr, 4319 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2, 4320 NULL); 4321 data->last_sent_vbi_timeout = data->frame_time_x2; 4322 } 4323 4324 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL; 4325 } 4326 4327 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) 4328 { 4329 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4330 int result = 0; 4331 4332 if (data->mclk_ignore_signal) 4333 result = smu7_notify_no_display(hwmgr); 4334 else 4335 result = smu7_notify_has_display(hwmgr); 4336 4337 return result; 4338 } 4339 4340 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 4341 { 4342 int tmp_result, result = 0; 4343 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4344 4345 tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input); 4346 PP_ASSERT_WITH_CODE((0 == tmp_result), 4347 "Failed to find DPM states clocks in DPM table!", 4348 result = tmp_result); 4349 4350 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 4351 PHM_PlatformCaps_PCIEPerformanceRequest)) { 4352 tmp_result = 4353 smu7_request_link_speed_change_before_state_change(hwmgr, input); 4354 PP_ASSERT_WITH_CODE((0 == tmp_result), 4355 "Failed to request link speed change before state change!", 4356 result = tmp_result); 4357 } 4358 4359 tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); 4360 PP_ASSERT_WITH_CODE((0 == tmp_result), 4361 "Failed to freeze SCLK MCLK DPM!", result = tmp_result); 4362 4363 tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); 4364 PP_ASSERT_WITH_CODE((0 == tmp_result), 4365 "Failed to populate and upload SCLK MCLK DPM levels!", 4366 result = tmp_result); 4367 4368 /* 4369 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. 4370 * That effectively disables AVFS feature. 
4371 */ 4372 if (hwmgr->hardcode_pp_table != NULL) 4373 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 4374 4375 tmp_result = smu7_update_avfs(hwmgr); 4376 PP_ASSERT_WITH_CODE((0 == tmp_result), 4377 "Failed to update avfs voltages!", 4378 result = tmp_result); 4379 4380 tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); 4381 PP_ASSERT_WITH_CODE((0 == tmp_result), 4382 "Failed to generate DPM level enabled mask!", 4383 result = tmp_result); 4384 4385 tmp_result = smum_update_sclk_threshold(hwmgr); 4386 PP_ASSERT_WITH_CODE((0 == tmp_result), 4387 "Failed to update SCLK threshold!", 4388 result = tmp_result); 4389 4390 tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); 4391 PP_ASSERT_WITH_CODE((0 == tmp_result), 4392 "Failed to unfreeze SCLK MCLK DPM!", 4393 result = tmp_result); 4394 4395 tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr); 4396 PP_ASSERT_WITH_CODE((0 == tmp_result), 4397 "Failed to upload DPM level enabled mask!", 4398 result = tmp_result); 4399 4400 tmp_result = smu7_notify_smc_display(hwmgr); 4401 PP_ASSERT_WITH_CODE((0 == tmp_result), 4402 "Failed to notify smc display settings!", 4403 result = tmp_result); 4404 4405 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 4406 PHM_PlatformCaps_PCIEPerformanceRequest)) { 4407 tmp_result = 4408 smu7_notify_link_speed_change_after_state_change(hwmgr, input); 4409 PP_ASSERT_WITH_CODE((0 == tmp_result), 4410 "Failed to notify link speed change after state change!", 4411 result = tmp_result); 4412 } 4413 data->apply_optimized_settings = false; 4414 return result; 4415 } 4416 4417 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) 4418 { 4419 hwmgr->thermal_controller. 4420 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; 4421 4422 return smum_send_msg_to_smc_with_parameter(hwmgr, 4423 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm, 4424 NULL); 4425 } 4426 4427 static int 4428 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) 4429 { 4430 return 0; 4431 } 4432 4433 /** 4434 * Programs the display gap 4435 * 4436 * @param hwmgr the address of the powerplay hardware manager. 4437 * @return always OK 4438 */ 4439 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) 4440 { 4441 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4442 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); 4443 uint32_t display_gap2; 4444 uint32_t pre_vbi_time_in_us; 4445 uint32_t frame_time_in_us; 4446 uint32_t ref_clock, refresh_rate; 4447 4448 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? 
DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); 4449 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); 4450 4451 ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); 4452 refresh_rate = hwmgr->display_config->vrefresh; 4453 4454 if (0 == refresh_rate) 4455 refresh_rate = 60; 4456 4457 frame_time_in_us = 1000000 / refresh_rate; 4458 4459 pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time; 4460 4461 data->frame_time_x2 = frame_time_in_us * 2 / 100; 4462 4463 if (data->frame_time_x2 < 280) { 4464 pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2); 4465 data->frame_time_x2 = 280; 4466 } 4467 4468 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); 4469 4470 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); 4471 4472 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4473 data->soft_regs_start + smum_get_offsetof(hwmgr, 4474 SMU_SoftRegisters, 4475 PreVBlankGap), 0x64); 4476 4477 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4478 data->soft_regs_start + smum_get_offsetof(hwmgr, 4479 SMU_SoftRegisters, 4480 VBlankTimeout), 4481 (frame_time_in_us - pre_vbi_time_in_us)); 4482 4483 return 0; 4484 } 4485 4486 static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 4487 { 4488 return smu7_program_display_gap(hwmgr); 4489 } 4490 4491 /** 4492 * Set maximum target operating fan output RPM 4493 * 4494 * @param hwmgr: the address of the powerplay hardware manager. 4495 * @param usMaxFanRpm: max operating fan RPM value. 4496 * @return The response that came from the SMC. 4497 */ 4498 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) 4499 { 4500 hwmgr->thermal_controller. 
4501 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; 4502 4503 return smum_send_msg_to_smc_with_parameter(hwmgr, 4504 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm, 4505 NULL); 4506 } 4507 4508 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = { 4509 .process = phm_irq_process, 4510 }; 4511 4512 static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr) 4513 { 4514 struct amdgpu_irq_src *source = 4515 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); 4516 4517 if (!source) 4518 return -ENOMEM; 4519 4520 source->funcs = &smu7_irq_funcs; 4521 4522 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4523 AMDGPU_IRQ_CLIENTID_LEGACY, 4524 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, 4525 source); 4526 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4527 AMDGPU_IRQ_CLIENTID_LEGACY, 4528 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, 4529 source); 4530 4531 /* Register CTF(GPIO_19) interrupt */ 4532 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4533 AMDGPU_IRQ_CLIENTID_LEGACY, 4534 VISLANDS30_IV_SRCID_GPIO_19, 4535 source); 4536 4537 return 0; 4538 } 4539 4540 static bool 4541 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) 4542 { 4543 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4544 bool is_update_required = false; 4545 4546 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) 4547 is_update_required = true; 4548 4549 if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh) 4550 is_update_required = true; 4551 4552 if (hwmgr->chip_id >= CHIP_POLARIS10 && 4553 hwmgr->chip_id <= CHIP_VEGAM && 4554 data->last_sent_vbi_timeout != data->frame_time_x2) 4555 is_update_required = true; 4556 4557 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { 4558 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr && 4559 (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || 4560 hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) 4561 is_update_required = true; 4562 } 4563 return is_update_required; 4564 } 4565 4566 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1, 4567 const struct smu7_performance_level *pl2) 4568 { 4569 return ((pl1->memory_clock == pl2->memory_clock) && 4570 (pl1->engine_clock == pl2->engine_clock) && 4571 (pl1->pcie_gen == pl2->pcie_gen) && 4572 (pl1->pcie_lane == pl2->pcie_lane)); 4573 } 4574 4575 static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, 4576 const struct pp_hw_power_state *pstate1, 4577 const struct pp_hw_power_state *pstate2, bool *equal) 4578 { 4579 const struct smu7_power_state *psa; 4580 const struct smu7_power_state *psb; 4581 int i; 4582 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4583 4584 if (pstate1 == NULL || pstate2 == NULL || equal == NULL) 4585 return -EINVAL; 4586 4587 psa = cast_const_phw_smu7_power_state(pstate1); 4588 psb = cast_const_phw_smu7_power_state(pstate2); 4589 /* If the two states don't even have the same number of performance levels they cannot be the same state. 
*/ 4590 if (psa->performance_level_count != psb->performance_level_count) { 4591 *equal = false; 4592 return 0; 4593 } 4594 4595 for (i = 0; i < psa->performance_level_count; i++) { 4596 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { 4597 /* If we have found even one performance level pair that is different the states are different. */ 4598 *equal = false; 4599 return 0; 4600 } 4601 } 4602 4603 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ 4604 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); 4605 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); 4606 *equal &= (psa->sclk_threshold == psb->sclk_threshold); 4607 /* For OD call, set value based on flag */ 4608 *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | 4609 DPMTABLE_OD_UPDATE_MCLK | 4610 DPMTABLE_OD_UPDATE_VDDC)); 4611 4612 return 0; 4613 } 4614 4615 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) 4616 { 4617 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4618 4619 uint32_t tmp; 4620 4621 /* Read MC indirect register offset 0x9F bits [3:0] to see 4622 * if VBIOS has already loaded a full version of MC ucode 4623 * or not. 4624 */ 4625 4626 smu7_get_mc_microcode_version(hwmgr); 4627 4628 data->need_long_memory_training = false; 4629 4630 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 4631 ixMC_IO_DEBUG_UP_13); 4632 tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); 4633 4634 if (tmp & (1 << 23)) { 4635 data->mem_latency_high = MEM_LATENCY_HIGH; 4636 data->mem_latency_low = MEM_LATENCY_LOW; 4637 if ((hwmgr->chip_id == CHIP_POLARIS10) || 4638 (hwmgr->chip_id == CHIP_POLARIS11) || 4639 (hwmgr->chip_id == CHIP_POLARIS12)) 4640 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL); 4641 } else { 4642 data->mem_latency_high = 330; 4643 data->mem_latency_low = 330; 4644 if ((hwmgr->chip_id == CHIP_POLARIS10) || 4645 (hwmgr->chip_id == CHIP_POLARIS11) || 4646 (hwmgr->chip_id == CHIP_POLARIS12)) 4647 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL); 4648 } 4649 4650 return 0; 4651 } 4652 4653 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) 4654 { 4655 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4656 4657 data->clock_registers.vCG_SPLL_FUNC_CNTL = 4658 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); 4659 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = 4660 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); 4661 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = 4662 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); 4663 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = 4664 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); 4665 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = 4666 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); 4667 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = 4668 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); 4669 data->clock_registers.vDLL_CNTL = 4670 cgs_read_register(hwmgr->device, mmDLL_CNTL); 4671 data->clock_registers.vMCLK_PWRMGT_CNTL = 4672 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); 4673 data->clock_registers.vMPLL_AD_FUNC_CNTL = 4674 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); 4675 
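/* The remaining reads cache the memory PLL (MPLL) and DLL control
 * registers; together with the SPLL values captured above they are kept
 * in data->clock_registers for later use when the clock tables are
 * programmed.
 */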
data->clock_registers.vMPLL_DQ_FUNC_CNTL = 4676 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); 4677 data->clock_registers.vMPLL_FUNC_CNTL = 4678 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); 4679 data->clock_registers.vMPLL_FUNC_CNTL_1 = 4680 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); 4681 data->clock_registers.vMPLL_FUNC_CNTL_2 = 4682 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); 4683 data->clock_registers.vMPLL_SS1 = 4684 cgs_read_register(hwmgr->device, mmMPLL_SS1); 4685 data->clock_registers.vMPLL_SS2 = 4686 cgs_read_register(hwmgr->device, mmMPLL_SS2); 4687 return 0; 4688 4689 } 4690 4691 /** 4692 * Find out if memory is GDDR5. 4693 * 4694 * @param hwmgr the address of the powerplay hardware manager. 4695 * @return always 0 4696 */ 4697 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) 4698 { 4699 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4700 struct amdgpu_device *adev = hwmgr->adev; 4701 4702 data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5); 4703 4704 return 0; 4705 } 4706 4707 /** 4708 * Enables Dynamic Power Management by SMC 4709 * 4710 * @param hwmgr the address of the powerplay hardware manager. 4711 * @return always 0 4712 */ 4713 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr) 4714 { 4715 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 4716 GENERAL_PWRMGT, STATIC_PM_EN, 1); 4717 4718 return 0; 4719 } 4720 4721 /** 4722 * Initialize PowerGating States for different engines 4723 * 4724 * @param hwmgr the address of the powerplay hardware manager. 4725 * @return always 0 4726 */ 4727 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) 4728 { 4729 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4730 4731 data->uvd_power_gated = false; 4732 data->vce_power_gated = false; 4733 4734 return 0; 4735 } 4736 4737 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) 4738 { 4739 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4740 4741 data->low_sclk_interrupt_threshold = 0; 4742 return 0; 4743 } 4744 4745 static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) 4746 { 4747 int tmp_result, result = 0; 4748 4749 smu7_check_mc_firmware(hwmgr); 4750 4751 tmp_result = smu7_read_clock_registers(hwmgr); 4752 PP_ASSERT_WITH_CODE((0 == tmp_result), 4753 "Failed to read clock registers!", result = tmp_result); 4754 4755 tmp_result = smu7_get_memory_type(hwmgr); 4756 PP_ASSERT_WITH_CODE((0 == tmp_result), 4757 "Failed to get memory type!", result = tmp_result); 4758 4759 tmp_result = smu7_enable_acpi_power_management(hwmgr); 4760 PP_ASSERT_WITH_CODE((0 == tmp_result), 4761 "Failed to enable ACPI power management!", result = tmp_result); 4762 4763 tmp_result = smu7_init_power_gate_state(hwmgr); 4764 PP_ASSERT_WITH_CODE((0 == tmp_result), 4765 "Failed to init power gate state!", result = tmp_result); 4766 4767 tmp_result = smu7_get_mc_microcode_version(hwmgr); 4768 PP_ASSERT_WITH_CODE((0 == tmp_result), 4769 "Failed to get MC microcode version!", result = tmp_result); 4770 4771 tmp_result = smu7_init_sclk_threshold(hwmgr); 4772 PP_ASSERT_WITH_CODE((0 == tmp_result), 4773 "Failed to init sclk threshold!", result = tmp_result); 4774 4775 return result; 4776 } 4777 4778 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, 4779 enum pp_clock_type type, uint32_t mask) 4780 { 4781 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4782 4783 if (mask == 0) 4784 return -EINVAL; 4785 4786 switch (type) { 4787 case PP_SCLK: 4788 
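/* Only forward the request when SCLK DPM is not key-disabled; the
 * caller's mask is intersected with the currently enabled SCLK levels
 * before it is sent to the SMC.
 */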
if (!data->sclk_dpm_key_disabled) 4789 smum_send_msg_to_smc_with_parameter(hwmgr, 4790 PPSMC_MSG_SCLKDPM_SetEnabledMask, 4791 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask, 4792 NULL); 4793 break; 4794 case PP_MCLK: 4795 if (!data->mclk_dpm_key_disabled) 4796 smum_send_msg_to_smc_with_parameter(hwmgr, 4797 PPSMC_MSG_MCLKDPM_SetEnabledMask, 4798 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask, 4799 NULL); 4800 break; 4801 case PP_PCIE: 4802 { 4803 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; 4804 4805 if (!data->pcie_dpm_key_disabled) { 4806 if (fls(tmp) != ffs(tmp)) 4807 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel, 4808 NULL); 4809 else 4810 smum_send_msg_to_smc_with_parameter(hwmgr, 4811 PPSMC_MSG_PCIeDPM_ForceLevel, 4812 fls(tmp) - 1, 4813 NULL); 4814 } 4815 break; 4816 } 4817 default: 4818 break; 4819 } 4820 4821 return 0; 4822 } 4823 4824 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, 4825 enum pp_clock_type type, char *buf) 4826 { 4827 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4828 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 4829 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 4830 struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); 4831 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); 4832 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); 4833 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); 4834 int i, now, size = 0; 4835 uint32_t clock, pcie_speed; 4836 4837 switch (type) { 4838 case PP_SCLK: 4839 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); 4840 4841 for (i = 0; i < sclk_table->count; i++) { 4842 if (clock > sclk_table->dpm_levels[i].value) 4843 continue; 4844 break; 4845 } 4846 now = i; 4847 4848 for (i = 0; i < sclk_table->count; i++) 4849 size += sprintf(buf + size, "%d: %uMhz %s\n", 4850 i, sclk_table->dpm_levels[i].value / 100, 4851 (i == now) ? "*" : ""); 4852 break; 4853 case PP_MCLK: 4854 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock); 4855 4856 for (i = 0; i < mclk_table->count; i++) { 4857 if (clock > mclk_table->dpm_levels[i].value) 4858 continue; 4859 break; 4860 } 4861 now = i; 4862 4863 for (i = 0; i < mclk_table->count; i++) 4864 size += sprintf(buf + size, "%d: %uMhz %s\n", 4865 i, mclk_table->dpm_levels[i].value / 100, 4866 (i == now) ? "*" : ""); 4867 break; 4868 case PP_PCIE: 4869 pcie_speed = smu7_get_current_pcie_speed(hwmgr); 4870 for (i = 0; i < pcie_table->count; i++) { 4871 if (pcie_speed != pcie_table->dpm_levels[i].value) 4872 continue; 4873 break; 4874 } 4875 now = i; 4876 4877 for (i = 0; i < pcie_table->count; i++) 4878 size += sprintf(buf + size, "%d: %s %s\n", i, 4879 (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : 4880 (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : 4881 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "", 4882 (i == now) ? 
"*" : ""); 4883 break; 4884 case OD_SCLK: 4885 if (hwmgr->od_enabled) { 4886 size = sprintf(buf, "%s:\n", "OD_SCLK"); 4887 for (i = 0; i < odn_sclk_table->num_of_pl; i++) 4888 size += sprintf(buf + size, "%d: %10uMHz %10umV\n", 4889 i, odn_sclk_table->entries[i].clock/100, 4890 odn_sclk_table->entries[i].vddc); 4891 } 4892 break; 4893 case OD_MCLK: 4894 if (hwmgr->od_enabled) { 4895 size = sprintf(buf, "%s:\n", "OD_MCLK"); 4896 for (i = 0; i < odn_mclk_table->num_of_pl; i++) 4897 size += sprintf(buf + size, "%d: %10uMHz %10umV\n", 4898 i, odn_mclk_table->entries[i].clock/100, 4899 odn_mclk_table->entries[i].vddc); 4900 } 4901 break; 4902 case OD_RANGE: 4903 if (hwmgr->od_enabled) { 4904 size = sprintf(buf, "%s:\n", "OD_RANGE"); 4905 size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", 4906 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, 4907 hwmgr->platform_descriptor.overdriveLimit.engineClock/100); 4908 size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", 4909 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, 4910 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); 4911 size += sprintf(buf + size, "VDDC: %7umV %11umV\n", 4912 data->odn_dpm_table.min_vddc, 4913 data->odn_dpm_table.max_vddc); 4914 } 4915 break; 4916 default: 4917 break; 4918 } 4919 return size; 4920 } 4921 4922 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 4923 { 4924 switch (mode) { 4925 case AMD_FAN_CTRL_NONE: 4926 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); 4927 break; 4928 case AMD_FAN_CTRL_MANUAL: 4929 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 4930 PHM_PlatformCaps_MicrocodeFanControl)) 4931 smu7_fan_ctrl_stop_smc_fan_control(hwmgr); 4932 break; 4933 case AMD_FAN_CTRL_AUTO: 4934 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode)) 4935 smu7_fan_ctrl_start_smc_fan_control(hwmgr); 4936 break; 4937 default: 4938 break; 4939 } 4940 } 4941 4942 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) 4943 { 4944 return hwmgr->fan_ctrl_enabled ? 
static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MicrocodeFanControl))
			smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
			smu7_fan_ctrl_start_smc_fan_control(hwmgr);
		break;
	default:
		break;
	}
}

static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
	return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
}

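/*
 * Engine/memory overdrive is expressed as a percentage of the highest
 * golden (stock) DPM level.  get_*_od() reports how far the current top
 * level sits above the golden one; for example, a golden top level of
 * 1266 MHz raised to 1393 MHz reads back as DIV_ROUND_UP(127 * 100, 1266)
 * = 11%.  set_*_od() clamps the request to 20% and scales the top
 * performance level of the requested power state accordingly.
 */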
static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
	int golden_value = golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}

static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
	int golden_value = golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}

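/*
 * Clock enumeration helpers used by the display code.  The dependency
 * tables appear to store clocks in 10 kHz units, so entries are scaled
 * by 10 when reported as kHz.  The v1 pptable path reads the
 * vdd_dep_on_* tables; the v0 path falls back to the legacy dyn_state
 * tables.
 */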
static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
	struct phm_clock_voltage_dependency_table *sclk_table;
	int i;

	if (hwmgr->pp_table_version == PP_TABLE_V1) {
		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
			return -EINVAL;
		dep_sclk_table = table_info->vdd_dep_on_sclk;
		for (i = 0; i < dep_sclk_table->count; i++)
			clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
		clocks->count = dep_sclk_table->count;
	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
		for (i = 0; i < sclk_table->count; i++)
			clocks->clock[i] = sclk_table->entries[i].clk * 10;
		clocks->count = sclk_table->count;
	}

	return 0;
}

static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_high;
	else if (clk >= MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_low;
	else
		return MEM_LATENCY_ERR;
}

static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	int i;
	struct phm_clock_voltage_dependency_table *mclk_table;

	if (hwmgr->pp_table_version == PP_TABLE_V1) {
		if (table_info == NULL)
			return -EINVAL;
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		for (i = 0; i < dep_mclk_table->count; i++) {
			clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
						dep_mclk_table->entries[i].clk);
		}
		clocks->count = dep_mclk_table->count;
	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
		for (i = 0; i < mclk_table->count; i++)
			clocks->clock[i] = mclk_table->entries[i].clk * 10;
		clocks->count = mclk_table->count;
	}
	return 0;
}

static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
						struct amd_pp_clocks *clocks)
{
	switch (type) {
	case amd_pp_sys_clock:
		smu7_get_sclks(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		smu7_get_mclks(hwmgr, clocks);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu7_get_sclks_with_latency(struct pp_hwmgr *hwmgr,
				       struct pp_clock_levels_with_latency *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
			table_info->vdd_dep_on_sclk;
	int i;

	clocks->num_levels = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (dep_sclk_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz =
				dep_sclk_table->entries[i].clk * 10;
			clocks->num_levels++;
		}
	}

	return 0;
}

static int smu7_get_mclks_with_latency(struct pp_hwmgr *hwmgr,
				       struct pp_clock_levels_with_latency *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int i;

	clocks->num_levels = 0;
	data->mclk_latency_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (dep_mclk_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz =
					dep_mclk_table->entries[i].clk * 10;
			data->mclk_latency_table.entries[data->mclk_latency_table.count].frequency =
					dep_mclk_table->entries[i].clk;
			clocks->data[clocks->num_levels].latency_in_us =
				data->mclk_latency_table.entries[data->mclk_latency_table.count].latency =
					smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk);
			clocks->num_levels++;
			data->mclk_latency_table.count++;
		}
	}

	return 0;
}

static int smu7_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
					       enum amd_pp_clock_type type,
					       struct pp_clock_levels_with_latency *clocks)
{
	if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
	      hwmgr->chip_id <= CHIP_VEGAM))
		return -EINVAL;

	switch (type) {
	case amd_pp_sys_clock:
		smu7_get_sclks_with_latency(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		smu7_get_mclks_with_latency(hwmgr, clocks);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

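/*
 * Fill the SMC DisplayWatermark table from the watermark clock ranges
 * provided by DAL and copy it into SMC RAM.  Entries that do not fall
 * inside any supplied range fall back to the last watermark set and
 * trigger the PP_ASSERT warning below.
 */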
static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
						 void *clock_range)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
			table_info->vdd_dep_on_sclk;
	struct polaris10_smumgr *smu_data =
			(struct polaris10_smumgr *)(hwmgr->smu_backend);
	SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	struct dm_pp_wm_sets_with_clock_ranges *watermarks =
			(struct dm_pp_wm_sets_with_clock_ranges *)clock_range;
	uint32_t i, j, k;
	bool valid_entry;

	if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
	      hwmgr->chip_id <= CHIP_VEGAM))
		return -EINVAL;

	for (i = 0; i < dep_mclk_table->count; i++) {
		for (j = 0; j < dep_sclk_table->count; j++) {
			valid_entry = false;
			for (k = 0; k < watermarks->num_wm_sets; k++) {
				if (dep_sclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz &&
				    dep_sclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz &&
				    dep_mclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz &&
				    dep_mclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz) {
					valid_entry = true;
					table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
					break;
				}
			}
			PP_ASSERT_WITH_CODE(valid_entry,
				"Clock is not in range of specified clock range for watermark from DAL! Using highest watermark set.",
				table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id);
		}
	}

	return smu7_copy_bytes_to_smc(hwmgr,
				      smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, DisplayWatermark),
				      (uint8_t *)table->DisplayWatermark,
				      sizeof(uint8_t) * SMU74_MAX_LEVELS_MEMORY * SMU74_MAX_LEVELS_GRAPHICS,
				      SMC_RAM_END);
}

static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_H),
					mc_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_L),
					mc_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
					virtual_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
					virtual_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
					size);
	return 0;
}

static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
					struct amd_pp_simple_clock_info *clocks)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);

	if (clocks == NULL)
		return -EINVAL;

	clocks->memory_max_clock = mclk_table->count > 1 ?
				mclk_table->dpm_levels[mclk_table->count-1].value :
				mclk_table->dpm_levels[0].value;
	clocks->engine_max_clock = sclk_table->count > 1 ?
				sclk_table->dpm_levels[sclk_table->count-1].value :
				sclk_table->dpm_levels[0].value;
	return 0;
}

static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;

	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

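/*
 * Validate a user-requested overdrive clock/voltage pair: the voltage
 * must lie within the ODN min/max VDDC window and the clock must stay
 * between the lowest golden DPM level and the board's overdrive limit
 * for the table being edited.
 */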
static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					uint32_t clk,
					uint32_t voltage)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
		pr_info("OD voltage is out of range [%d - %d] mV\n",
			data->odn_dpm_table.min_vddc,
			data->odn_dpm_table.max_vddc);
		return false;
	}

	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
		if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			return false;
		}
	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
		if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			return false;
		}
	} else {
		return false;
	}

	return true;
}

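/*
 * Edit the overdrive (ODN) DPM tables.  For the SCLK/MCLK edit commands
 * @input is parsed as triplets of {level index, clock in MHz, voltage in
 * mV}; validated entries are written into both the ODN display table and
 * the voltage dependency table.  RESTORE_DEFAULT and COMMIT re-initialise
 * or re-check the DPM tables instead of consuming @input.
 */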
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	uint32_t i;
	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t input_clk;
	uint32_t input_vol;
	uint32_t input_level;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
				return -EINVAL);

	if (!hwmgr->od_enabled) {
		pr_info("OverDrive feature not enabled\n");
		return -EINVAL;
	}

	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN SCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;

		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN MCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
		smu7_odn_initial_default_setting(hwmgr);
		return 0;
	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
		smu7_check_dpm_table_updated(hwmgr);
		return 0;
	} else {
		return -EINVAL;
	}

	for (i = 0; i < size; i += 3) {
		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
			pr_info("invalid clock voltage input\n");
			return 0;
		}
		input_level = input[i];
		input_clk = input[i+1] * 100;
		input_vol = input[i+2];

		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

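/*
 * Print the supported power profile modes and their SCLK/MCLK hysteresis
 * and activity settings.  The row for the active profile shows the values
 * currently programmed in the SMC; the other rows are the static defaults
 * from smu7_profiling[], with '-' for fields a profile does not update.
 */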
static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t i, size = 0;
	uint32_t len;

	static const char *profile_name[7] = {"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};

	static const char *title[8] = {"NUM",
			"MODE_NAME",
			"SCLK_UP_HYST",
			"SCLK_DOWN_HYST",
			"SCLK_ACTIVE_LEVEL",
			"MCLK_UP_HYST",
			"MCLK_DOWN_HYST",
			"MCLK_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
			title[0], title[1], title[2], title[3],
			title[4], title[5], title[6], title[7]);

	len = ARRAY_SIZE(smu7_profiling);

	for (i = 0; i < len; i++) {
		if (i == hwmgr->power_profile_mode) {
			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
					i, profile_name[i], "*",
					data->current_profile_setting.sclk_up_hyst,
					data->current_profile_setting.sclk_down_hyst,
					data->current_profile_setting.sclk_activity,
					data->current_profile_setting.mclk_up_hyst,
					data->current_profile_setting.mclk_down_hyst,
					data->current_profile_setting.mclk_activity);
			continue;
		}
		if (smu7_profiling[i].bupdate_sclk)
			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
					i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
					smu7_profiling[i].sclk_down_hyst,
					smu7_profiling[i].sclk_activity);
		else
			size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
					i, profile_name[i], "-", "-", "-");

		if (smu7_profiling[i].bupdate_mclk)
			size += sprintf(buf + size, "%16d %16d %16d\n",
					smu7_profiling[i].mclk_up_hyst,
					smu7_profiling[i].mclk_down_hyst,
					smu7_profiling[i].mclk_activity);
		else
			size += sprintf(buf + size, "%16s %16s %16s\n",
					"-", "-", "-");
	}

	return size;
}

/* When entering the COMPUTE profile, restrict SCLK DPM to the two highest
 * enabled levels; when leaving it, restore the full enabled-level mask.
 */
static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
					enum PP_SMC_POWER_PROFILE request)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t tmp, level;

	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			level = 0;
			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;
			if (level > 0)
				smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
		}
	} else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
		smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
	}
}

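/*
 * Switch the active power profile.  The last element of @input selects
 * the mode; for CUSTOM the preceding eight values (bupdate_sclk, sclk
 * up/down hysteresis, sclk activity, bupdate_mclk, mclk up/down
 * hysteresis, mclk activity) overwrite the saved CUSTOM profile, while
 * an empty @input reuses the previously saved CUSTOM settings.
 */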
static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct profile_mode_setting tmp;
	enum PP_SMC_POWER_PROFILE mode;

	if (input == NULL)
		return -EINVAL;

	mode = input[size];
	switch (mode) {
	case PP_SMC_POWER_PROFILE_CUSTOM:
		if (size < 8 && size != 0)
			return -EINVAL;
		/* If only CUSTOM is passed in, use the saved values. Check
		 * that we actually have a CUSTOM profile by ensuring that
		 * the "use sclk" or the "use mclk" bits are set
		 */
		tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
		if (size == 0) {
			if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
				return -EINVAL;
		} else {
			tmp.bupdate_sclk = input[0];
			tmp.sclk_up_hyst = input[1];
			tmp.sclk_down_hyst = input[2];
			tmp.sclk_activity = input[3];
			tmp.bupdate_mclk = input[4];
			tmp.mclk_up_hyst = input[5];
			tmp.mclk_down_hyst = input[6];
			tmp.mclk_activity = input[7];
			smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
		}
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
			hwmgr->power_profile_mode = mode;
		}
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
	case PP_SMC_POWER_PROFILE_POWERSAVING:
	case PP_SMC_POWER_PROFILE_VIDEO:
	case PP_SMC_POWER_PROFILE_VR:
	case PP_SMC_POWER_PROFILE_COMPUTE:
		if (mode == hwmgr->power_profile_mode)
			return 0;

		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			if (tmp.bupdate_sclk) {
				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
			}
			if (tmp.bupdate_mclk) {
				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
			}
			smu7_patch_compute_profile_mode(hwmgr, mode);
			hwmgr->power_profile_mode = mode;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct smu7_power_state *ps;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	ps = cast_const_phw_smu7_power_state(state);

	i = index > ps->performance_level_count - 1 ?
			ps->performance_level_count - 1 : index;

	level->coreClock = ps->performance_levels[i].engine_clock;
	level->memory_clock = ps->performance_levels[i].memory_clock;

	return 0;
}

static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
{
	int result;

	result = smu7_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[disable_dpm_tasks] Failed to disable DPM!",
			);

	return result;
}

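/*
 * Dispatch table wiring the generic hwmgr entry points to the smu7
 * implementations above; installed by smu7_init_function_pointers().
 */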
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
	.backend_init = &smu7_hwmgr_backend_init,
	.backend_fini = &smu7_hwmgr_backend_fini,
	.asic_setup = &smu7_setup_asic_task,
	.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
	.force_dpm_level = &smu7_force_dpm_level,
	.power_state_set = smu7_set_power_state_tasks,
	.get_power_state_size = smu7_get_power_state_size,
	.get_mclk = smu7_dpm_get_mclk,
	.get_sclk = smu7_dpm_get_sclk,
	.patch_boot_state = smu7_dpm_patch_boot_state,
	.get_pp_table_entry = smu7_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
	.powerdown_uvd = smu7_powerdown_uvd,
	.powergate_uvd = smu7_powergate_uvd,
	.powergate_vce = smu7_powergate_vce,
	.disable_clock_power_gating = smu7_disable_clock_power_gating,
	.update_clock_gatings = smu7_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = smu7_display_configuration_changed_task,
	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
	.register_irq_handlers = smu7_register_irq_handlers,
	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
	.check_states_equal = smu7_check_states_equal,
	.set_fan_control_mode = smu7_set_fan_control_mode,
	.get_fan_control_mode = smu7_get_fan_control_mode,
	.force_clock_level = smu7_force_clock_level,
	.print_clock_levels = smu7_print_clock_levels,
	.powergate_gfx = smu7_powergate_gfx,
	.get_sclk_od = smu7_get_sclk_od,
	.set_sclk_od = smu7_set_sclk_od,
	.get_mclk_od = smu7_get_mclk_od,
	.set_mclk_od = smu7_set_mclk_od,
	.get_clock_by_type = smu7_get_clock_by_type,
	.get_clock_by_type_with_latency = smu7_get_clock_by_type_with_latency,
	.set_watermarks_for_clocks_ranges = smu7_set_watermarks_for_clocks_ranges,
	.read_sensor = smu7_read_sensor,
	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
	.avfs_control = smu7_avfs_control,
	.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
	.start_thermal_controller = smu7_start_thermal_controller,
	.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
	.get_max_high_clocks = smu7_get_max_high_clocks,
	.get_thermal_temperature_range = smu7_get_thermal_temperature_range,
	.odn_edit_dpm_table = smu7_odn_edit_dpm_table,
	.set_power_limit = smu7_set_power_limit,
	.get_power_profile_mode = smu7_get_power_profile_mode,
	.set_power_profile_mode = smu7_set_power_profile_mode,
	.get_performance_level = smu7_get_performance_level,
	.get_asic_baco_capability = smu7_baco_get_capability,
	.get_asic_baco_state = smu7_baco_get_state,
	.set_asic_baco_state = smu7_baco_set_state,
	.power_off_asic = smu7_power_off_asic,
};

uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		hwmgr->pptable_func = &pptable_funcs;
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		hwmgr->pptable_func = &pptable_v1_0_funcs;

	return 0;
}