/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#if IS_ENABLED(CONFIG_X86_64)
#include <asm/intel-family.h>
#endif
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
#include "smu7_baco.h"
#include "smu7_smumgr.h"
#include "polaris10_smumgr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

#define MC_CG_SEQ_DRAMCONF_S0	0x05
#define MC_CG_SEQ_DRAMCONF_S1	0x06
#define MC_CG_SEQ_YCLK_SUSPEND	0x04
#define MC_CG_SEQ_YCLK_RESUME	0x0a

#define SMC_CG_IND_START	0xc0030000
#define SMC_CG_IND_END		0xc0040000

#define MEM_FREQ_LOW_LATENCY	25000
#define MEM_FREQ_HIGH_LATENCY	80000

#define MEM_LATENCY_HIGH	45
#define MEM_LATENCY_LOW		35
#define MEM_LATENCY_ERR		0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT	28
#define MC_SEQ_MISC0_GDDR5_MASK		0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE	5

#define PCIE_BUS_CLK	10000
#define TCLK		(PCIE_BUS_CLK / 10)

static struct profile_mode_setting smu7_profiling[7] = {
	{0, 0, 0, 0, 0, 0, 0, 0},
	{1, 0, 100, 30, 1, 0, 100, 10},
	{1, 10, 0, 30, 0, 0, 0, 0},
	{0, 0, 0, 0, 1, 10, 16, 31},
	{1, 0, 11, 50, 1, 0, 100, 10},
	{1, 0, 5, 30, 0, 0, 0, 0},
	{0, 0, 0, 0, 0, 0, 0, 0},
};

#define PPSMC_MSG_SetVBITimeout_VEGAM	((uint16_t) 0x310)

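/*
 * SVI2 plane 1 load register and its PSI bit-fields. The PSI0_EN and PSI1
 * bits are cleared in smu7_enable_smc_voltage_controller() below; presumably
 * this keeps the voltage regulator out of its low-current (PSI) states while
 * the SMC is controlling the voltage plane.
 */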
#define ixPWR_SVI2_PLANE1_LOAD			0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK		0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK	0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT	0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT	0x00000006

#define STRAP_EVV_REVISION_MSB	2211
#define STRAP_EVV_REVISION_LSB	2208

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

#define ixDIDT_SQ_EDC_CTRL			0x0013
#define ixDIDT_SQ_EDC_THRESHOLD			0x0014
#define ixDIDT_SQ_EDC_STALL_PATTERN_1_2		0x0015
#define ixDIDT_SQ_EDC_STALL_PATTERN_3_4		0x0016
#define ixDIDT_SQ_EDC_STALL_PATTERN_5_6		0x0017
#define ixDIDT_SQ_EDC_STALL_PATTERN_7		0x0018

#define ixDIDT_TD_EDC_CTRL			0x0053
#define ixDIDT_TD_EDC_THRESHOLD			0x0054
#define ixDIDT_TD_EDC_STALL_PATTERN_1_2		0x0055
#define ixDIDT_TD_EDC_STALL_PATTERN_3_4		0x0056
#define ixDIDT_TD_EDC_STALL_PATTERN_5_6		0x0057
#define ixDIDT_TD_EDC_STALL_PATTERN_7		0x0058

#define ixDIDT_TCP_EDC_CTRL			0x0073
#define ixDIDT_TCP_EDC_THRESHOLD		0x0074
#define ixDIDT_TCP_EDC_STALL_PATTERN_1_2	0x0075
#define ixDIDT_TCP_EDC_STALL_PATTERN_3_4	0x0076
#define ixDIDT_TCP_EDC_STALL_PATTERN_5_6	0x0077
#define ixDIDT_TCP_EDC_STALL_PATTERN_7		0x0078

#define ixDIDT_DB_EDC_CTRL			0x0033
#define ixDIDT_DB_EDC_THRESHOLD			0x0034
#define ixDIDT_DB_EDC_STALL_PATTERN_1_2		0x0035
#define ixDIDT_DB_EDC_STALL_PATTERN_3_4		0x0036
#define ixDIDT_DB_EDC_STALL_PATTERN_5_6		0x0037
#define ixDIDT_DB_EDC_STALL_PATTERN_7		0x0038

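/*
 * Ordered register list consumed by smu7_program_edc_didt_registers(): the
 * i-th entry receives DIDT_REG[i] from the VBIOS EDC leakage table, and the
 * list is terminated by 0xFFFFFFFF.
 */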
uint32_t DIDTEDCConfig_P12[] = {
	ixDIDT_SQ_EDC_STALL_PATTERN_1_2,
	ixDIDT_SQ_EDC_STALL_PATTERN_3_4,
	ixDIDT_SQ_EDC_STALL_PATTERN_5_6,
	ixDIDT_SQ_EDC_STALL_PATTERN_7,
	ixDIDT_SQ_EDC_THRESHOLD,
	ixDIDT_SQ_EDC_CTRL,
	ixDIDT_TD_EDC_STALL_PATTERN_1_2,
	ixDIDT_TD_EDC_STALL_PATTERN_3_4,
	ixDIDT_TD_EDC_STALL_PATTERN_5_6,
	ixDIDT_TD_EDC_STALL_PATTERN_7,
	ixDIDT_TD_EDC_THRESHOLD,
	ixDIDT_TD_EDC_CTRL,
	ixDIDT_TCP_EDC_STALL_PATTERN_1_2,
	ixDIDT_TCP_EDC_STALL_PATTERN_3_4,
	ixDIDT_TCP_EDC_STALL_PATTERN_5_6,
	ixDIDT_TCP_EDC_STALL_PATTERN_7,
	ixDIDT_TCP_EDC_THRESHOLD,
	ixDIDT_TCP_EDC_CTRL,
	ixDIDT_DB_EDC_STALL_PATTERN_1_2,
	ixDIDT_DB_EDC_STALL_PATTERN_3_4,
	ixDIDT_DB_EDC_STALL_PATTERN_5_6,
	ixDIDT_DB_EDC_STALL_PATTERN_7,
	ixDIDT_DB_EDC_THRESHOLD,
	ixDIDT_DB_EDC_CTRL,
	0xFFFFFFFF /* End of list */
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);

static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask);
static int smu7_notify_has_display(struct pp_hwmgr *hwmgr);

static struct smu7_power_state *cast_phw_smu7_power_state(
				struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (struct smu7_power_state *)hw_ps;
}

static const struct smu7_power_state *cast_const_phw_smu7_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (const struct smu7_power_state *)hw_ps;
}

/**
 * smu7_get_mc_microcode_version - Find the MC microcode version and store it in the HwMgr struct
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}

static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speedCntl = 0;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
			ixPCIE_LC_SPEED_CNTL);
	return((uint16_t)PHM_GET_FIELD(speedCntl,
			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
	uint32_t link_width;

	/* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

	PP_ASSERT_WITH_CODE((7 >= link_width),
			"Invalid PCIe lane width!", return 0);

	return decode_pcie_lane_width(link_width);
}

/**
 * smu7_enable_smc_voltage_controller - Enable voltage control
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: always PP_Result_OK
 */
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
	    hwmgr->chip_id <= CHIP_VEGAM) {
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
	}

	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);

	return 0;
}

/**
 * smu7_voltage_control - Checks if we want to support voltage control
 *
 * @hwmgr: the address of the powerplay hardware manager.
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
	const struct smu7_hwmgr *data =
			(const struct smu7_hwmgr *)(hwmgr->backend);

	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}

/**
 * smu7_enable_voltage_control - Enable voltage control
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}

static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
		struct phm_clock_voltage_dependency_table *voltage_dependency_table
		)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Dependency Table empty.", return -EINVAL;);

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;
	voltage_table->count = voltage_dependency_table->count;

	for (i = 0; i < voltage_dependency_table->count; i++) {
		voltage_table->entries[i].value =
			voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}


/**
 * smu7_construct_voltage_tables - Create Voltage Tables.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	int result = 0;
	uint32_t tmp;

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve MVDD table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
					hwmgr->dyn_state.mvdd_dependency_on_mclk);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 MVDD table from dependency table.",
				return result;);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDCI table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
					hwmgr->dyn_state.vddci_dependency_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDCI table from dependency table.",
				return result);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
		/* VDDGFX has only SVI2 voltage control */
		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
				table_info->vddgfx_lookup_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
				&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDC table.", return result;);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

		if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
					hwmgr->dyn_state.vddc_dependency_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
					table_info->vddc_lookup_table);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
	}

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
	PP_ASSERT_WITH_CODE(
			(data->vddc_voltage_table.count <= tmp),
			"Too many voltage values for VDDC. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddc_voltage_table)));
	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE(
			(data->vddgfx_voltage_table.count <= tmp),
			"Too many voltage values for VDDGFX. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddgfx_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= tmp),
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddci_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= tmp),
			"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->mvdd_voltage_table)));

	return 0;
}

/**
 * smu7_program_static_screen_threshold_parameters - Programs static screen detection parameters
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_program_static_screen_threshold_parameters(
		struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}

/**
 * smu7_enable_display_gap - Setup display gap for glitch free memory clock switching.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
	uint32_t display_gap =
			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_DISPLAY_GAP_CNTL);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP, DISPLAY_GAP_IGNORE);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	return 0;
}

/**
 * smu7_program_voting_clients - Programs activity state transition voting clients
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int i;

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0 + i * 4,
				data->voting_rights_clients[i]);
	return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	int i;

	/* Reset voting clients before disabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);

	return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}

/**
 * smu7_initial_switch_from_arbf0_to_f1 - Initial switch from ARB F0->F1
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * Return: always 0
 * This function is to be called from the SetPowerState table.
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
	uint32_t tmp;

	tmp = (cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
			0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return smu7_copy_and_switch_arb_sets(hwmgr,
			tmp, MC_CG_ARB_FREQ_F0);
}

static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	uint16_t pcie_gen = 0;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
	    adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
	    adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
	    adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
	    adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	return pcie_gen;
}

static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	uint16_t pcie_width = 0;

	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 16;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 12;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 8;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	return pcie_width;
}

static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = NULL;

	uint32_t i, max_entry;
	uint32_t tmp;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	if (table_info != NULL)
		pcie_table = table_info->pcie_table;

	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}
	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
			tmp,
			MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
		/* max_entry is used to make sure we reserve one PCIE level
		 * for boot level (fix for A+A PSPP issue).
		 * If the PCIe table from the PPTable has a ULV entry plus
		 * 8 entries, ignore the last entry. */
		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
	} else {
		/* Hardcode Pcie Table */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Populate last level for boot PCIE level, but do not increment count. */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
					get_pcie_gen_support(data->pcie_gen_cap,
							PP_Max_PCIEGen),
					data->vbios_boot_state.pcie_lane_bootup_value);
	} else {
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
				data->dpm_table.pcie_speed_table.count,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		if (data->pcie_dpm_key_disabled)
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
					data->dpm_table.pcie_speed_table.count,
					smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
	}
	return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

	phm_reset_single_dpm_table(
			&data->dpm_table.sclk_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_GRAPHICS),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.mclk_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.vddc_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_VDDC),
			MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.vddci_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.mvdd_table,
			smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_MVDD),
			MAX_REGULAR_DPM_NUMBER);
	return 0;
}

/*
 * This function is to initialize all DPM state tables
 * for SMU7 based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */
static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table is empty. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"MCLK dependency table is empty. This table is mandatory", return -EINVAL);

	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;

	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.sclk_table.count++;
		}
	}

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
				allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.mclk_table.count++;
		}
	}

	/* Initialize Vddc DPM table based on allowed Vddc values. And populate corresponding std values. */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
		/* param1 is for corresponding std voltage */
		data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
	}

	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize Vddci DPM table based on allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
		}
		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
	}

	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize MVDD DPM table based on allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
		}
		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
	}

	return 0;
}

static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
			"SCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
			"SCLK dependency table count is 0.",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
			"MCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table count is 0.",
			return -EINVAL);

	/* Initialize Sclk DPM table based on allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
				dep_sclk_table->entries[i].clk) {

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
					dep_sclk_table->entries[i].clk;

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.sclk_table.count++;
		}
	}
	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
				[data->dpm_table.mclk_table.count - 1].value !=
				dep_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
					dep_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.mclk_table.count++;
		}
	}

	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
	return 0;
}

static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_odn_performance_level *entries;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	odn_table->odn_core_clock_dpm_levels.num_of_pl =
			data->golden_dpm_table.sclk_table.count;
	entries = odn_table->odn_core_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_sclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
			data->golden_dpm_table.mclk_table.count;
	entries = odn_table->odn_memory_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_mclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

	return 0;
}

static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t min_vddc = 0;
	uint32_t max_vddc = 0;

	if (!table_info)
		return;

	dep_sclk_table = table_info->vdd_dep_on_sclk;

	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);

	if (min_vddc == 0 || min_vddc > 2000
			|| min_vddc > dep_sclk_table->entries[0].vddc)
		min_vddc = dep_sclk_table->entries[0].vddc;

	if (max_vddc == 0 || max_vddc > 2000
			|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
		max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;

	data->odn_dpm_table.min_vddc = min_vddc;
	data->odn_dpm_table.max_vddc = max_vddc;
}

static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
}

static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	smu7_reset_dpm_tables(hwmgr);

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		smu7_setup_dpm_tables_v1(hwmgr);
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		smu7_setup_dpm_tables_v0(hwmgr);

	smu7_setup_default_pcie_table(hwmgr);

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct smu7_dpm_table));

	/* initialize ODN table */
	if (hwmgr->od_enabled) {
		if (data->odn_dpm_table.max_vddc) {
			smu7_check_dpm_table_updated(hwmgr);
		} else {
			smu7_setup_voltage_range_from_vbios(hwmgr);
			smu7_odn_initial_default_setting(hwmgr);
		}
	}
	return 0;
}

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot))
		return smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableVRHotGPIOInterrupt,
				NULL);

	return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);

	return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);

	return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to enable Master Deep Sleep switch failed!",
					return -EINVAL);
	} else {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF,
				NULL)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF,
				NULL)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= smum_get_mac_definition(hwmgr,
			SMU_UVD_MCLK_HANDSHAKE_DISABLE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		if (hwmgr->chip_id >= CHIP_POLARIS10 &&
		    hwmgr->chip_id <= CHIP_VEGAM)
			smu7_disable_sclk_vce_handshake(hwmgr);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
				"Failed to enable SCLK DPM during DPM Start Function!",
				return -EINVAL);
	}

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
			smu7_disable_handshake_uvd(hwmgr);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_MCLKDPM_Enable,
						NULL)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -EINVAL);

		if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) ||
		    (hwmgr->chip_id == CHIP_POLARIS10) ||
		    (hwmgr->chip_id == CHIP_POLARIS11) ||
		    (hwmgr->chip_id == CHIP_POLARIS12) ||
		    (hwmgr->chip_id == CHIP_TONGA) ||
		    (hwmgr->chip_id == CHIP_TOPAZ))
			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

		/*
		 * On CI parts the LCAC MC/CPL control registers are written via
		 * raw SMC indirect offsets; these appear to be the CI equivalents
		 * of the ixLCAC_*_CNTL registers used in the other branch.
		 */
		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
			udelay(10);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
		} else {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
			udelay(10);
			if (hwmgr->chip_id == CHIP_VEGAM) {
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
			} else {
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
			}
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
		}
	}

	return 0;
}

static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable general power management */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
					VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
		cgs_write_register(hwmgr->device, 0x1488,
			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Enable,
						NULL)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	} else {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Disable,
						NULL)),
				"Failed to disable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_Falcon_QuickTransition)) {
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt,
				NULL)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}

static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable SCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
	}

	/* disable MCLK dpm */
	if (!data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable MCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
	}

	return 0;
}

static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Disable,
						NULL) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	smu7_disable_sclk_mclk_dpm(hwmgr);

	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
			"Trying to disable voltage DPM when DPM is disabled",
			return 0);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);

	return 0;
}

static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
	bool protection;
	enum DPM_EVENT_SRC src;

	switch (sources) {
	default:
		pr_err("Unknown throttling event sources.");
		fallthrough;
	case 0:
		protection = false;
		/* src is unused */
		break;
	case (1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << PHM_AutoThrottleSource_External):
		protection = true;
		src = DPM_EVENT_SRC_EXTERNAL;
		break;
	case (1 << PHM_AutoThrottleSource_External) |
			(1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
		break;
	}
	/* Order matters - don't enable thermal protection for the wrong source. */
	if (protection) {
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
				DPM_EVENT_SRC, src);
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS,
				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ThermalController));
	} else
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS, 1);
}

static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (!(data->active_auto_throttle_sources & (1 << source))) {
		data->active_auto_throttle_sources |= 1 << source;
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->active_auto_throttle_sources & (1 << source)) {
		data->active_auto_throttle_sources &= ~(1 << source);
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->pcie_performance_request = true;

	return 0;
}

static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr,
		uint32_t *cac_config_regs,
		AtomCtrl_EDCLeakgeTable *edc_leakage_table)
{
	uint32_t data, i = 0;

	while (cac_config_regs[i] != 0xFFFFFFFF) {
		data = edc_leakage_table->DIDT_REG[i];
		cgs_write_ind_register(hwmgr->device,
				CGS_IND_REG__DIDT,
				cac_config_regs[i],
				data);
		i++;
	}

	return 0;
}

static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int ret = 0;

	if (!data->disable_edc_leakage_controller &&
	    data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
	    data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
		ret = smu7_program_edc_didt_registers(hwmgr,
				DIDTEDCConfig_P12,
				&data->edc_leakage_table);
		if (ret)
			return ret;

		ret = smum_send_msg_to_smc(hwmgr,
				(PPSMC_Msg)PPSMC_MSG_EnableEDCController,
				NULL);
	} else {
		ret = smum_send_msg_to_smc(hwmgr,
				(PPSMC_Msg)PPSMC_MSG_DisableEDCController,
				NULL);
	}

	return ret;
}

static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result = 0;
	int result = 0;

	if (smu7_voltage_control(hwmgr)) {
		tmp_result = smu7_enable_voltage_control(hwmgr);
		PP_ASSERT_WITH_CODE(tmp_result == 0,
				"Failed to enable voltage control!",
				result = tmp_result);

		tmp_result = smu7_construct_voltage_tables(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to construct voltage tables!",
				result = tmp_result);
	}
	smum_initialize_mc_reg_table(hwmgr);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);

	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program static screen threshold parameters!",
			result = tmp_result);

	tmp_result = smu7_enable_display_gap(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable display gap!", result = tmp_result);

	tmp_result = smu7_program_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to program voting clients!", result = tmp_result);

	tmp_result = smum_process_firmware_header(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to process firmware header!", result = tmp_result);

	if (hwmgr->chip_id != CHIP_VEGAM) {
		tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to initialize switch from ArbF0 to F1!",
				result = tmp_result);
	}

	result = smu7_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(0 == result,
			"Failed to setup default DPM tables!", return result);

	tmp_result = smum_init_smc_table(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to initialize SMC table!", result = tmp_result);

	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);

	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
	    hwmgr->chip_id <= CHIP_VEGAM) {
		tmp_result = smu7_notify_has_display(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to enable display setting!", result = tmp_result);
	} else {
		smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
	}

	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
	    hwmgr->chip_id <= CHIP_VEGAM) {
		tmp_result = smu7_populate_edc_leakage_registers(hwmgr);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to populate edc leakage registers!", result = tmp_result);
	}

	tmp_result = smu7_enable_sclk_control(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SCLK control!", result = tmp_result);

	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable voltage control!", result = tmp_result);

	tmp_result = smu7_enable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ULV!", result = tmp_result);

	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_enable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to enable DIDT config!", result = tmp_result);

	tmp_result = smu7_start_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to start DPM!", result = tmp_result);

	tmp_result = smu7_enable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable SMC CAC!", result = tmp_result);

	tmp_result = smu7_enable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable power containment!", result = tmp_result);

	tmp_result = smu7_power_control_set_level(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to power control set level!", result = tmp_result);

	tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_pcie_performance_request(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"pcie performance request failed!", result = tmp_result);

	return 0;
}

static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
	if (!hwmgr->avfs_supported)
		return 0;

	if (enable) {
		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
					hwmgr, PPSMC_MSG_EnableAvfs, NULL),
					"Failed to enable AVFS!",
					return -EINVAL);
		}
	} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
				hwmgr, PPSMC_MSG_DisableAvfs, NULL),
				"Failed to disable AVFS!",
				return -EINVAL);
	}

	return 0;
}

static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (!hwmgr->avfs_supported)
		return 0;

	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		smu7_avfs_control(hwmgr, false);
	} else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		smu7_avfs_control(hwmgr, false);
		smu7_avfs_control(hwmgr, true);
	} else {
		smu7_avfs_control(hwmgr, true);
	}

	return 0;
}

static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	tmp_result = smu7_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable DIDT!", result = tmp_result);

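	/* Disable engine spread spectrum; mirrors the enable path in smu7_enable_dpm_tasks(). */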
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 1696 CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); 1697 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 1698 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); 1699 1700 tmp_result = smu7_disable_thermal_auto_throttle(hwmgr); 1701 PP_ASSERT_WITH_CODE((tmp_result == 0), 1702 "Failed to disable thermal auto throttle!", result = tmp_result); 1703 1704 tmp_result = smu7_avfs_control(hwmgr, false); 1705 PP_ASSERT_WITH_CODE((tmp_result == 0), 1706 "Failed to disable AVFS!", result = tmp_result); 1707 1708 tmp_result = smu7_stop_dpm(hwmgr); 1709 PP_ASSERT_WITH_CODE((tmp_result == 0), 1710 "Failed to stop DPM!", result = tmp_result); 1711 1712 tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr); 1713 PP_ASSERT_WITH_CODE((tmp_result == 0), 1714 "Failed to disable deep sleep master switch!", result = tmp_result); 1715 1716 tmp_result = smu7_disable_ulv(hwmgr); 1717 PP_ASSERT_WITH_CODE((tmp_result == 0), 1718 "Failed to disable ULV!", result = tmp_result); 1719 1720 tmp_result = smu7_clear_voting_clients(hwmgr); 1721 PP_ASSERT_WITH_CODE((tmp_result == 0), 1722 "Failed to clear voting clients!", result = tmp_result); 1723 1724 tmp_result = smu7_reset_to_default(hwmgr); 1725 PP_ASSERT_WITH_CODE((tmp_result == 0), 1726 "Failed to reset to default!", result = tmp_result); 1727 1728 tmp_result = smum_stop_smc(hwmgr); 1729 PP_ASSERT_WITH_CODE((tmp_result == 0), 1730 "Failed to stop smc!", result = tmp_result); 1731 1732 tmp_result = smu7_force_switch_to_arbf0(hwmgr); 1733 PP_ASSERT_WITH_CODE((tmp_result == 0), 1734 "Failed to force to switch arbf0!", result = tmp_result); 1735 1736 return result; 1737 } 1738 1739 static bool intel_core_rkl_chk(void) 1740 { 1741 #if IS_ENABLED(CONFIG_X86_64) 1742 struct cpuinfo_x86 *c = &cpu_data(0); 1743 1744 return (c->x86 == 6 && c->x86_model == INTEL_FAM6_ROCKETLAKE); 1745 #else 1746 return false; 1747 #endif 1748 } 1749 1750 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) 1751 { 1752 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1753 struct phm_ppt_v1_information *table_info = 1754 (struct phm_ppt_v1_information *)(hwmgr->pptable); 1755 struct amdgpu_device *adev = hwmgr->adev; 1756 uint8_t tmp1, tmp2; 1757 uint16_t tmp3 = 0; 1758 1759 data->dll_default_on = false; 1760 data->mclk_dpm0_activity_target = 0xa; 1761 data->vddc_vddgfx_delta = 300; 1762 data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; 1763 data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; 1764 data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; 1765 data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1; 1766 data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; 1767 data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3; 1768 data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4; 1769 data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5; 1770 data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6; 1771 data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7; 1772 1773 data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; 1774 data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? 
false : true; 1775 data->pcie_dpm_key_disabled = 1776 intel_core_rkl_chk() || !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); 1777 /* need to set voltage control types before EVV patching */ 1778 data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; 1779 data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; 1780 data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE; 1781 data->enable_tdc_limit_feature = true; 1782 data->enable_pkg_pwr_tracking_feature = true; 1783 data->force_pcie_gen = PP_PCIEGenInvalid; 1784 data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; 1785 data->current_profile_setting.bupdate_sclk = 1; 1786 data->current_profile_setting.sclk_up_hyst = 0; 1787 data->current_profile_setting.sclk_down_hyst = 100; 1788 data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT; 1789 data->current_profile_setting.bupdate_mclk = 1; 1790 if (hwmgr->chip_id >= CHIP_POLARIS10) { 1791 if (adev->gmc.vram_width == 256) { 1792 data->current_profile_setting.mclk_up_hyst = 10; 1793 data->current_profile_setting.mclk_down_hyst = 60; 1794 data->current_profile_setting.mclk_activity = 25; 1795 } else if (adev->gmc.vram_width == 128) { 1796 data->current_profile_setting.mclk_up_hyst = 5; 1797 data->current_profile_setting.mclk_down_hyst = 16; 1798 data->current_profile_setting.mclk_activity = 20; 1799 } else if (adev->gmc.vram_width == 64) { 1800 data->current_profile_setting.mclk_up_hyst = 3; 1801 data->current_profile_setting.mclk_down_hyst = 16; 1802 data->current_profile_setting.mclk_activity = 20; 1803 } 1804 } else { 1805 data->current_profile_setting.mclk_up_hyst = 0; 1806 data->current_profile_setting.mclk_down_hyst = 100; 1807 data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT; 1808 } 1809 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; 1810 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 1811 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 1812 1813 if (hwmgr->chip_id == CHIP_HAWAII) { 1814 data->thermal_temp_setting.temperature_low = 94500; 1815 data->thermal_temp_setting.temperature_high = 95000; 1816 data->thermal_temp_setting.temperature_shutdown = 104000; 1817 } else { 1818 data->thermal_temp_setting.temperature_low = 99500; 1819 data->thermal_temp_setting.temperature_high = 100000; 1820 data->thermal_temp_setting.temperature_shutdown = 104000; 1821 } 1822 1823 data->fast_watermark_threshold = 100; 1824 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1825 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) 1826 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1827 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1828 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) 1829 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; 1830 1831 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1832 PHM_PlatformCaps_ControlVDDGFX)) { 1833 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1834 VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { 1835 data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1836 } 1837 } 1838 1839 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1840 PHM_PlatformCaps_EnableMVDDControl)) { 1841 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1842 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) 1843 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; 1844 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1845 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) 1846 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1847 } 1848 1849 if 
(SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) 1850 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1851 PHM_PlatformCaps_ControlVDDGFX); 1852 1853 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1854 PHM_PlatformCaps_ControlVDDCI)) { 1855 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1856 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) 1857 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; 1858 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, 1859 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) 1860 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; 1861 } 1862 1863 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) 1864 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1865 PHM_PlatformCaps_EnableMVDDControl); 1866 1867 if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) 1868 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1869 PHM_PlatformCaps_ControlVDDCI); 1870 1871 data->vddc_phase_shed_control = 1; 1872 if ((hwmgr->chip_id == CHIP_POLARIS12) || 1873 ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) || 1874 ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) || 1875 ASICID_IS_P30(adev->pdev->device, adev->pdev->revision) || 1876 ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) { 1877 if (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 1878 atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2, 1879 &tmp3); 1880 tmp3 = (tmp3 >> 5) & 0x3; 1881 data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; 1882 } 1883 } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { 1884 data->vddc_phase_shed_control = 1; 1885 } 1886 1887 if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK) 1888 && (table_info->cac_dtp_table->usClockStretchAmount != 0)) 1889 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1890 PHM_PlatformCaps_ClockStretcher); 1891 1892 data->pcie_gen_performance.max = PP_PCIEGen1; 1893 data->pcie_gen_performance.min = PP_PCIEGen3; 1894 data->pcie_gen_power_saving.max = PP_PCIEGen1; 1895 data->pcie_gen_power_saving.min = PP_PCIEGen3; 1896 data->pcie_lane_performance.max = 0; 1897 data->pcie_lane_performance.min = 16; 1898 data->pcie_lane_power_saving.max = 0; 1899 data->pcie_lane_power_saving.min = 16; 1900 1901 1902 if (adev->pg_flags & AMD_PG_SUPPORT_UVD) 1903 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1904 PHM_PlatformCaps_UVDPowerGating); 1905 if (adev->pg_flags & AMD_PG_SUPPORT_VCE) 1906 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 1907 PHM_PlatformCaps_VCEPowerGating); 1908 1909 data->disable_edc_leakage_controller = true; 1910 if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) || 1911 ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) || 1912 (adev->asic_type == CHIP_POLARIS12) || 1913 (adev->asic_type == CHIP_VEGAM)) 1914 data->disable_edc_leakage_controller = false; 1915 1916 if (!atomctrl_is_asic_internal_ss_supported(hwmgr)) { 1917 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1918 PHM_PlatformCaps_MemorySpreadSpectrumSupport); 1919 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1920 PHM_PlatformCaps_EngineSpreadSpectrumSupport); 1921 } 1922 1923 if ((adev->pdev->device == 0x699F) && 1924 (adev->pdev->revision == 0xCF)) { 1925 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1926 PHM_PlatformCaps_PowerContainment); 1927 data->enable_tdc_limit_feature = false; 1928 data->enable_pkg_pwr_tracking_feature = false; 1929 data->disable_edc_leakage_controller = true; 1930 
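		/* The clock stretcher is also disabled on this SKU. */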
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 1931 PHM_PlatformCaps_ClockStretcher); 1932 } 1933 } 1934 1935 static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr) 1936 { 1937 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1938 struct amdgpu_device *adev = hwmgr->adev; 1939 uint32_t asicrev1, evv_revision, max = 0, min = 0; 1940 1941 atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB, 1942 &evv_revision); 1943 1944 atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1); 1945 1946 if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) || 1947 ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) { 1948 min = 1200; 1949 max = 2500; 1950 } else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) || 1951 ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) { 1952 min = 900; 1953 max= 2100; 1954 } else if (hwmgr->chip_id == CHIP_POLARIS10) { 1955 if (adev->pdev->subsystem_vendor == 0x106B) { 1956 min = 1000; 1957 max = 2300; 1958 } else { 1959 if (evv_revision == 0) { 1960 min = 1000; 1961 max = 2300; 1962 } else if (evv_revision == 1) { 1963 if (asicrev1 == 326) { 1964 min = 1200; 1965 max = 2500; 1966 /* TODO: PATCH RO in VBIOS */ 1967 } else { 1968 min = 1200; 1969 max = 2000; 1970 } 1971 } else if (evv_revision == 2) { 1972 min = 1200; 1973 max = 2500; 1974 } 1975 } 1976 } else { 1977 min = 1100; 1978 max = 2100; 1979 } 1980 1981 data->ro_range_minimum = min; 1982 data->ro_range_maximum = max; 1983 1984 /* TODO: PATCH RO in VBIOS here */ 1985 1986 return 0; 1987 } 1988 1989 /** 1990 * smu7_get_evv_voltages - Get Leakage VDDC based on leakage ID. 1991 * 1992 * @hwmgr: the address of the powerplay hardware manager. 1993 * Return: always 0 1994 */ 1995 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) 1996 { 1997 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1998 uint16_t vv_id; 1999 uint16_t vddc = 0; 2000 uint16_t vddgfx = 0; 2001 uint16_t i, j; 2002 uint32_t sclk = 0; 2003 struct phm_ppt_v1_information *table_info = 2004 (struct phm_ppt_v1_information *)hwmgr->pptable; 2005 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; 2006 2007 if (hwmgr->chip_id == CHIP_POLARIS10 || 2008 hwmgr->chip_id == CHIP_POLARIS11 || 2009 hwmgr->chip_id == CHIP_POLARIS12) 2010 smu7_calculate_ro_range(hwmgr); 2011 2012 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { 2013 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 2014 2015 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2016 if ((hwmgr->pp_table_version == PP_TABLE_V1) 2017 && !phm_get_sclk_for_voltage_evv(hwmgr, 2018 table_info->vddgfx_lookup_table, vv_id, &sclk)) { 2019 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 2020 PHM_PlatformCaps_ClockStretcher)) { 2021 sclk_table = table_info->vdd_dep_on_sclk; 2022 2023 for (j = 1; j < sclk_table->count; j++) { 2024 if (sclk_table->entries[j].clk == sclk && 2025 sclk_table->entries[j].cks_enable == 0) { 2026 sclk += 5000; 2027 break; 2028 } 2029 } 2030 } 2031 if (0 == atomctrl_get_voltage_evv_on_sclk 2032 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, 2033 vv_id, &vddgfx)) { 2034 /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. 
					 */
					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0),
							"Invalid VDDGFX value!", return -EINVAL);

					/* the voltage should not be zero nor equal to leakage ID */
					if (vddgfx != 0 && vddgfx != vv_id) {
						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
						data->vddcgfx_leakage.count++;
					}
				} else {
					pr_info("Error retrieving EVV voltage value!\n");
				}
			}
		} else {
			if ((hwmgr->pp_table_version == PP_TABLE_V0)
				|| !phm_get_sclk_for_voltage_evv(hwmgr,
					table_info->vddc_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ClockStretcher)) {
					if (table_info == NULL)
						return -EINVAL;
					sclk_table = table_info->vdd_dep_on_sclk;

					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
						    sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}

				if (phm_get_voltage_evv_on_sclk(hwmgr,
							VOLTAGE_TYPE_VDDC,
							sclk, vv_id, &vddc) == 0) {
					if (vddc >= 2000 || vddc == 0)
						return -EINVAL;
				} else {
					pr_debug("failed to retrieve EVV voltage!\n");
					continue;
				}

				/* the voltage should not be zero nor equal to leakage ID */
				if (vddc != 0 && vddc != vv_id) {
					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
					data->vddc_leakage.count++;
				}
			}
		}
	}

	return 0;
}

/**
 * smu7_patch_ppt_v1_with_vdd_leakage - Change virtual leakage voltage to actual value.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @voltage: pointer to changing voltage
 * @leakage_table: pointer to leakage table
 */
static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
		uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
{
	uint32_t index;

	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
	for (index = 0; index < leakage_table->count; index++) {
		/* if this voltage matches a leakage voltage ID */
		/* patch with actual leakage voltage */
		if (leakage_table->leakage_id[index] == *voltage) {
			*voltage = leakage_table->actual_voltage[index];
			break;
		}
	}

	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
		pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
}

/**
 * smu7_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
 *
 * @hwmgr: the address of the powerplay hardware manager.
2119 * @lookup_table: pointer to voltage lookup table 2120 * @leakage_table: pointer to leakage table 2121 * Return: always 0 2122 */ 2123 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, 2124 phm_ppt_v1_voltage_lookup_table *lookup_table, 2125 struct smu7_leakage_voltage *leakage_table) 2126 { 2127 uint32_t i; 2128 2129 for (i = 0; i < lookup_table->count; i++) 2130 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, 2131 &lookup_table->entries[i].us_vdd, leakage_table); 2132 2133 return 0; 2134 } 2135 2136 static int smu7_patch_clock_voltage_limits_with_vddc_leakage( 2137 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table, 2138 uint16_t *vddc) 2139 { 2140 struct phm_ppt_v1_information *table_info = 2141 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2142 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); 2143 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = 2144 table_info->max_clock_voltage_on_dc.vddc; 2145 return 0; 2146 } 2147 2148 static int smu7_patch_voltage_dependency_tables_with_lookup_table( 2149 struct pp_hwmgr *hwmgr) 2150 { 2151 uint8_t entry_id; 2152 uint8_t voltage_id; 2153 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2154 struct phm_ppt_v1_information *table_info = 2155 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2156 2157 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = 2158 table_info->vdd_dep_on_sclk; 2159 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = 2160 table_info->vdd_dep_on_mclk; 2161 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = 2162 table_info->mm_dep_table; 2163 2164 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2165 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { 2166 voltage_id = sclk_table->entries[entry_id].vddInd; 2167 sclk_table->entries[entry_id].vddgfx = 2168 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd; 2169 } 2170 } else { 2171 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { 2172 voltage_id = sclk_table->entries[entry_id].vddInd; 2173 sclk_table->entries[entry_id].vddc = 2174 table_info->vddc_lookup_table->entries[voltage_id].us_vdd; 2175 } 2176 } 2177 2178 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { 2179 voltage_id = mclk_table->entries[entry_id].vddInd; 2180 mclk_table->entries[entry_id].vddc = 2181 table_info->vddc_lookup_table->entries[voltage_id].us_vdd; 2182 } 2183 2184 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { 2185 voltage_id = mm_table->entries[entry_id].vddcInd; 2186 mm_table->entries[entry_id].vddc = 2187 table_info->vddc_lookup_table->entries[voltage_id].us_vdd; 2188 } 2189 2190 return 0; 2191 2192 } 2193 2194 static int phm_add_voltage(struct pp_hwmgr *hwmgr, 2195 phm_ppt_v1_voltage_lookup_table *look_up_table, 2196 phm_ppt_v1_voltage_lookup_record *record) 2197 { 2198 uint32_t i; 2199 2200 PP_ASSERT_WITH_CODE((NULL != look_up_table), 2201 "Lookup Table empty.", return -EINVAL); 2202 PP_ASSERT_WITH_CODE((0 != look_up_table->count), 2203 "Lookup Table empty.", return -EINVAL); 2204 2205 i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); 2206 PP_ASSERT_WITH_CODE((i >= look_up_table->count), 2207 "Lookup Table is full.", return -EINVAL); 2208 2209 /* This is to avoid entering duplicate calculated records. 
*/ 2210 for (i = 0; i < look_up_table->count; i++) { 2211 if (look_up_table->entries[i].us_vdd == record->us_vdd) { 2212 if (look_up_table->entries[i].us_calculated == 1) 2213 return 0; 2214 break; 2215 } 2216 } 2217 2218 look_up_table->entries[i].us_calculated = 1; 2219 look_up_table->entries[i].us_vdd = record->us_vdd; 2220 look_up_table->entries[i].us_cac_low = record->us_cac_low; 2221 look_up_table->entries[i].us_cac_mid = record->us_cac_mid; 2222 look_up_table->entries[i].us_cac_high = record->us_cac_high; 2223 /* Only increment the count when we're appending, not replacing duplicate entry. */ 2224 if (i == look_up_table->count) 2225 look_up_table->count++; 2226 2227 return 0; 2228 } 2229 2230 2231 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) 2232 { 2233 uint8_t entry_id; 2234 struct phm_ppt_v1_voltage_lookup_record v_record; 2235 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2236 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 2237 2238 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; 2239 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; 2240 2241 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2242 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { 2243 if (sclk_table->entries[entry_id].vdd_offset & (1 << 15)) 2244 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + 2245 sclk_table->entries[entry_id].vdd_offset - 0xFFFF; 2246 else 2247 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + 2248 sclk_table->entries[entry_id].vdd_offset; 2249 2250 sclk_table->entries[entry_id].vddc = 2251 v_record.us_cac_low = v_record.us_cac_mid = 2252 v_record.us_cac_high = v_record.us_vdd; 2253 2254 phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); 2255 } 2256 2257 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { 2258 if (mclk_table->entries[entry_id].vdd_offset & (1 << 15)) 2259 v_record.us_vdd = mclk_table->entries[entry_id].vddc + 2260 mclk_table->entries[entry_id].vdd_offset - 0xFFFF; 2261 else 2262 v_record.us_vdd = mclk_table->entries[entry_id].vddc + 2263 mclk_table->entries[entry_id].vdd_offset; 2264 2265 mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low = 2266 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; 2267 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); 2268 } 2269 } 2270 return 0; 2271 } 2272 2273 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) 2274 { 2275 uint8_t entry_id; 2276 struct phm_ppt_v1_voltage_lookup_record v_record; 2277 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2278 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 2279 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; 2280 2281 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2282 for (entry_id = 0; entry_id < mm_table->count; entry_id++) { 2283 if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15)) 2284 v_record.us_vdd = mm_table->entries[entry_id].vddc + 2285 mm_table->entries[entry_id].vddgfx_offset - 0xFFFF; 2286 else 2287 v_record.us_vdd = mm_table->entries[entry_id].vddc + 2288 mm_table->entries[entry_id].vddgfx_offset; 2289 2290 /* Add the calculated VDDGFX to the VDDGFX lookup table */ 2291 mm_table->entries[entry_id].vddgfx = v_record.us_cac_low = 2292 v_record.us_cac_mid = 
v_record.us_cac_high = v_record.us_vdd; 2293 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); 2294 } 2295 } 2296 return 0; 2297 } 2298 2299 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, 2300 struct phm_ppt_v1_voltage_lookup_table *lookup_table) 2301 { 2302 uint32_t table_size, i, j; 2303 table_size = lookup_table->count; 2304 2305 PP_ASSERT_WITH_CODE(0 != lookup_table->count, 2306 "Lookup table is empty", return -EINVAL); 2307 2308 /* Sorting voltages */ 2309 for (i = 0; i < table_size - 1; i++) { 2310 for (j = i + 1; j > 0; j--) { 2311 if (lookup_table->entries[j].us_vdd < 2312 lookup_table->entries[j - 1].us_vdd) { 2313 swap(lookup_table->entries[j - 1], 2314 lookup_table->entries[j]); 2315 } 2316 } 2317 } 2318 2319 return 0; 2320 } 2321 2322 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr) 2323 { 2324 int result = 0; 2325 int tmp_result; 2326 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2327 struct phm_ppt_v1_information *table_info = 2328 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2329 2330 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { 2331 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, 2332 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); 2333 if (tmp_result != 0) 2334 result = tmp_result; 2335 2336 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, 2337 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage)); 2338 } else { 2339 2340 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, 2341 table_info->vddc_lookup_table, &(data->vddc_leakage)); 2342 if (tmp_result) 2343 result = tmp_result; 2344 2345 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, 2346 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); 2347 if (tmp_result) 2348 result = tmp_result; 2349 } 2350 2351 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr); 2352 if (tmp_result) 2353 result = tmp_result; 2354 2355 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr); 2356 if (tmp_result) 2357 result = tmp_result; 2358 2359 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr); 2360 if (tmp_result) 2361 result = tmp_result; 2362 2363 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table); 2364 if (tmp_result) 2365 result = tmp_result; 2366 2367 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); 2368 if (tmp_result) 2369 result = tmp_result; 2370 2371 return result; 2372 } 2373 2374 static int smu7_find_highest_vddc(struct pp_hwmgr *hwmgr) 2375 { 2376 struct phm_ppt_v1_information *table_info = 2377 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2378 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = 2379 table_info->vdd_dep_on_sclk; 2380 struct phm_ppt_v1_voltage_lookup_table *lookup_table = 2381 table_info->vddc_lookup_table; 2382 uint16_t highest_voltage; 2383 uint32_t i; 2384 2385 highest_voltage = allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; 2386 2387 for (i = 0; i < lookup_table->count; i++) { 2388 if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 && 2389 lookup_table->entries[i].us_vdd > highest_voltage) 2390 highest_voltage = lookup_table->entries[i].us_vdd; 2391 } 2392 2393 return highest_voltage; 2394 } 2395 2396 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) 2397 { 2398 struct phm_ppt_v1_information *table_info = 2399 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2400 2401 struct 
phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
			table_info->vdd_dep_on_sclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
			table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
		"VDD dependency on SCLK table is missing.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
		"VDD dependency on SCLK table has to have at least one entry.",
		return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
		"VDD dependency on MCLK table is missing.",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
		"VDD dependency on MCLK table has to have at least one entry.",
		return -EINVAL);

	table_info->max_clock_voltage_on_ac.sclk =
		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
	table_info->max_clock_voltage_on_ac.mclk =
		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
	if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
		table_info->max_clock_voltage_on_ac.vddc =
			smu7_find_highest_vddc(hwmgr);
	else
		table_info->max_clock_voltage_on_ac.vddc =
			allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
	table_info->max_clock_voltage_on_ac.vddci =
		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;

	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;

	return 0;
}

static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
	uint32_t i;
	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
	struct amdgpu_device *adev = hwmgr->adev;

	if (table_info != NULL) {
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		lookup_table = table_info->vddc_lookup_table;
	} else
		return 0;

	hw_revision = adev->pdev->revision;
	sub_sys_id = adev->pdev->subsystem_device;
	sub_vendor_id = adev->pdev->subsystem_vendor;

	if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 &&
	    ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
	     (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
	     (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {

		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
					      CGS_IND_REG__SMC,
					      PWR_CKS_CNTL,
					      CKS_STRETCH_AMOUNT,
					      0x3);

		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
			return 0;

		for (i = 0; i < lookup_table->count; i++) {
			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
				return 0;
			}
		}
	}
	return 0;
}

static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
{
	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	uint32_t
temp_reg; 2489 struct phm_ppt_v1_information *table_info = 2490 (struct phm_ppt_v1_information *)(hwmgr->pptable); 2491 2492 2493 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { 2494 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); 2495 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { 2496 case 0: 2497 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); 2498 break; 2499 case 1: 2500 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); 2501 break; 2502 case 2: 2503 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); 2504 break; 2505 case 3: 2506 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); 2507 break; 2508 case 4: 2509 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); 2510 break; 2511 default: 2512 break; 2513 } 2514 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); 2515 } 2516 2517 if (table_info == NULL) 2518 return 0; 2519 2520 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && 2521 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { 2522 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = 2523 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; 2524 2525 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = 2526 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; 2527 2528 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; 2529 2530 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; 2531 2532 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = 2533 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; 2534 2535 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; 2536 2537 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? 
2538 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0; 2539 2540 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; 2541 table_info->cac_dtp_table->usOperatingTempStep = 1; 2542 table_info->cac_dtp_table->usOperatingTempHyst = 1; 2543 2544 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = 2545 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; 2546 2547 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = 2548 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; 2549 2550 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = 2551 table_info->cac_dtp_table->usOperatingTempMinLimit; 2552 2553 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = 2554 table_info->cac_dtp_table->usOperatingTempMaxLimit; 2555 2556 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = 2557 table_info->cac_dtp_table->usDefaultTargetOperatingTemp; 2558 2559 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = 2560 table_info->cac_dtp_table->usOperatingTempStep; 2561 2562 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = 2563 table_info->cac_dtp_table->usTargetOperatingTemp; 2564 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK) 2565 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 2566 PHM_PlatformCaps_ODFuzzyFanControlSupport); 2567 } 2568 2569 return 0; 2570 } 2571 2572 /** 2573 * smu7_patch_ppt_v0_with_vdd_leakage - Change virtual leakage voltage to actual value. 2574 * 2575 * @hwmgr: the address of the powerplay hardware manager. 2576 * @voltage: pointer to changing voltage 2577 * @leakage_table: pointer to leakage table 2578 */ 2579 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, 2580 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table) 2581 { 2582 uint32_t index; 2583 2584 /* search for leakage voltage ID 0xff01 ~ 0xff08 */ 2585 for (index = 0; index < leakage_table->count; index++) { 2586 /* if this voltage matches a leakage voltage ID */ 2587 /* patch with actual leakage voltage */ 2588 if (leakage_table->leakage_id[index] == *voltage) { 2589 *voltage = leakage_table->actual_voltage[index]; 2590 break; 2591 } 2592 } 2593 2594 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) 2595 pr_err("Voltage value looks like a Leakage ID but it's not patched \n"); 2596 } 2597 2598 2599 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr, 2600 struct phm_clock_voltage_dependency_table *tab) 2601 { 2602 uint16_t i; 2603 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2604 2605 if (tab) 2606 for (i = 0; i < tab->count; i++) 2607 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2608 &data->vddc_leakage); 2609 2610 return 0; 2611 } 2612 2613 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr, 2614 struct phm_clock_voltage_dependency_table *tab) 2615 { 2616 uint16_t i; 2617 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2618 2619 if (tab) 2620 for (i = 0; i < tab->count; i++) 2621 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2622 &data->vddci_leakage); 2623 2624 return 0; 2625 } 2626 2627 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr, 2628 struct phm_vce_clock_voltage_dependency_table *tab) 2629 { 2630 uint16_t i; 2631 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2632 2633 if (tab) 2634 for (i = 0; i < tab->count; i++) 2635 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2636 &data->vddc_leakage); 2637 2638 return 
0; 2639 } 2640 2641 2642 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr, 2643 struct phm_uvd_clock_voltage_dependency_table *tab) 2644 { 2645 uint16_t i; 2646 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2647 2648 if (tab) 2649 for (i = 0; i < tab->count; i++) 2650 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2651 &data->vddc_leakage); 2652 2653 return 0; 2654 } 2655 2656 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, 2657 struct phm_phase_shedding_limits_table *tab) 2658 { 2659 uint16_t i; 2660 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2661 2662 if (tab) 2663 for (i = 0; i < tab->count; i++) 2664 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage, 2665 &data->vddc_leakage); 2666 2667 return 0; 2668 } 2669 2670 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr, 2671 struct phm_samu_clock_voltage_dependency_table *tab) 2672 { 2673 uint16_t i; 2674 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2675 2676 if (tab) 2677 for (i = 0; i < tab->count; i++) 2678 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2679 &data->vddc_leakage); 2680 2681 return 0; 2682 } 2683 2684 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, 2685 struct phm_acp_clock_voltage_dependency_table *tab) 2686 { 2687 uint16_t i; 2688 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2689 2690 if (tab) 2691 for (i = 0; i < tab->count; i++) 2692 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, 2693 &data->vddc_leakage); 2694 2695 return 0; 2696 } 2697 2698 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, 2699 struct phm_clock_and_voltage_limits *tab) 2700 { 2701 uint32_t vddc, vddci; 2702 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2703 2704 if (tab) { 2705 vddc = tab->vddc; 2706 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, 2707 &data->vddc_leakage); 2708 tab->vddc = vddc; 2709 vddci = tab->vddci; 2710 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, 2711 &data->vddci_leakage); 2712 tab->vddci = vddci; 2713 } 2714 2715 return 0; 2716 } 2717 2718 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) 2719 { 2720 uint32_t i; 2721 uint32_t vddc; 2722 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2723 2724 if (tab) { 2725 for (i = 0; i < tab->count; i++) { 2726 vddc = (uint32_t)(tab->entries[i].Vddc); 2727 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage); 2728 tab->entries[i].Vddc = (uint16_t)vddc; 2729 } 2730 } 2731 2732 return 0; 2733 } 2734 2735 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) 2736 { 2737 int tmp; 2738 2739 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); 2740 if (tmp) 2741 return -EINVAL; 2742 2743 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); 2744 if (tmp) 2745 return -EINVAL; 2746 2747 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 2748 if (tmp) 2749 return -EINVAL; 2750 2751 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); 2752 if (tmp) 2753 return -EINVAL; 2754 2755 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); 2756 if (tmp) 2757 return -EINVAL; 2758 2759 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); 2760 if (tmp) 2761 return -EINVAL; 2762 2763 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table); 
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
	if (tmp)
		return -EINVAL;

	tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
	if (tmp)
		return -EINVAL;

	return 0;
}

static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
		"VDDC dependency on SCLK table is missing. This table is mandatory",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
		"VDDC dependency on SCLK table has to have at least one entry. This table is mandatory",
		return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
		"VDDC dependency on MCLK table is missing. This table is mandatory",
		return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
		"VDDC dependency on MCLK table has to have at least one entry. 
This table is mandatory", 2811 return -EINVAL); 2812 2813 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; 2814 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 2815 2816 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = 2817 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; 2818 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = 2819 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; 2820 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = 2821 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 2822 2823 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { 2824 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v; 2825 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; 2826 } 2827 2828 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1) 2829 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; 2830 2831 return 0; 2832 } 2833 2834 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 2835 { 2836 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 2837 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; 2838 kfree(hwmgr->backend); 2839 hwmgr->backend = NULL; 2840 2841 return 0; 2842 } 2843 2844 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr) 2845 { 2846 uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id; 2847 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2848 int i; 2849 2850 if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) { 2851 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { 2852 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 2853 if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci, 2854 virtual_voltage_id, 2855 efuse_voltage_id) == 0) { 2856 if (vddc != 0 && vddc != virtual_voltage_id) { 2857 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; 2858 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; 2859 data->vddc_leakage.count++; 2860 } 2861 if (vddci != 0 && vddci != virtual_voltage_id) { 2862 data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci; 2863 data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id; 2864 data->vddci_leakage.count++; 2865 } 2866 } 2867 } 2868 } 2869 return 0; 2870 } 2871 2872 #define LEAKAGE_ID_MSB 463 2873 #define LEAKAGE_ID_LSB 454 2874 2875 static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr) 2876 { 2877 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2878 uint32_t efuse; 2879 uint16_t offset; 2880 int ret = 0; 2881 2882 if (data->disable_edc_leakage_controller) 2883 return 0; 2884 2885 ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr, 2886 &data->edc_hilo_leakage_offset_from_vbios); 2887 if (ret) 2888 return ret; 2889 2890 if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset && 2891 data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) { 2892 atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse); 2893 if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold) 2894 offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset; 2895 else 2896 offset = 
data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset;

		ret = atomctrl_get_edc_leakage_table(hwmgr,
						     &data->edc_leakage_table,
						     offset);
		if (ret)
			return ret;
	}

	return ret;
}

static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data;
	int result = 0;

	data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;
	smu7_patch_voltage_workaround(hwmgr);
	smu7_init_dpm_defaults(hwmgr);

	/* Get leakage voltage based on leakage ID. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EVV)) {
		result = smu7_get_evv_voltages(hwmgr);
		if (result) {
			pr_info("Failed to get EVV voltage. Aborting driver load!\n");
			return -EINVAL;
		}
	} else {
		smu7_get_elb_voltages(hwmgr);
	}

	if (hwmgr->pp_table_version == PP_TABLE_V1) {
		smu7_complete_dependency_tables(hwmgr);
		smu7_set_private_data_based_on_pptable_v1(hwmgr);
	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
		smu7_patch_dependency_tables_with_leakage(hwmgr);
		smu7_set_private_data_based_on_pptable_v0(hwmgr);
	}

	/* Initialize Dynamic State Adjustment Rule Settings */
	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);

	if (0 == result) {
		struct amdgpu_device *adev = hwmgr->adev;

		data->is_tlu_enabled = false;

		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
							SMU7_MAX_HARDWARE_POWERLEVELS;
		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

		data->pcie_gen_cap = adev->pm.pcie_gen_mask;
		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
			data->pcie_spc_cap = 20;
		else
			data->pcie_spc_cap = 16;
		data->pcie_lane_cap = adev->pm.pcie_mlw_mask;

		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
		/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
		hwmgr->platform_descriptor.clockStep.engineClock = 500;
		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
		smu7_thermal_parameter_init(hwmgr);
	} else {
		/* Ignore the return value here, we are cleaning up a mess.
*/ 2968 smu7_hwmgr_backend_fini(hwmgr); 2969 } 2970 2971 result = smu7_update_edc_leakage_table(hwmgr); 2972 if (result) 2973 return result; 2974 2975 return 0; 2976 } 2977 2978 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) 2979 { 2980 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2981 uint32_t level, tmp; 2982 2983 if (!data->pcie_dpm_key_disabled) { 2984 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { 2985 level = 0; 2986 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; 2987 while (tmp >>= 1) 2988 level++; 2989 2990 if (level) 2991 smum_send_msg_to_smc_with_parameter(hwmgr, 2992 PPSMC_MSG_PCIeDPM_ForceLevel, level, 2993 NULL); 2994 } 2995 } 2996 2997 if (!data->sclk_dpm_key_disabled) { 2998 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { 2999 level = 0; 3000 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; 3001 while (tmp >>= 1) 3002 level++; 3003 3004 if (level) 3005 smum_send_msg_to_smc_with_parameter(hwmgr, 3006 PPSMC_MSG_SCLKDPM_SetEnabledMask, 3007 (1 << level), 3008 NULL); 3009 } 3010 } 3011 3012 if (!data->mclk_dpm_key_disabled) { 3013 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { 3014 level = 0; 3015 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; 3016 while (tmp >>= 1) 3017 level++; 3018 3019 if (level) 3020 smum_send_msg_to_smc_with_parameter(hwmgr, 3021 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3022 (1 << level), 3023 NULL); 3024 } 3025 } 3026 3027 return 0; 3028 } 3029 3030 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) 3031 { 3032 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3033 3034 if (hwmgr->pp_table_version == PP_TABLE_V1) 3035 phm_apply_dal_min_voltage_request(hwmgr); 3036 /* TO DO for v0 iceland and Ci*/ 3037 3038 if (!data->sclk_dpm_key_disabled) { 3039 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) 3040 smum_send_msg_to_smc_with_parameter(hwmgr, 3041 PPSMC_MSG_SCLKDPM_SetEnabledMask, 3042 data->dpm_level_enable_mask.sclk_dpm_enable_mask, 3043 NULL); 3044 } 3045 3046 if (!data->mclk_dpm_key_disabled) { 3047 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) 3048 smum_send_msg_to_smc_with_parameter(hwmgr, 3049 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3050 data->dpm_level_enable_mask.mclk_dpm_enable_mask, 3051 NULL); 3052 } 3053 3054 return 0; 3055 } 3056 3057 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 3058 { 3059 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3060 3061 if (!smum_is_dpm_running(hwmgr)) 3062 return -EINVAL; 3063 3064 if (!data->pcie_dpm_key_disabled) { 3065 smum_send_msg_to_smc(hwmgr, 3066 PPSMC_MSG_PCIeDPM_UnForceLevel, 3067 NULL); 3068 } 3069 3070 return smu7_upload_dpm_level_enable_mask(hwmgr); 3071 } 3072 3073 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) 3074 { 3075 struct smu7_hwmgr *data = 3076 (struct smu7_hwmgr *)(hwmgr->backend); 3077 uint32_t level; 3078 3079 if (!data->sclk_dpm_key_disabled) 3080 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { 3081 level = phm_get_lowest_enabled_level(hwmgr, 3082 data->dpm_level_enable_mask.sclk_dpm_enable_mask); 3083 smum_send_msg_to_smc_with_parameter(hwmgr, 3084 PPSMC_MSG_SCLKDPM_SetEnabledMask, 3085 (1 << level), 3086 NULL); 3087 3088 } 3089 3090 if (!data->mclk_dpm_key_disabled) { 3091 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { 3092 level = phm_get_lowest_enabled_level(hwmgr, 3093 data->dpm_level_enable_mask.mclk_dpm_enable_mask); 3094 smum_send_msg_to_smc_with_parameter(hwmgr, 3095 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3096 (1 
<< level), 3097 NULL); 3098 } 3099 } 3100 3101 if (!data->pcie_dpm_key_disabled) { 3102 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { 3103 level = phm_get_lowest_enabled_level(hwmgr, 3104 data->dpm_level_enable_mask.pcie_dpm_enable_mask); 3105 smum_send_msg_to_smc_with_parameter(hwmgr, 3106 PPSMC_MSG_PCIeDPM_ForceLevel, 3107 (level), 3108 NULL); 3109 } 3110 } 3111 3112 return 0; 3113 } 3114 3115 static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, 3116 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask) 3117 { 3118 uint32_t percentage; 3119 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3120 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; 3121 int32_t tmp_mclk; 3122 int32_t tmp_sclk; 3123 int32_t count; 3124 3125 if (golden_dpm_table->mclk_table.count < 1) 3126 return -EINVAL; 3127 3128 percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / 3129 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; 3130 3131 if (golden_dpm_table->mclk_table.count == 1) { 3132 percentage = 70; 3133 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; 3134 *mclk_mask = golden_dpm_table->mclk_table.count - 1; 3135 } else { 3136 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; 3137 *mclk_mask = golden_dpm_table->mclk_table.count - 2; 3138 } 3139 3140 tmp_sclk = tmp_mclk * percentage / 100; 3141 3142 if (hwmgr->pp_table_version == PP_TABLE_V0) { 3143 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; 3144 count >= 0; count--) { 3145 if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { 3146 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; 3147 *sclk_mask = count; 3148 break; 3149 } 3150 } 3151 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 3152 *sclk_mask = 0; 3153 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; 3154 } 3155 3156 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3157 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; 3158 } else if (hwmgr->pp_table_version == PP_TABLE_V1) { 3159 struct phm_ppt_v1_information *table_info = 3160 (struct phm_ppt_v1_information *)(hwmgr->pptable); 3161 3162 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { 3163 if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { 3164 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk; 3165 *sclk_mask = count; 3166 break; 3167 } 3168 } 3169 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 3170 *sclk_mask = 0; 3171 tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; 3172 } 3173 3174 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3175 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; 3176 } 3177 3178 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) 3179 *mclk_mask = 0; 3180 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3181 *mclk_mask = golden_dpm_table->mclk_table.count - 1; 3182 3183 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; 3184 hwmgr->pstate_sclk = tmp_sclk; 3185 hwmgr->pstate_mclk = tmp_mclk; 3186 3187 return 0; 3188 } 3189 3190 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, 3191 enum amd_dpm_forced_level level) 3192 { 3193 int ret = 0; 3194 uint32_t sclk_mask = 0; 3195 uint32_t mclk_mask = 0; 3196 uint32_t pcie_mask = 0; 3197 3198 if 
(hwmgr->pstate_sclk == 0) 3199 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); 3200 3201 switch (level) { 3202 case AMD_DPM_FORCED_LEVEL_HIGH: 3203 ret = smu7_force_dpm_highest(hwmgr); 3204 break; 3205 case AMD_DPM_FORCED_LEVEL_LOW: 3206 ret = smu7_force_dpm_lowest(hwmgr); 3207 break; 3208 case AMD_DPM_FORCED_LEVEL_AUTO: 3209 ret = smu7_unforce_dpm_levels(hwmgr); 3210 break; 3211 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 3212 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 3213 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 3214 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 3215 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); 3216 if (ret) 3217 return ret; 3218 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); 3219 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); 3220 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); 3221 break; 3222 case AMD_DPM_FORCED_LEVEL_MANUAL: 3223 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 3224 default: 3225 break; 3226 } 3227 3228 if (!ret) { 3229 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3230 smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255); 3231 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3232 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); 3233 } 3234 return ret; 3235 } 3236 3237 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) 3238 { 3239 return sizeof(struct smu7_power_state); 3240 } 3241 3242 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, 3243 uint32_t vblank_time_us) 3244 { 3245 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3246 uint32_t switch_limit_us; 3247 3248 switch (hwmgr->chip_id) { 3249 case CHIP_POLARIS10: 3250 case CHIP_POLARIS11: 3251 case CHIP_POLARIS12: 3252 if (hwmgr->is_kicker || (hwmgr->chip_id == CHIP_POLARIS12)) 3253 switch_limit_us = data->is_memory_gddr5 ? 450 : 150; 3254 else 3255 switch_limit_us = data->is_memory_gddr5 ? 200 : 150; 3256 break; 3257 case CHIP_VEGAM: 3258 switch_limit_us = 30; 3259 break; 3260 default: 3261 switch_limit_us = data->is_memory_gddr5 ? 450 : 150; 3262 break; 3263 } 3264 3265 if (vblank_time_us < switch_limit_us) 3266 return true; 3267 else 3268 return false; 3269 } 3270 3271 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, 3272 struct pp_power_state *request_ps, 3273 const struct pp_power_state *current_ps) 3274 { 3275 struct amdgpu_device *adev = hwmgr->adev; 3276 struct smu7_power_state *smu7_ps = 3277 cast_phw_smu7_power_state(&request_ps->hardware); 3278 uint32_t sclk; 3279 uint32_t mclk; 3280 struct PP_Clocks minimum_clocks = {0}; 3281 bool disable_mclk_switching; 3282 bool disable_mclk_switching_for_frame_lock; 3283 bool disable_mclk_switching_for_display; 3284 const struct phm_clock_and_voltage_limits *max_limits; 3285 uint32_t i; 3286 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3287 struct phm_ppt_v1_information *table_info = 3288 (struct phm_ppt_v1_information *)(hwmgr->pptable); 3289 int32_t count; 3290 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; 3291 uint32_t latency; 3292 bool latency_allowed = false; 3293 3294 data->battery_state = (PP_StateUILabel_Battery == 3295 request_ps->classification.ui_label); 3296 data->mclk_ignore_signal = false; 3297 3298 PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2, 3299 "VI should always have 2 performance levels", 3300 ); 3301 3302 max_limits = adev->pm.ac_power ? 
3303 &(hwmgr->dyn_state.max_clock_voltage_on_ac) : 3304 &(hwmgr->dyn_state.max_clock_voltage_on_dc); 3305 3306 /* Cap clock DPM tables at DC MAX if it is in DC. */ 3307 if (!adev->pm.ac_power) { 3308 for (i = 0; i < smu7_ps->performance_level_count; i++) { 3309 if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) 3310 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; 3311 if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk) 3312 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk; 3313 } 3314 } 3315 3316 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; 3317 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; 3318 3319 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 3320 PHM_PlatformCaps_StablePState)) { 3321 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); 3322 stable_pstate_sclk = (max_limits->sclk * 75) / 100; 3323 3324 for (count = table_info->vdd_dep_on_sclk->count - 1; 3325 count >= 0; count--) { 3326 if (stable_pstate_sclk >= 3327 table_info->vdd_dep_on_sclk->entries[count].clk) { 3328 stable_pstate_sclk = 3329 table_info->vdd_dep_on_sclk->entries[count].clk; 3330 break; 3331 } 3332 } 3333 3334 if (count < 0) 3335 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; 3336 3337 stable_pstate_mclk = max_limits->mclk; 3338 3339 minimum_clocks.engineClock = stable_pstate_sclk; 3340 minimum_clocks.memoryClock = stable_pstate_mclk; 3341 } 3342 3343 disable_mclk_switching_for_frame_lock = phm_cap_enabled( 3344 hwmgr->platform_descriptor.platformCaps, 3345 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); 3346 3347 disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) && 3348 !hwmgr->display_config->multi_monitor_in_sync) || 3349 (hwmgr->display_config->num_display && 3350 smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time)); 3351 3352 disable_mclk_switching = disable_mclk_switching_for_frame_lock || 3353 disable_mclk_switching_for_display; 3354 3355 if (hwmgr->display_config->num_display == 0) { 3356 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) 3357 data->mclk_ignore_signal = true; 3358 else 3359 disable_mclk_switching = false; 3360 } 3361 3362 sclk = smu7_ps->performance_levels[0].engine_clock; 3363 mclk = smu7_ps->performance_levels[0].memory_clock; 3364 3365 if (disable_mclk_switching && 3366 (!(hwmgr->chip_id >= CHIP_POLARIS10 && 3367 hwmgr->chip_id <= CHIP_VEGAM))) 3368 mclk = smu7_ps->performance_levels 3369 [smu7_ps->performance_level_count - 1].memory_clock; 3370 3371 if (sclk < minimum_clocks.engineClock) 3372 sclk = (minimum_clocks.engineClock > max_limits->sclk) ? 3373 max_limits->sclk : minimum_clocks.engineClock; 3374 3375 if (mclk < minimum_clocks.memoryClock) 3376 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? 3377 max_limits->mclk : minimum_clocks.memoryClock; 3378 3379 smu7_ps->performance_levels[0].engine_clock = sclk; 3380 smu7_ps->performance_levels[0].memory_clock = mclk; 3381 3382 smu7_ps->performance_levels[1].engine_clock = 3383 (smu7_ps->performance_levels[1].engine_clock >= 3384 smu7_ps->performance_levels[0].engine_clock) ? 
3385 smu7_ps->performance_levels[1].engine_clock : 3386 smu7_ps->performance_levels[0].engine_clock; 3387 3388 if (disable_mclk_switching) { 3389 if (mclk < smu7_ps->performance_levels[1].memory_clock) 3390 mclk = smu7_ps->performance_levels[1].memory_clock; 3391 3392 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) { 3393 if (disable_mclk_switching_for_display) { 3394 /* Find the lowest MCLK frequency that is within 3395 * the tolerable latency defined in DAL 3396 */ 3397 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; 3398 for (i = 0; i < data->mclk_latency_table.count; i++) { 3399 if (data->mclk_latency_table.entries[i].latency <= latency) { 3400 latency_allowed = true; 3401 3402 if ((data->mclk_latency_table.entries[i].frequency >= 3403 smu7_ps->performance_levels[0].memory_clock) && 3404 (data->mclk_latency_table.entries[i].frequency <= 3405 smu7_ps->performance_levels[1].memory_clock)) { 3406 mclk = data->mclk_latency_table.entries[i].frequency; 3407 break; 3408 } 3409 } 3410 } 3411 if ((i >= data->mclk_latency_table.count - 1) && !latency_allowed) { 3412 data->mclk_ignore_signal = true; 3413 } else { 3414 data->mclk_ignore_signal = false; 3415 } 3416 } 3417 3418 if (disable_mclk_switching_for_frame_lock) 3419 mclk = smu7_ps->performance_levels[1].memory_clock; 3420 } 3421 3422 smu7_ps->performance_levels[0].memory_clock = mclk; 3423 3424 if (!(hwmgr->chip_id >= CHIP_POLARIS10 && 3425 hwmgr->chip_id <= CHIP_VEGAM)) 3426 smu7_ps->performance_levels[1].memory_clock = mclk; 3427 } else { 3428 if (smu7_ps->performance_levels[1].memory_clock < 3429 smu7_ps->performance_levels[0].memory_clock) 3430 smu7_ps->performance_levels[1].memory_clock = 3431 smu7_ps->performance_levels[0].memory_clock; 3432 } 3433 3434 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 3435 PHM_PlatformCaps_StablePState)) { 3436 for (i = 0; i < smu7_ps->performance_level_count; i++) { 3437 smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk; 3438 smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk; 3439 smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; 3440 smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; 3441 } 3442 } 3443 return 0; 3444 } 3445 3446 3447 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) 3448 { 3449 struct pp_power_state *ps; 3450 struct smu7_power_state *smu7_ps; 3451 3452 if (hwmgr == NULL) 3453 return -EINVAL; 3454 3455 ps = hwmgr->request_ps; 3456 3457 if (ps == NULL) 3458 return -EINVAL; 3459 3460 smu7_ps = cast_phw_smu7_power_state(&ps->hardware); 3461 3462 if (low) 3463 return smu7_ps->performance_levels[0].memory_clock; 3464 else 3465 return smu7_ps->performance_levels 3466 [smu7_ps->performance_level_count-1].memory_clock; 3467 } 3468 3469 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) 3470 { 3471 struct pp_power_state *ps; 3472 struct smu7_power_state *smu7_ps; 3473 3474 if (hwmgr == NULL) 3475 return -EINVAL; 3476 3477 ps = hwmgr->request_ps; 3478 3479 if (ps == NULL) 3480 return -EINVAL; 3481 3482 smu7_ps = cast_phw_smu7_power_state(&ps->hardware); 3483 3484 if (low) 3485 return smu7_ps->performance_levels[0].engine_clock; 3486 else 3487 return smu7_ps->performance_levels 3488 [smu7_ps->performance_level_count-1].engine_clock; 3489 } 3490 3491 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, 3492 struct pp_hw_power_state *hw_ps) 3493 { 3494 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3495 
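	/*
	 * Descriptive note (added): the VBIOS boot clocks, voltages and PCIe
	 * settings read below are cached in data->vbios_boot_state and then
	 * copied into performance level 0 of this boot power state.
	 */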
struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps; 3496 ATOM_FIRMWARE_INFO_V2_2 *fw_info; 3497 uint16_t size; 3498 uint8_t frev, crev; 3499 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); 3500 3501 /* First retrieve the Boot clocks and VDDC from the firmware info table. 3502 * We assume here that fw_info is unchanged if this call fails. 3503 */ 3504 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index, 3505 &size, &frev, &crev); 3506 if (!fw_info) 3507 /* During a test, there is no firmware info table. */ 3508 return 0; 3509 3510 /* Patch the state. */ 3511 data->vbios_boot_state.sclk_bootup_value = 3512 le32_to_cpu(fw_info->ulDefaultEngineClock); 3513 data->vbios_boot_state.mclk_bootup_value = 3514 le32_to_cpu(fw_info->ulDefaultMemoryClock); 3515 data->vbios_boot_state.mvdd_bootup_value = 3516 le16_to_cpu(fw_info->usBootUpMVDDCVoltage); 3517 data->vbios_boot_state.vddc_bootup_value = 3518 le16_to_cpu(fw_info->usBootUpVDDCVoltage); 3519 data->vbios_boot_state.vddci_bootup_value = 3520 le16_to_cpu(fw_info->usBootUpVDDCIVoltage); 3521 data->vbios_boot_state.pcie_gen_bootup_value = 3522 smu7_get_current_pcie_speed(hwmgr); 3523 3524 data->vbios_boot_state.pcie_lane_bootup_value = 3525 (uint16_t)smu7_get_current_pcie_lane_number(hwmgr); 3526 3527 /* set boot power state */ 3528 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; 3529 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; 3530 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; 3531 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; 3532 3533 return 0; 3534 } 3535 3536 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) 3537 { 3538 int result; 3539 unsigned long ret = 0; 3540 3541 if (hwmgr->pp_table_version == PP_TABLE_V0) { 3542 result = pp_tables_get_num_of_entries(hwmgr, &ret); 3543 return result ? 
0 : ret; 3544 } else if (hwmgr->pp_table_version == PP_TABLE_V1) { 3545 result = get_number_of_powerplay_table_entries_v1_0(hwmgr); 3546 return result; 3547 } 3548 return 0; 3549 } 3550 3551 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, 3552 void *state, struct pp_power_state *power_state, 3553 void *pp_table, uint32_t classification_flag) 3554 { 3555 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3556 struct smu7_power_state *smu7_power_state = 3557 (struct smu7_power_state *)(&(power_state->hardware)); 3558 struct smu7_performance_level *performance_level; 3559 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; 3560 ATOM_Tonga_POWERPLAYTABLE *powerplay_table = 3561 (ATOM_Tonga_POWERPLAYTABLE *)pp_table; 3562 PPTable_Generic_SubTable_Header *sclk_dep_table = 3563 (PPTable_Generic_SubTable_Header *) 3564 (((unsigned long)powerplay_table) + 3565 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 3566 3567 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 3568 (ATOM_Tonga_MCLK_Dependency_Table *) 3569 (((unsigned long)powerplay_table) + 3570 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); 3571 3572 /* The following fields are not initialized here: id orderedList allStatesList */ 3573 power_state->classification.ui_label = 3574 (le16_to_cpu(state_entry->usClassification) & 3575 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> 3576 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; 3577 power_state->classification.flags = classification_flag; 3578 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ 3579 3580 power_state->classification.temporary_state = false; 3581 power_state->classification.to_be_deleted = false; 3582 3583 power_state->validation.disallowOnDC = 3584 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & 3585 ATOM_Tonga_DISALLOW_ON_DC)); 3586 3587 power_state->pcie.lanes = 0; 3588 3589 power_state->display.disableFrameModulation = false; 3590 power_state->display.limitRefreshrate = false; 3591 power_state->display.enableVariBright = 3592 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & 3593 ATOM_Tonga_ENABLE_VARIBRIGHT)); 3594 3595 power_state->validation.supportedPowerLevels = 0; 3596 power_state->uvd_clocks.VCLK = 0; 3597 power_state->uvd_clocks.DCLK = 0; 3598 power_state->temperatures.min = 0; 3599 power_state->temperatures.max = 0; 3600 3601 performance_level = &(smu7_power_state->performance_levels 3602 [smu7_power_state->performance_level_count++]); 3603 3604 PP_ASSERT_WITH_CODE( 3605 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), 3606 "Performance levels exceeds SMC limit!", 3607 return -EINVAL); 3608 3609 PP_ASSERT_WITH_CODE( 3610 (smu7_power_state->performance_level_count <= 3611 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), 3612 "Performance levels exceeds Driver limit!", 3613 return -EINVAL); 3614 3615 /* Performance levels are arranged from low to high. 
*/ 3616 performance_level->memory_clock = mclk_dep_table->entries 3617 [state_entry->ucMemoryClockIndexLow].ulMclk; 3618 if (sclk_dep_table->ucRevId == 0) 3619 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3620 [state_entry->ucEngineClockIndexLow].ulSclk; 3621 else if (sclk_dep_table->ucRevId == 1) 3622 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3623 [state_entry->ucEngineClockIndexLow].ulSclk; 3624 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3625 state_entry->ucPCIEGenLow); 3626 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, 3627 state_entry->ucPCIELaneLow); 3628 3629 performance_level = &(smu7_power_state->performance_levels 3630 [smu7_power_state->performance_level_count++]); 3631 performance_level->memory_clock = mclk_dep_table->entries 3632 [state_entry->ucMemoryClockIndexHigh].ulMclk; 3633 3634 if (sclk_dep_table->ucRevId == 0) 3635 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries 3636 [state_entry->ucEngineClockIndexHigh].ulSclk; 3637 else if (sclk_dep_table->ucRevId == 1) 3638 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries 3639 [state_entry->ucEngineClockIndexHigh].ulSclk; 3640 3641 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, 3642 state_entry->ucPCIEGenHigh); 3643 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, 3644 state_entry->ucPCIELaneHigh); 3645 3646 return 0; 3647 } 3648 3649 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, 3650 unsigned long entry_index, struct pp_power_state *state) 3651 { 3652 int result; 3653 struct smu7_power_state *ps; 3654 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3655 struct phm_ppt_v1_information *table_info = 3656 (struct phm_ppt_v1_information *)(hwmgr->pptable); 3657 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = 3658 table_info->vdd_dep_on_mclk; 3659 3660 state->hardware.magic = PHM_VIslands_Magic; 3661 3662 ps = (struct smu7_power_state *)(&state->hardware); 3663 3664 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, 3665 smu7_get_pp_table_entry_callback_func_v1); 3666 3667 /* This is the earliest time we have all the dependency table and the VBIOS boot state 3668 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state 3669 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state 3670 */ 3671 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { 3672 if (dep_mclk_table->entries[0].clk != 3673 data->vbios_boot_state.mclk_bootup_value) 3674 pr_debug("Single MCLK entry VDDCI/MCLK dependency table " 3675 "does not match VBIOS boot MCLK level"); 3676 if (dep_mclk_table->entries[0].vddci != 3677 data->vbios_boot_state.vddci_bootup_value) 3678 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " 3679 "does not match VBIOS boot VDDCI level"); 3680 } 3681 3682 /* set DC compatible flag if this state supports DC */ 3683 if (!state->validation.disallowOnDC) 3684 ps->dc_compatible = true; 3685 3686 if (state->classification.flags & PP_StateClassificationFlag_ACPI) 3687 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; 3688 3689 ps->uvd_clks.vclk = state->uvd_clocks.VCLK; 3690 ps->uvd_clks.dclk = state->uvd_clocks.DCLK; 3691 3692 if (!result) { 3693 uint32_t i; 3694 3695 switch (state->classification.ui_label) { 3696 case 
PP_StateUILabel_Performance: 3697 data->use_pcie_performance_levels = true; 3698 for (i = 0; i < ps->performance_level_count; i++) { 3699 if (data->pcie_gen_performance.max < 3700 ps->performance_levels[i].pcie_gen) 3701 data->pcie_gen_performance.max = 3702 ps->performance_levels[i].pcie_gen; 3703 3704 if (data->pcie_gen_performance.min > 3705 ps->performance_levels[i].pcie_gen) 3706 data->pcie_gen_performance.min = 3707 ps->performance_levels[i].pcie_gen; 3708 3709 if (data->pcie_lane_performance.max < 3710 ps->performance_levels[i].pcie_lane) 3711 data->pcie_lane_performance.max = 3712 ps->performance_levels[i].pcie_lane; 3713 if (data->pcie_lane_performance.min > 3714 ps->performance_levels[i].pcie_lane) 3715 data->pcie_lane_performance.min = 3716 ps->performance_levels[i].pcie_lane; 3717 } 3718 break; 3719 case PP_StateUILabel_Battery: 3720 data->use_pcie_power_saving_levels = true; 3721 3722 for (i = 0; i < ps->performance_level_count; i++) { 3723 if (data->pcie_gen_power_saving.max < 3724 ps->performance_levels[i].pcie_gen) 3725 data->pcie_gen_power_saving.max = 3726 ps->performance_levels[i].pcie_gen; 3727 3728 if (data->pcie_gen_power_saving.min > 3729 ps->performance_levels[i].pcie_gen) 3730 data->pcie_gen_power_saving.min = 3731 ps->performance_levels[i].pcie_gen; 3732 3733 if (data->pcie_lane_power_saving.max < 3734 ps->performance_levels[i].pcie_lane) 3735 data->pcie_lane_power_saving.max = 3736 ps->performance_levels[i].pcie_lane; 3737 3738 if (data->pcie_lane_power_saving.min > 3739 ps->performance_levels[i].pcie_lane) 3740 data->pcie_lane_power_saving.min = 3741 ps->performance_levels[i].pcie_lane; 3742 } 3743 break; 3744 default: 3745 break; 3746 } 3747 } 3748 return 0; 3749 } 3750 3751 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, 3752 struct pp_hw_power_state *power_state, 3753 unsigned int index, const void *clock_info) 3754 { 3755 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3756 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state); 3757 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; 3758 struct smu7_performance_level *performance_level; 3759 uint32_t engine_clock, memory_clock; 3760 uint16_t pcie_gen_from_bios; 3761 3762 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; 3763 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; 3764 3765 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) 3766 data->highest_mclk = memory_clock; 3767 3768 PP_ASSERT_WITH_CODE( 3769 (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), 3770 "Performance levels exceeds SMC limit!", 3771 return -EINVAL); 3772 3773 PP_ASSERT_WITH_CODE( 3774 (ps->performance_level_count < 3775 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), 3776 "Performance levels exceeds Driver limit, Skip!", 3777 return 0); 3778 3779 performance_level = &(ps->performance_levels 3780 [ps->performance_level_count++]); 3781 3782 /* Performance levels are arranged from low to high. 
*/ 3783 performance_level->memory_clock = memory_clock; 3784 performance_level->engine_clock = engine_clock; 3785 3786 pcie_gen_from_bios = visland_clk_info->ucPCIEGen; 3787 3788 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); 3789 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); 3790 3791 return 0; 3792 } 3793 3794 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, 3795 unsigned long entry_index, struct pp_power_state *state) 3796 { 3797 int result; 3798 struct smu7_power_state *ps; 3799 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3800 struct phm_clock_voltage_dependency_table *dep_mclk_table = 3801 hwmgr->dyn_state.vddci_dependency_on_mclk; 3802 3803 memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); 3804 3805 state->hardware.magic = PHM_VIslands_Magic; 3806 3807 ps = (struct smu7_power_state *)(&state->hardware); 3808 3809 result = pp_tables_get_entry(hwmgr, entry_index, state, 3810 smu7_get_pp_table_entry_callback_func_v0); 3811 3812 /* 3813 * This is the earliest time we have all the dependency table 3814 * and the VBIOS boot state as 3815 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot 3816 * state if there is only one VDDCI/MCLK level, check if it's 3817 * the same as VBIOS boot state 3818 */ 3819 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { 3820 if (dep_mclk_table->entries[0].clk != 3821 data->vbios_boot_state.mclk_bootup_value) 3822 pr_debug("Single MCLK entry VDDCI/MCLK dependency table " 3823 "does not match VBIOS boot MCLK level"); 3824 if (dep_mclk_table->entries[0].v != 3825 data->vbios_boot_state.vddci_bootup_value) 3826 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " 3827 "does not match VBIOS boot VDDCI level"); 3828 } 3829 3830 /* set DC compatible flag if this state supports DC */ 3831 if (!state->validation.disallowOnDC) 3832 ps->dc_compatible = true; 3833 3834 if (state->classification.flags & PP_StateClassificationFlag_ACPI) 3835 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; 3836 3837 ps->uvd_clks.vclk = state->uvd_clocks.VCLK; 3838 ps->uvd_clks.dclk = state->uvd_clocks.DCLK; 3839 3840 if (!result) { 3841 uint32_t i; 3842 3843 switch (state->classification.ui_label) { 3844 case PP_StateUILabel_Performance: 3845 data->use_pcie_performance_levels = true; 3846 3847 for (i = 0; i < ps->performance_level_count; i++) { 3848 if (data->pcie_gen_performance.max < 3849 ps->performance_levels[i].pcie_gen) 3850 data->pcie_gen_performance.max = 3851 ps->performance_levels[i].pcie_gen; 3852 3853 if (data->pcie_gen_performance.min > 3854 ps->performance_levels[i].pcie_gen) 3855 data->pcie_gen_performance.min = 3856 ps->performance_levels[i].pcie_gen; 3857 3858 if (data->pcie_lane_performance.max < 3859 ps->performance_levels[i].pcie_lane) 3860 data->pcie_lane_performance.max = 3861 ps->performance_levels[i].pcie_lane; 3862 3863 if (data->pcie_lane_performance.min > 3864 ps->performance_levels[i].pcie_lane) 3865 data->pcie_lane_performance.min = 3866 ps->performance_levels[i].pcie_lane; 3867 } 3868 break; 3869 case PP_StateUILabel_Battery: 3870 data->use_pcie_power_saving_levels = true; 3871 3872 for (i = 0; i < ps->performance_level_count; i++) { 3873 if (data->pcie_gen_power_saving.max < 3874 ps->performance_levels[i].pcie_gen) 3875 data->pcie_gen_power_saving.max = 3876 ps->performance_levels[i].pcie_gen; 3877 3878 if (data->pcie_gen_power_saving.min > 3879 
ps->performance_levels[i].pcie_gen) 3880 data->pcie_gen_power_saving.min = 3881 ps->performance_levels[i].pcie_gen; 3882 3883 if (data->pcie_lane_power_saving.max < 3884 ps->performance_levels[i].pcie_lane) 3885 data->pcie_lane_power_saving.max = 3886 ps->performance_levels[i].pcie_lane; 3887 3888 if (data->pcie_lane_power_saving.min > 3889 ps->performance_levels[i].pcie_lane) 3890 data->pcie_lane_power_saving.min = 3891 ps->performance_levels[i].pcie_lane; 3892 } 3893 break; 3894 default: 3895 break; 3896 } 3897 } 3898 return 0; 3899 } 3900 3901 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, 3902 unsigned long entry_index, struct pp_power_state *state) 3903 { 3904 if (hwmgr->pp_table_version == PP_TABLE_V0) 3905 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state); 3906 else if (hwmgr->pp_table_version == PP_TABLE_V1) 3907 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state); 3908 3909 return 0; 3910 } 3911 3912 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) 3913 { 3914 struct amdgpu_device *adev = hwmgr->adev; 3915 int i; 3916 u32 tmp = 0; 3917 3918 if (!query) 3919 return -EINVAL; 3920 3921 /* 3922 * PPSMC_MSG_GetCurrPkgPwr is not supported on: 3923 * - Hawaii 3924 * - Bonaire 3925 * - Fiji 3926 * - Tonga 3927 */ 3928 if ((adev->asic_type != CHIP_HAWAII) && 3929 (adev->asic_type != CHIP_BONAIRE) && 3930 (adev->asic_type != CHIP_FIJI) && 3931 (adev->asic_type != CHIP_TONGA)) { 3932 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp); 3933 *query = tmp; 3934 3935 if (tmp != 0) 3936 return 0; 3937 } 3938 3939 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL); 3940 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 3941 ixSMU_PM_STATUS_95, 0); 3942 3943 for (i = 0; i < 10; i++) { 3944 msleep(500); 3945 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL); 3946 tmp = cgs_read_ind_register(hwmgr->device, 3947 CGS_IND_REG__SMC, 3948 ixSMU_PM_STATUS_95); 3949 if (tmp != 0) 3950 break; 3951 } 3952 *query = tmp; 3953 3954 return 0; 3955 } 3956 3957 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, 3958 void *value, int *size) 3959 { 3960 uint32_t sclk, mclk, activity_percent; 3961 uint32_t offset, val_vid; 3962 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3963 3964 /* size must be at least 4 bytes for all sensors */ 3965 if (*size < 4) 3966 return -EINVAL; 3967 3968 switch (idx) { 3969 case AMDGPU_PP_SENSOR_GFX_SCLK: 3970 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk); 3971 *((uint32_t *)value) = sclk; 3972 *size = 4; 3973 return 0; 3974 case AMDGPU_PP_SENSOR_GFX_MCLK: 3975 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk); 3976 *((uint32_t *)value) = mclk; 3977 *size = 4; 3978 return 0; 3979 case AMDGPU_PP_SENSOR_GPU_LOAD: 3980 case AMDGPU_PP_SENSOR_MEM_LOAD: 3981 offset = data->soft_regs_start + smum_get_offsetof(hwmgr, 3982 SMU_SoftRegisters, 3983 (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ? 3984 AverageGraphicsActivity: 3985 AverageMemoryActivity); 3986 3987 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); 3988 activity_percent += 0x80; 3989 activity_percent >>= 8; 3990 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; 3991 *size = 4; 3992 return 0; 3993 case AMDGPU_PP_SENSOR_GPU_TEMP: 3994 *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr); 3995 *size = 4; 3996 return 0; 3997 case AMDGPU_PP_SENSOR_UVD_POWER: 3998 *((uint32_t *)value) = data->uvd_power_gated ? 
0 : 1; 3999 *size = 4; 4000 return 0; 4001 case AMDGPU_PP_SENSOR_VCE_POWER: 4002 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; 4003 *size = 4; 4004 return 0; 4005 case AMDGPU_PP_SENSOR_GPU_POWER: 4006 return smu7_get_gpu_power(hwmgr, (uint32_t *)value); 4007 case AMDGPU_PP_SENSOR_VDDGFX: 4008 if ((data->vr_config & VRCONF_VDDGFX_MASK) == 4009 (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) 4010 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, 4011 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); 4012 else 4013 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, 4014 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID); 4015 4016 *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid); 4017 return 0; 4018 default: 4019 return -EOPNOTSUPP; 4020 } 4021 } 4022 4023 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) 4024 { 4025 const struct phm_set_power_state_input *states = 4026 (const struct phm_set_power_state_input *)input; 4027 const struct smu7_power_state *smu7_ps = 4028 cast_const_phw_smu7_power_state(states->pnew_state); 4029 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4030 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 4031 uint32_t sclk = smu7_ps->performance_levels 4032 [smu7_ps->performance_level_count - 1].engine_clock; 4033 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 4034 uint32_t mclk = smu7_ps->performance_levels 4035 [smu7_ps->performance_level_count - 1].memory_clock; 4036 struct PP_Clocks min_clocks = {0}; 4037 uint32_t i; 4038 4039 for (i = 0; i < sclk_table->count; i++) { 4040 if (sclk == sclk_table->dpm_levels[i].value) 4041 break; 4042 } 4043 4044 if (i >= sclk_table->count) { 4045 if (sclk > sclk_table->dpm_levels[i-1].value) { 4046 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 4047 sclk_table->dpm_levels[i-1].value = sclk; 4048 } 4049 } else { 4050 /* TODO: Check SCLK in DAL's minimum clocks 4051 * in case DeepSleep divider update is required. 4052 */ 4053 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && 4054 (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK || 4055 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) 4056 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; 4057 } 4058 4059 for (i = 0; i < mclk_table->count; i++) { 4060 if (mclk == mclk_table->dpm_levels[i].value) 4061 break; 4062 } 4063 4064 if (i >= mclk_table->count) { 4065 if (mclk > mclk_table->dpm_levels[i-1].value) { 4066 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 4067 mclk_table->dpm_levels[i-1].value = mclk; 4068 } 4069 } 4070 4071 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) 4072 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; 4073 4074 return 0; 4075 } 4076 4077 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr, 4078 const struct smu7_power_state *smu7_ps) 4079 { 4080 uint32_t i; 4081 uint32_t sclk, max_sclk = 0; 4082 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4083 struct smu7_dpm_table *dpm_table = &data->dpm_table; 4084 4085 for (i = 0; i < smu7_ps->performance_level_count; i++) { 4086 sclk = smu7_ps->performance_levels[i].engine_clock; 4087 if (max_sclk < sclk) 4088 max_sclk = sclk; 4089 } 4090 4091 for (i = 0; i < dpm_table->sclk_table.count; i++) { 4092 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) 4093 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? 
4094 dpm_table->pcie_speed_table.dpm_levels 4095 [dpm_table->pcie_speed_table.count - 1].value : 4096 dpm_table->pcie_speed_table.dpm_levels[i].value); 4097 } 4098 4099 return 0; 4100 } 4101 4102 static int smu7_request_link_speed_change_before_state_change( 4103 struct pp_hwmgr *hwmgr, const void *input) 4104 { 4105 const struct phm_set_power_state_input *states = 4106 (const struct phm_set_power_state_input *)input; 4107 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4108 const struct smu7_power_state *smu7_nps = 4109 cast_const_phw_smu7_power_state(states->pnew_state); 4110 const struct smu7_power_state *polaris10_cps = 4111 cast_const_phw_smu7_power_state(states->pcurrent_state); 4112 4113 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps); 4114 uint16_t current_link_speed; 4115 4116 if (data->force_pcie_gen == PP_PCIEGenInvalid) 4117 current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps); 4118 else 4119 current_link_speed = data->force_pcie_gen; 4120 4121 data->force_pcie_gen = PP_PCIEGenInvalid; 4122 data->pspp_notify_required = false; 4123 4124 if (target_link_speed > current_link_speed) { 4125 switch (target_link_speed) { 4126 #ifdef CONFIG_ACPI 4127 case PP_PCIEGen3: 4128 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false)) 4129 break; 4130 data->force_pcie_gen = PP_PCIEGen2; 4131 if (current_link_speed == PP_PCIEGen2) 4132 break; 4133 fallthrough; 4134 case PP_PCIEGen2: 4135 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false)) 4136 break; 4137 fallthrough; 4138 #endif 4139 default: 4140 data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); 4141 break; 4142 } 4143 } else { 4144 if (target_link_speed < current_link_speed) 4145 data->pspp_notify_required = true; 4146 } 4147 4148 return 0; 4149 } 4150 4151 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 4152 { 4153 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4154 4155 if (0 == data->need_update_smu7_dpm_table) 4156 return 0; 4157 4158 if ((0 == data->sclk_dpm_key_disabled) && 4159 (data->need_update_smu7_dpm_table & 4160 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 4161 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 4162 "Trying to freeze SCLK DPM when DPM is disabled", 4163 ); 4164 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 4165 PPSMC_MSG_SCLKDPM_FreezeLevel, 4166 NULL), 4167 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", 4168 return -EINVAL); 4169 } 4170 4171 if ((0 == data->mclk_dpm_key_disabled) && 4172 !data->mclk_ignore_signal && 4173 (data->need_update_smu7_dpm_table & 4174 DPMTABLE_OD_UPDATE_MCLK)) { 4175 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 4176 "Trying to freeze MCLK DPM when DPM is disabled", 4177 ); 4178 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 4179 PPSMC_MSG_MCLKDPM_FreezeLevel, 4180 NULL), 4181 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", 4182 return -EINVAL); 4183 } 4184 4185 return 0; 4186 } 4187 4188 static int smu7_populate_and_upload_sclk_mclk_dpm_levels( 4189 struct pp_hwmgr *hwmgr, const void *input) 4190 { 4191 int result = 0; 4192 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4193 struct smu7_dpm_table *dpm_table = &data->dpm_table; 4194 uint32_t count; 4195 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); 4196 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); 4197 struct 
phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); 4198 4199 if (0 == data->need_update_smu7_dpm_table) 4200 return 0; 4201 4202 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { 4203 for (count = 0; count < dpm_table->sclk_table.count; count++) { 4204 dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled; 4205 dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock; 4206 } 4207 } 4208 4209 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { 4210 for (count = 0; count < dpm_table->mclk_table.count; count++) { 4211 dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled; 4212 dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock; 4213 } 4214 } 4215 4216 if (data->need_update_smu7_dpm_table & 4217 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { 4218 result = smum_populate_all_graphic_levels(hwmgr); 4219 PP_ASSERT_WITH_CODE((0 == result), 4220 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", 4221 return result); 4222 } 4223 4224 if (data->need_update_smu7_dpm_table & 4225 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { 4226 /*populate MCLK dpm table to SMU7 */ 4227 result = smum_populate_all_memory_levels(hwmgr); 4228 PP_ASSERT_WITH_CODE((0 == result), 4229 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", 4230 return result); 4231 } 4232 4233 return result; 4234 } 4235 4236 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, 4237 struct smu7_single_dpm_table *dpm_table, 4238 uint32_t low_limit, uint32_t high_limit) 4239 { 4240 uint32_t i; 4241 4242 /* force the trim if mclk_switching is disabled to prevent flicker */ 4243 bool force_trim = (low_limit == high_limit); 4244 for (i = 0; i < dpm_table->count; i++) { 4245 /*skip the trim if od is enabled*/ 4246 if ((!hwmgr->od_enabled || force_trim) 4247 && (dpm_table->dpm_levels[i].value < low_limit 4248 || dpm_table->dpm_levels[i].value > high_limit)) 4249 dpm_table->dpm_levels[i].enabled = false; 4250 else 4251 dpm_table->dpm_levels[i].enabled = true; 4252 } 4253 4254 return 0; 4255 } 4256 4257 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, 4258 const struct smu7_power_state *smu7_ps) 4259 { 4260 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4261 uint32_t high_limit_count; 4262 4263 PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1), 4264 "power state did not have any performance level", 4265 return -EINVAL); 4266 4267 high_limit_count = (1 == smu7_ps->performance_level_count) ? 
0 : 1; 4268 4269 smu7_trim_single_dpm_states(hwmgr, 4270 &(data->dpm_table.sclk_table), 4271 smu7_ps->performance_levels[0].engine_clock, 4272 smu7_ps->performance_levels[high_limit_count].engine_clock); 4273 4274 smu7_trim_single_dpm_states(hwmgr, 4275 &(data->dpm_table.mclk_table), 4276 smu7_ps->performance_levels[0].memory_clock, 4277 smu7_ps->performance_levels[high_limit_count].memory_clock); 4278 4279 return 0; 4280 } 4281 4282 static int smu7_generate_dpm_level_enable_mask( 4283 struct pp_hwmgr *hwmgr, const void *input) 4284 { 4285 int result = 0; 4286 const struct phm_set_power_state_input *states = 4287 (const struct phm_set_power_state_input *)input; 4288 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4289 const struct smu7_power_state *smu7_ps = 4290 cast_const_phw_smu7_power_state(states->pnew_state); 4291 4292 4293 result = smu7_trim_dpm_states(hwmgr, smu7_ps); 4294 if (result) 4295 return result; 4296 4297 data->dpm_level_enable_mask.sclk_dpm_enable_mask = 4298 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); 4299 data->dpm_level_enable_mask.mclk_dpm_enable_mask = 4300 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); 4301 data->dpm_level_enable_mask.pcie_dpm_enable_mask = 4302 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); 4303 4304 return 0; 4305 } 4306 4307 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 4308 { 4309 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4310 4311 if (0 == data->need_update_smu7_dpm_table) 4312 return 0; 4313 4314 if ((0 == data->sclk_dpm_key_disabled) && 4315 (data->need_update_smu7_dpm_table & 4316 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 4317 4318 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 4319 "Trying to Unfreeze SCLK DPM when DPM is disabled", 4320 ); 4321 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 4322 PPSMC_MSG_SCLKDPM_UnfreezeLevel, 4323 NULL), 4324 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", 4325 return -EINVAL); 4326 } 4327 4328 if ((0 == data->mclk_dpm_key_disabled) && 4329 !data->mclk_ignore_signal && 4330 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 4331 4332 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 4333 "Trying to Unfreeze MCLK DPM when DPM is disabled", 4334 ); 4335 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 4336 PPSMC_MSG_MCLKDPM_UnfreezeLevel, 4337 NULL), 4338 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", 4339 return -EINVAL); 4340 } 4341 4342 data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; 4343 4344 return 0; 4345 } 4346 4347 static int smu7_notify_link_speed_change_after_state_change( 4348 struct pp_hwmgr *hwmgr, const void *input) 4349 { 4350 const struct phm_set_power_state_input *states = 4351 (const struct phm_set_power_state_input *)input; 4352 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4353 const struct smu7_power_state *smu7_ps = 4354 cast_const_phw_smu7_power_state(states->pnew_state); 4355 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps); 4356 uint8_t request; 4357 4358 if (data->pspp_notify_required) { 4359 if (target_link_speed == PP_PCIEGen3) 4360 request = PCIE_PERF_REQ_GEN3; 4361 else if (target_link_speed == PP_PCIEGen2) 4362 request = PCIE_PERF_REQ_GEN2; 4363 else 4364 request = PCIE_PERF_REQ_GEN1; 4365 4366 if (request == PCIE_PERF_REQ_GEN1 && 4367 smu7_get_current_pcie_speed(hwmgr) > 0) 4368 return 0; 4369 4370 #ifdef 
CONFIG_ACPI 4371 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) { 4372 if (PP_PCIEGen2 == target_link_speed) 4373 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!"); 4374 else 4375 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!"); 4376 } 4377 #endif 4378 } 4379 4380 return 0; 4381 } 4382 4383 static int smu7_notify_no_display(struct pp_hwmgr *hwmgr) 4384 { 4385 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL) == 0) ? 0 : -EINVAL; 4386 } 4387 4388 static int smu7_notify_has_display(struct pp_hwmgr *hwmgr) 4389 { 4390 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4391 4392 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { 4393 if (hwmgr->chip_id == CHIP_VEGAM) 4394 smum_send_msg_to_smc_with_parameter(hwmgr, 4395 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2, 4396 NULL); 4397 else 4398 smum_send_msg_to_smc_with_parameter(hwmgr, 4399 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2, 4400 NULL); 4401 data->last_sent_vbi_timeout = data->frame_time_x2; 4402 } 4403 4404 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL; 4405 } 4406 4407 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) 4408 { 4409 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4410 int result = 0; 4411 4412 if (data->mclk_ignore_signal) 4413 result = smu7_notify_no_display(hwmgr); 4414 else 4415 result = smu7_notify_has_display(hwmgr); 4416 4417 return result; 4418 } 4419 4420 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 4421 { 4422 int tmp_result, result = 0; 4423 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4424 4425 tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input); 4426 PP_ASSERT_WITH_CODE((0 == tmp_result), 4427 "Failed to find DPM states clocks in DPM table!", 4428 result = tmp_result); 4429 4430 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 4431 PHM_PlatformCaps_PCIEPerformanceRequest)) { 4432 tmp_result = 4433 smu7_request_link_speed_change_before_state_change(hwmgr, input); 4434 PP_ASSERT_WITH_CODE((0 == tmp_result), 4435 "Failed to request link speed change before state change!", 4436 result = tmp_result); 4437 } 4438 4439 tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); 4440 PP_ASSERT_WITH_CODE((0 == tmp_result), 4441 "Failed to freeze SCLK MCLK DPM!", result = tmp_result); 4442 4443 tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); 4444 PP_ASSERT_WITH_CODE((0 == tmp_result), 4445 "Failed to populate and upload SCLK MCLK DPM levels!", 4446 result = tmp_result); 4447 4448 /* 4449 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. 4450 * That effectively disables AVFS feature. 
4451 */ 4452 if (hwmgr->hardcode_pp_table != NULL) 4453 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; 4454 4455 tmp_result = smu7_update_avfs(hwmgr); 4456 PP_ASSERT_WITH_CODE((0 == tmp_result), 4457 "Failed to update avfs voltages!", 4458 result = tmp_result); 4459 4460 tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); 4461 PP_ASSERT_WITH_CODE((0 == tmp_result), 4462 "Failed to generate DPM level enabled mask!", 4463 result = tmp_result); 4464 4465 tmp_result = smum_update_sclk_threshold(hwmgr); 4466 PP_ASSERT_WITH_CODE((0 == tmp_result), 4467 "Failed to update SCLK threshold!", 4468 result = tmp_result); 4469 4470 tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); 4471 PP_ASSERT_WITH_CODE((0 == tmp_result), 4472 "Failed to unfreeze SCLK MCLK DPM!", 4473 result = tmp_result); 4474 4475 tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr); 4476 PP_ASSERT_WITH_CODE((0 == tmp_result), 4477 "Failed to upload DPM level enabled mask!", 4478 result = tmp_result); 4479 4480 tmp_result = smu7_notify_smc_display(hwmgr); 4481 PP_ASSERT_WITH_CODE((0 == tmp_result), 4482 "Failed to notify smc display settings!", 4483 result = tmp_result); 4484 4485 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 4486 PHM_PlatformCaps_PCIEPerformanceRequest)) { 4487 tmp_result = 4488 smu7_notify_link_speed_change_after_state_change(hwmgr, input); 4489 PP_ASSERT_WITH_CODE((0 == tmp_result), 4490 "Failed to notify link speed change after state change!", 4491 result = tmp_result); 4492 } 4493 data->apply_optimized_settings = false; 4494 return result; 4495 } 4496 4497 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) 4498 { 4499 hwmgr->thermal_controller. 4500 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; 4501 4502 return smum_send_msg_to_smc_with_parameter(hwmgr, 4503 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm, 4504 NULL); 4505 } 4506 4507 static int 4508 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) 4509 { 4510 return 0; 4511 } 4512 4513 /** 4514 * smu7_program_display_gap - Programs the display gap 4515 * 4516 * @hwmgr: the address of the powerplay hardware manager. 4517 * Return: always OK 4518 */ 4519 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) 4520 { 4521 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4522 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); 4523 uint32_t display_gap2; 4524 uint32_t pre_vbi_time_in_us; 4525 uint32_t frame_time_in_us; 4526 uint32_t ref_clock, refresh_rate; 4527 4528 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? 
DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); 4529 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); 4530 4531 ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); 4532 refresh_rate = hwmgr->display_config->vrefresh; 4533 4534 if (0 == refresh_rate) 4535 refresh_rate = 60; 4536 4537 frame_time_in_us = 1000000 / refresh_rate; 4538 4539 pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time; 4540 4541 data->frame_time_x2 = frame_time_in_us * 2 / 100; 4542 4543 if (data->frame_time_x2 < 280) { 4544 pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2); 4545 data->frame_time_x2 = 280; 4546 } 4547 4548 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); 4549 4550 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); 4551 4552 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4553 data->soft_regs_start + smum_get_offsetof(hwmgr, 4554 SMU_SoftRegisters, 4555 PreVBlankGap), 0x64); 4556 4557 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 4558 data->soft_regs_start + smum_get_offsetof(hwmgr, 4559 SMU_SoftRegisters, 4560 VBlankTimeout), 4561 (frame_time_in_us - pre_vbi_time_in_us)); 4562 4563 return 0; 4564 } 4565 4566 static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 4567 { 4568 return smu7_program_display_gap(hwmgr); 4569 } 4570 4571 /** 4572 * smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM 4573 * 4574 * @hwmgr: the address of the powerplay hardware manager. 4575 * @us_max_fan_rpm: max operating fan RPM value. 4576 * Return: The response that came from the SMC. 4577 */ 4578 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) 4579 { 4580 hwmgr->thermal_controller. 
4581 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; 4582 4583 return smum_send_msg_to_smc_with_parameter(hwmgr, 4584 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm, 4585 NULL); 4586 } 4587 4588 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = { 4589 .process = phm_irq_process, 4590 }; 4591 4592 static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr) 4593 { 4594 struct amdgpu_irq_src *source = 4595 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); 4596 4597 if (!source) 4598 return -ENOMEM; 4599 4600 source->funcs = &smu7_irq_funcs; 4601 4602 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4603 AMDGPU_IRQ_CLIENTID_LEGACY, 4604 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, 4605 source); 4606 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4607 AMDGPU_IRQ_CLIENTID_LEGACY, 4608 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, 4609 source); 4610 4611 /* Register CTF(GPIO_19) interrupt */ 4612 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4613 AMDGPU_IRQ_CLIENTID_LEGACY, 4614 VISLANDS30_IV_SRCID_GPIO_19, 4615 source); 4616 4617 return 0; 4618 } 4619 4620 static bool 4621 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) 4622 { 4623 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4624 bool is_update_required = false; 4625 4626 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) 4627 is_update_required = true; 4628 4629 if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh) 4630 is_update_required = true; 4631 4632 if (hwmgr->chip_id >= CHIP_POLARIS10 && 4633 hwmgr->chip_id <= CHIP_VEGAM && 4634 data->last_sent_vbi_timeout != data->frame_time_x2) 4635 is_update_required = true; 4636 4637 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { 4638 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr && 4639 (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || 4640 hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) 4641 is_update_required = true; 4642 } 4643 return is_update_required; 4644 } 4645 4646 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1, 4647 const struct smu7_performance_level *pl2) 4648 { 4649 return ((pl1->memory_clock == pl2->memory_clock) && 4650 (pl1->engine_clock == pl2->engine_clock) && 4651 (pl1->pcie_gen == pl2->pcie_gen) && 4652 (pl1->pcie_lane == pl2->pcie_lane)); 4653 } 4654 4655 static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, 4656 const struct pp_hw_power_state *pstate1, 4657 const struct pp_hw_power_state *pstate2, bool *equal) 4658 { 4659 const struct smu7_power_state *psa; 4660 const struct smu7_power_state *psb; 4661 int i; 4662 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4663 4664 if (pstate1 == NULL || pstate2 == NULL || equal == NULL) 4665 return -EINVAL; 4666 4667 psa = cast_const_phw_smu7_power_state(pstate1); 4668 psb = cast_const_phw_smu7_power_state(pstate2); 4669 /* If the two states don't even have the same number of performance levels they cannot be the same state. 
*/ 4670 if (psa->performance_level_count != psb->performance_level_count) { 4671 *equal = false; 4672 return 0; 4673 } 4674 4675 for (i = 0; i < psa->performance_level_count; i++) { 4676 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { 4677 /* If we have found even one performance level pair that is different the states are different. */ 4678 *equal = false; 4679 return 0; 4680 } 4681 } 4682 4683 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ 4684 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); 4685 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); 4686 *equal &= (psa->sclk_threshold == psb->sclk_threshold); 4687 /* For OD call, set value based on flag */ 4688 *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | 4689 DPMTABLE_OD_UPDATE_MCLK | 4690 DPMTABLE_OD_UPDATE_VDDC)); 4691 4692 return 0; 4693 } 4694 4695 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) 4696 { 4697 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4698 4699 uint32_t tmp; 4700 4701 /* Read MC indirect register offset 0x9F bits [3:0] to see 4702 * if VBIOS has already loaded a full version of MC ucode 4703 * or not. 4704 */ 4705 4706 smu7_get_mc_microcode_version(hwmgr); 4707 4708 data->need_long_memory_training = false; 4709 4710 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 4711 ixMC_IO_DEBUG_UP_13); 4712 tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); 4713 4714 if (tmp & (1 << 23)) { 4715 data->mem_latency_high = MEM_LATENCY_HIGH; 4716 data->mem_latency_low = MEM_LATENCY_LOW; 4717 if ((hwmgr->chip_id == CHIP_POLARIS10) || 4718 (hwmgr->chip_id == CHIP_POLARIS11) || 4719 (hwmgr->chip_id == CHIP_POLARIS12)) 4720 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL); 4721 } else { 4722 data->mem_latency_high = 330; 4723 data->mem_latency_low = 330; 4724 if ((hwmgr->chip_id == CHIP_POLARIS10) || 4725 (hwmgr->chip_id == CHIP_POLARIS11) || 4726 (hwmgr->chip_id == CHIP_POLARIS12)) 4727 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL); 4728 } 4729 4730 return 0; 4731 } 4732 4733 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) 4734 { 4735 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4736 4737 data->clock_registers.vCG_SPLL_FUNC_CNTL = 4738 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); 4739 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = 4740 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); 4741 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = 4742 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); 4743 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = 4744 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); 4745 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = 4746 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); 4747 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = 4748 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); 4749 data->clock_registers.vDLL_CNTL = 4750 cgs_read_register(hwmgr->device, mmDLL_CNTL); 4751 data->clock_registers.vMCLK_PWRMGT_CNTL = 4752 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); 4753 data->clock_registers.vMPLL_AD_FUNC_CNTL = 4754 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); 4755 
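	/* Also cache the memory PLL (MPLL) control and spread spectrum registers. */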
data->clock_registers.vMPLL_DQ_FUNC_CNTL = 4756 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); 4757 data->clock_registers.vMPLL_FUNC_CNTL = 4758 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); 4759 data->clock_registers.vMPLL_FUNC_CNTL_1 = 4760 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); 4761 data->clock_registers.vMPLL_FUNC_CNTL_2 = 4762 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); 4763 data->clock_registers.vMPLL_SS1 = 4764 cgs_read_register(hwmgr->device, mmMPLL_SS1); 4765 data->clock_registers.vMPLL_SS2 = 4766 cgs_read_register(hwmgr->device, mmMPLL_SS2); 4767 return 0; 4768 4769 } 4770 4771 /** 4772 * smu7_get_memory_type - Find out if memory is GDDR5. 4773 * 4774 * @hwmgr: the address of the powerplay hardware manager. 4775 * Return: always 0 4776 */ 4777 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) 4778 { 4779 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4780 struct amdgpu_device *adev = hwmgr->adev; 4781 4782 data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5); 4783 4784 return 0; 4785 } 4786 4787 /** 4788 * smu7_enable_acpi_power_management - Enables Dynamic Power Management by SMC 4789 * 4790 * @hwmgr: the address of the powerplay hardware manager. 4791 * Return: always 0 4792 */ 4793 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr) 4794 { 4795 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 4796 GENERAL_PWRMGT, STATIC_PM_EN, 1); 4797 4798 return 0; 4799 } 4800 4801 /** 4802 * smu7_init_power_gate_state - Initialize PowerGating States for different engines 4803 * 4804 * @hwmgr: the address of the powerplay hardware manager. 4805 * Return: always 0 4806 */ 4807 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) 4808 { 4809 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4810 4811 data->uvd_power_gated = false; 4812 data->vce_power_gated = false; 4813 4814 return 0; 4815 } 4816 4817 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) 4818 { 4819 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4820 4821 data->low_sclk_interrupt_threshold = 0; 4822 return 0; 4823 } 4824 4825 static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) 4826 { 4827 int tmp_result, result = 0; 4828 4829 smu7_check_mc_firmware(hwmgr); 4830 4831 tmp_result = smu7_read_clock_registers(hwmgr); 4832 PP_ASSERT_WITH_CODE((0 == tmp_result), 4833 "Failed to read clock registers!", result = tmp_result); 4834 4835 tmp_result = smu7_get_memory_type(hwmgr); 4836 PP_ASSERT_WITH_CODE((0 == tmp_result), 4837 "Failed to get memory type!", result = tmp_result); 4838 4839 tmp_result = smu7_enable_acpi_power_management(hwmgr); 4840 PP_ASSERT_WITH_CODE((0 == tmp_result), 4841 "Failed to enable ACPI power management!", result = tmp_result); 4842 4843 tmp_result = smu7_init_power_gate_state(hwmgr); 4844 PP_ASSERT_WITH_CODE((0 == tmp_result), 4845 "Failed to init power gate state!", result = tmp_result); 4846 4847 tmp_result = smu7_get_mc_microcode_version(hwmgr); 4848 PP_ASSERT_WITH_CODE((0 == tmp_result), 4849 "Failed to get MC microcode version!", result = tmp_result); 4850 4851 tmp_result = smu7_init_sclk_threshold(hwmgr); 4852 PP_ASSERT_WITH_CODE((0 == tmp_result), 4853 "Failed to init sclk threshold!", result = tmp_result); 4854 4855 return result; 4856 } 4857 4858 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, 4859 enum pp_clock_type type, uint32_t mask) 4860 { 4861 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4862 4863 if (mask == 
0) 4864 return -EINVAL; 4865 4866 switch (type) { 4867 case PP_SCLK: 4868 if (!data->sclk_dpm_key_disabled) 4869 smum_send_msg_to_smc_with_parameter(hwmgr, 4870 PPSMC_MSG_SCLKDPM_SetEnabledMask, 4871 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask, 4872 NULL); 4873 break; 4874 case PP_MCLK: 4875 if (!data->mclk_dpm_key_disabled) 4876 smum_send_msg_to_smc_with_parameter(hwmgr, 4877 PPSMC_MSG_MCLKDPM_SetEnabledMask, 4878 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask, 4879 NULL); 4880 break; 4881 case PP_PCIE: 4882 { 4883 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; 4884 4885 if (!data->pcie_dpm_key_disabled) { 4886 if (fls(tmp) != ffs(tmp)) 4887 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel, 4888 NULL); 4889 else 4890 smum_send_msg_to_smc_with_parameter(hwmgr, 4891 PPSMC_MSG_PCIeDPM_ForceLevel, 4892 fls(tmp) - 1, 4893 NULL); 4894 } 4895 break; 4896 } 4897 default: 4898 break; 4899 } 4900 4901 return 0; 4902 } 4903 4904 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, 4905 enum pp_clock_type type, char *buf) 4906 { 4907 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 4908 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); 4909 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); 4910 struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); 4911 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); 4912 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); 4913 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); 4914 int size = 0; 4915 uint32_t i, now, clock, pcie_speed; 4916 4917 phm_get_sysfs_buf(&buf, &size); 4918 4919 switch (type) { 4920 case PP_SCLK: 4921 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); 4922 4923 for (i = 0; i < sclk_table->count; i++) { 4924 if (clock > sclk_table->dpm_levels[i].value) 4925 continue; 4926 break; 4927 } 4928 now = i; 4929 4930 for (i = 0; i < sclk_table->count; i++) 4931 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", 4932 i, sclk_table->dpm_levels[i].value / 100, 4933 (i == now) ? "*" : ""); 4934 break; 4935 case PP_MCLK: 4936 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock); 4937 4938 for (i = 0; i < mclk_table->count; i++) { 4939 if (clock > mclk_table->dpm_levels[i].value) 4940 continue; 4941 break; 4942 } 4943 now = i; 4944 4945 for (i = 0; i < mclk_table->count; i++) 4946 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", 4947 i, mclk_table->dpm_levels[i].value / 100, 4948 (i == now) ? "*" : ""); 4949 break; 4950 case PP_PCIE: 4951 pcie_speed = smu7_get_current_pcie_speed(hwmgr); 4952 for (i = 0; i < pcie_table->count; i++) { 4953 if (pcie_speed != pcie_table->dpm_levels[i].value) 4954 continue; 4955 break; 4956 } 4957 now = i; 4958 4959 for (i = 0; i < pcie_table->count; i++) 4960 size += sysfs_emit_at(buf, size, "%d: %s %s\n", i, 4961 (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : 4962 (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : 4963 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "", 4964 (i == now) ? 
"*" : ""); 4965 break; 4966 case OD_SCLK: 4967 if (hwmgr->od_enabled) { 4968 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 4969 for (i = 0; i < odn_sclk_table->num_of_pl; i++) 4970 size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", 4971 i, odn_sclk_table->entries[i].clock/100, 4972 odn_sclk_table->entries[i].vddc); 4973 } 4974 break; 4975 case OD_MCLK: 4976 if (hwmgr->od_enabled) { 4977 size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); 4978 for (i = 0; i < odn_mclk_table->num_of_pl; i++) 4979 size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", 4980 i, odn_mclk_table->entries[i].clock/100, 4981 odn_mclk_table->entries[i].vddc); 4982 } 4983 break; 4984 case OD_RANGE: 4985 if (hwmgr->od_enabled) { 4986 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 4987 size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", 4988 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, 4989 hwmgr->platform_descriptor.overdriveLimit.engineClock/100); 4990 size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", 4991 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, 4992 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); 4993 size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", 4994 data->odn_dpm_table.min_vddc, 4995 data->odn_dpm_table.max_vddc); 4996 } 4997 break; 4998 default: 4999 break; 5000 } 5001 return size; 5002 } 5003 5004 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 5005 { 5006 switch (mode) { 5007 case AMD_FAN_CTRL_NONE: 5008 smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255); 5009 break; 5010 case AMD_FAN_CTRL_MANUAL: 5011 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 5012 PHM_PlatformCaps_MicrocodeFanControl)) 5013 smu7_fan_ctrl_stop_smc_fan_control(hwmgr); 5014 break; 5015 case AMD_FAN_CTRL_AUTO: 5016 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode)) 5017 smu7_fan_ctrl_start_smc_fan_control(hwmgr); 5018 break; 5019 default: 5020 break; 5021 } 5022 } 5023 5024 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) 5025 { 5026 return hwmgr->fan_ctrl_enabled ? 

static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
	return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
}

static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
	int golden_value = golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.sclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}

static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
	int golden_value = golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}

static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mclk_table);
	struct pp_power_state *ps;
	struct smu7_power_state *smu7_ps;

	if (value > 20)
		value = 20;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);

	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}
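
/*
 * Worked example for the *_od helpers above (hypothetical numbers): with a
 * golden top SCLK level of 1266 MHz (126600 in 10 kHz units) and a current
 * top level of 1329 MHz, smu7_get_sclk_od() returns
 * DIV_ROUND_UP((132900 - 126600) * 100, 126600) = 5, i.e. +5%.
 * smu7_set_sclk_od(hwmgr, 5) applies the inverse,
 * 126600 * 5 / 100 + 126600 = 132930 (~1329 MHz), and requests are clamped
 * to at most +20%.
 */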

static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
	struct phm_clock_voltage_dependency_table *sclk_table;
	int i;

	if (hwmgr->pp_table_version == PP_TABLE_V1) {
		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
			return -EINVAL;
		dep_sclk_table = table_info->vdd_dep_on_sclk;
		for (i = 0; i < dep_sclk_table->count; i++)
			clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
		clocks->count = dep_sclk_table->count;
	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
		for (i = 0; i < sclk_table->count; i++)
			clocks->clock[i] = sclk_table->entries[i].clk * 10;
		clocks->count = sclk_table->count;
	}

	return 0;
}

static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_high;
	else if (clk >= MEM_FREQ_HIGH_LATENCY)
		return data->mem_latency_low;
	else
		return MEM_LATENCY_ERR;
}

static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	int i;
	struct phm_clock_voltage_dependency_table *mclk_table;

	if (hwmgr->pp_table_version == PP_TABLE_V1) {
		if (table_info == NULL)
			return -EINVAL;
		dep_mclk_table = table_info->vdd_dep_on_mclk;
		for (i = 0; i < dep_mclk_table->count; i++) {
			clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
						dep_mclk_table->entries[i].clk);
		}
		clocks->count = dep_mclk_table->count;
	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
		for (i = 0; i < mclk_table->count; i++)
			clocks->clock[i] = mclk_table->entries[i].clk * 10;
		clocks->count = mclk_table->count;
	}
	return 0;
}

static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
						struct amd_pp_clocks *clocks)
{
	switch (type) {
	case amd_pp_sys_clock:
		smu7_get_sclks(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		smu7_get_mclks(hwmgr, clocks);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu7_get_sclks_with_latency(struct pp_hwmgr *hwmgr,
				       struct pp_clock_levels_with_latency *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
			table_info->vdd_dep_on_sclk;
	int i;

	clocks->num_levels = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (dep_sclk_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz =
				dep_sclk_table->entries[i].clk * 10;
			clocks->num_levels++;
		}
	}

	return 0;
}
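
/*
 * Note on units (derived from the tables above): the clk fields in the
 * voltage dependency tables are kept in 10 kHz units, so the "* 10"
 * conversions report kHz to the caller.  Memory latency is binned by
 * smu7_get_mem_latency(): clocks between MEM_FREQ_LOW_LATENCY and
 * MEM_FREQ_HIGH_LATENCY get the high latency value, clocks at or above
 * MEM_FREQ_HIGH_LATENCY get the low latency value, and anything slower
 * reports MEM_LATENCY_ERR.
 */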

static int smu7_get_mclks_with_latency(struct pp_hwmgr *hwmgr,
				       struct pp_clock_levels_with_latency *clocks)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int i;

	clocks->num_levels = 0;
	data->mclk_latency_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (dep_mclk_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz =
					dep_mclk_table->entries[i].clk * 10;
			data->mclk_latency_table.entries[data->mclk_latency_table.count].frequency =
					dep_mclk_table->entries[i].clk;
			clocks->data[clocks->num_levels].latency_in_us =
				data->mclk_latency_table.entries[data->mclk_latency_table.count].latency =
					smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk);
			clocks->num_levels++;
			data->mclk_latency_table.count++;
		}
	}

	return 0;
}

static int smu7_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
	      hwmgr->chip_id <= CHIP_VEGAM))
		return -EINVAL;

	switch (type) {
	case amd_pp_sys_clock:
		smu7_get_sclks_with_latency(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		smu7_get_mclks_with_latency(hwmgr, clocks);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
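
/*
 * Note on the watermark programming below: for every (mclk level, sclk
 * level) pair the code looks for the first DAL-supplied clock range that
 * contains the pair and stores that range's wm_set_id in the SMU's
 * DisplayWatermark matrix.  DAL hands the ranges over in kHz while the
 * dependency tables are in 10 kHz units, hence the "/ 10" in the
 * comparisons.  If no range matches, the last set id is reused and a
 * warning is printed.
 */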

static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
						 void *clock_range)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
			table_info->vdd_dep_on_sclk;
	struct polaris10_smumgr *smu_data =
			(struct polaris10_smumgr *)(hwmgr->smu_backend);
	SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
	struct dm_pp_wm_sets_with_clock_ranges *watermarks =
			(struct dm_pp_wm_sets_with_clock_ranges *)clock_range;
	uint32_t i, j, k;
	bool valid_entry;

	if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
	      hwmgr->chip_id <= CHIP_VEGAM))
		return -EINVAL;

	for (i = 0; i < dep_mclk_table->count; i++) {
		for (j = 0; j < dep_sclk_table->count; j++) {
			valid_entry = false;
			for (k = 0; k < watermarks->num_wm_sets; k++) {
				if (dep_sclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
				    dep_sclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
				    dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
				    dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
					valid_entry = true;
					table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
					break;
				}
			}
			PP_ASSERT_WITH_CODE(valid_entry,
					"Clock is not within any watermark clock range specified by DAL! Using the highest watermark set.",
					table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id);
		}
	}

	return smu7_copy_bytes_to_smc(hwmgr,
				      smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, DisplayWatermark),
				      (uint8_t *)table->DisplayWatermark,
				      sizeof(uint8_t) * SMU74_MAX_LEVELS_MEMORY * SMU74_MAX_LEVELS_GRAPHICS,
				      SMC_RAM_END);
}

static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_H),
					mc_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_L),
					mc_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
					virtual_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
					virtual_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
					size);
	return 0;
}
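
/*
 * Note on the helpers below: smu7_get_max_high_clocks() reports the top
 * DPM level of each table in the table's native 10 kHz units, and
 * smu7_get_thermal_temperature_range() converts the pptable's
 * usSoftwareShutdownTemp (plain degrees Celsius) into the PowerPlay
 * temperature scale by multiplying with PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */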

static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
					struct amd_pp_simple_clock_info *clocks)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);

	if (clocks == NULL)
		return -EINVAL;

	clocks->memory_max_clock = mclk_table->count > 1 ?
		mclk_table->dpm_levels[mclk_table->count-1].value :
		mclk_table->dpm_levels[0].value;
	clocks->engine_max_clock = sclk_table->count > 1 ?
		sclk_table->dpm_levels[sclk_table->count-1].value :
		sclk_table->dpm_levels[0].value;
	return 0;
}

static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;

	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					uint32_t clk,
					uint32_t voltage)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
		pr_info("OD voltage is out of range [%d - %d] mV\n",
			data->odn_dpm_table.min_vddc,
			data->odn_dpm_table.max_vddc);
		return false;
	}

	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
		if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
			pr_info("OD engine clock is out of range [%d - %d] MHz\n",
				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			return false;
		}
	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
		if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
			hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
			pr_info("OD memory clock is out of range [%d - %d] MHz\n",
				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			return false;
		}
	} else {
		return false;
	}

	return true;
}
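
/*
 * Usage sketch (hypothetical values, format as exposed through the
 * pp_od_clk_voltage interface): smu7_odn_edit_dpm_table() consumes
 * (level, clock in MHz, voltage in mV) triplets, e.g. roughly:
 *
 *	echo "s 1 1350 1150" > pp_od_clk_voltage	# edit SCLK level 1
 *	echo "c" > pp_od_clk_voltage			# commit the table
 *
 * The MHz value is converted to 10 kHz units (* 100) before being range
 * checked by smu7_check_clk_voltage_valid() and stored in both the ODN
 * level table and the voltage dependency table.
 */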

static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	uint32_t i;
	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t input_clk;
	uint32_t input_vol;
	uint32_t input_level;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
				return -EINVAL);

	if (!hwmgr->od_enabled) {
		pr_info("OverDrive feature not enabled\n");
		return -EINVAL;
	}

	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN SCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;

		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN MCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
		smu7_odn_initial_default_setting(hwmgr);
		return 0;
	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
		smu7_check_dpm_table_updated(hwmgr);
		return 0;
	} else {
		return -EINVAL;
	}

	for (i = 0; i < size; i += 3) {
		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
			pr_info("invalid clock voltage input\n");
			return 0;
		}
		input_level = input[i];
		input_clk = input[i+1] * 100;
		input_vol = input[i+2];

		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t i, size = 0;
	uint32_t len;

	static const char *profile_name[7] = {"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};

	static const char *title[8] = {"NUM",
			"MODE_NAME",
			"SCLK_UP_HYST",
			"SCLK_DOWN_HYST",
			"SCLK_ACTIVE_LEVEL",
			"MCLK_UP_HYST",
			"MCLK_DOWN_HYST",
			"MCLK_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	phm_get_sysfs_buf(&buf, &size);

	size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
			title[0], title[1], title[2], title[3],
			title[4], title[5], title[6], title[7]);

	len = ARRAY_SIZE(smu7_profiling);

	for (i = 0; i < len; i++) {
		if (i == hwmgr->power_profile_mode) {
			size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
			i, profile_name[i], "*",
			data->current_profile_setting.sclk_up_hyst,
			data->current_profile_setting.sclk_down_hyst,
			data->current_profile_setting.sclk_activity,
			data->current_profile_setting.mclk_up_hyst,
			data->current_profile_setting.mclk_down_hyst,
			data->current_profile_setting.mclk_activity);
			continue;
		}
		if (smu7_profiling[i].bupdate_sclk)
			size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
			i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
			smu7_profiling[i].sclk_down_hyst,
			smu7_profiling[i].sclk_activity);
		else
			size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
			i, profile_name[i], "-", "-", "-");

		if (smu7_profiling[i].bupdate_mclk)
			size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
			smu7_profiling[i].mclk_up_hyst,
			smu7_profiling[i].mclk_down_hyst,
			smu7_profiling[i].mclk_activity);
		else
			size += sysfs_emit_at(buf, size, "%16s %16s %16s\n",
			"-", "-", "-");
	}

	return size;
}
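
/*
 * Note on the COMPUTE special case below: the while loop finds the index of
 * the highest enabled SCLK level, and "3 << (level - 1)" then selects that
 * level and the one just below it.  Illustration: with an enable mask of
 * 0xff (levels 0-7), level ends up as 7 and the forced mask is 0xc0, so
 * only levels 6 and 7 stay selectable.  Leaving the COMPUTE profile
 * restores the full enabled mask.
 */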

static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
					enum PP_SMC_POWER_PROFILE request)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t tmp, level;

	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			level = 0;
			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				level++;
			if (level > 0)
				smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
		}
	} else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
		smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
	}
}

static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct profile_mode_setting tmp;
	enum PP_SMC_POWER_PROFILE mode;

	if (input == NULL)
		return -EINVAL;

	mode = input[size];
	switch (mode) {
	case PP_SMC_POWER_PROFILE_CUSTOM:
		if (size < 8 && size != 0)
			return -EINVAL;
		/* If only CUSTOM is passed in, use the saved values. Check
		 * that we actually have a CUSTOM profile by ensuring that
		 * the "use sclk" or the "use mclk" bits are set
		 */
		tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
		if (size == 0) {
			if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
				return -EINVAL;
		} else {
			tmp.bupdate_sclk = input[0];
			tmp.sclk_up_hyst = input[1];
			tmp.sclk_down_hyst = input[2];
			tmp.sclk_activity = input[3];
			tmp.bupdate_mclk = input[4];
			tmp.mclk_up_hyst = input[5];
			tmp.mclk_down_hyst = input[6];
			tmp.mclk_activity = input[7];
			smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
		}
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
			hwmgr->power_profile_mode = mode;
		}
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
	case PP_SMC_POWER_PROFILE_POWERSAVING:
	case PP_SMC_POWER_PROFILE_VIDEO:
	case PP_SMC_POWER_PROFILE_VR:
	case PP_SMC_POWER_PROFILE_COMPUTE:
		if (mode == hwmgr->power_profile_mode)
			return 0;

		memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
		if (!smum_update_dpm_settings(hwmgr, &tmp)) {
			if (tmp.bupdate_sclk) {
				data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
				data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
				data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
				data->current_profile_setting.sclk_activity = tmp.sclk_activity;
			}
			if (tmp.bupdate_mclk) {
				data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
				data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
				data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
				data->current_profile_setting.mclk_activity = tmp.mclk_activity;
			}
			smu7_patch_compute_profile_mode(hwmgr, mode);
			hwmgr->power_profile_mode = mode;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
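
/*
 * Note on the CUSTOM profile input above: input[0..7] are consumed in the
 * order bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity,
 * bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity, and the
 * selected mode index is carried in input[size].  The values only take
 * effect if smum_update_dpm_settings() accepts them, at which point they
 * are cached in current_profile_setting for the sysfs read path.
 */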

static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct smu7_power_state *ps;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	ps = cast_const_phw_smu7_power_state(state);

	i = index > ps->performance_level_count - 1 ?
			ps->performance_level_count - 1 : index;

	level->coreClock = ps->performance_levels[i].engine_clock;
	level->memory_clock = ps->performance_levels[i].memory_clock;

	return 0;
}

static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
{
	int result;

	result = smu7_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[disable_dpm_tasks] Failed to disable DPM!",
			);

	return result;
}
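
/*
 * The table below is the SMU7 implementation of the pp_hwmgr_func
 * interface; the PowerPlay core dispatches through hwmgr->hwmgr_func
 * rather than calling the helpers in this file directly.  Illustrative
 * call site (not taken from this file):
 *
 *	if (hwmgr->hwmgr_func->force_clock_level)
 *		hwmgr->hwmgr_func->force_clock_level(hwmgr, PP_SCLK, mask);
 */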

static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
	.backend_init = &smu7_hwmgr_backend_init,
	.backend_fini = &smu7_hwmgr_backend_fini,
	.asic_setup = &smu7_setup_asic_task,
	.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
	.force_dpm_level = &smu7_force_dpm_level,
	.power_state_set = smu7_set_power_state_tasks,
	.get_power_state_size = smu7_get_power_state_size,
	.get_mclk = smu7_dpm_get_mclk,
	.get_sclk = smu7_dpm_get_sclk,
	.patch_boot_state = smu7_dpm_patch_boot_state,
	.get_pp_table_entry = smu7_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
	.powerdown_uvd = smu7_powerdown_uvd,
	.powergate_uvd = smu7_powergate_uvd,
	.powergate_vce = smu7_powergate_vce,
	.disable_clock_power_gating = smu7_disable_clock_power_gating,
	.update_clock_gatings = smu7_update_clock_gatings,
	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
	.display_config_changed = smu7_display_configuration_changed_task,
	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_pwm = smu7_fan_ctrl_get_fan_speed_pwm,
	.set_fan_speed_pwm = smu7_fan_ctrl_set_fan_speed_pwm,
	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
	.register_irq_handlers = smu7_register_irq_handlers,
	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
	.check_states_equal = smu7_check_states_equal,
	.set_fan_control_mode = smu7_set_fan_control_mode,
	.get_fan_control_mode = smu7_get_fan_control_mode,
	.force_clock_level = smu7_force_clock_level,
	.print_clock_levels = smu7_print_clock_levels,
	.powergate_gfx = smu7_powergate_gfx,
	.get_sclk_od = smu7_get_sclk_od,
	.set_sclk_od = smu7_set_sclk_od,
	.get_mclk_od = smu7_get_mclk_od,
	.set_mclk_od = smu7_set_mclk_od,
	.get_clock_by_type = smu7_get_clock_by_type,
	.get_clock_by_type_with_latency = smu7_get_clock_by_type_with_latency,
	.set_watermarks_for_clocks_ranges = smu7_set_watermarks_for_clocks_ranges,
	.read_sensor = smu7_read_sensor,
	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
	.avfs_control = smu7_avfs_control,
	.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
	.start_thermal_controller = smu7_start_thermal_controller,
	.notify_cac_buffer_info = smu7_notify_cac_buffer_info,
	.get_max_high_clocks = smu7_get_max_high_clocks,
	.get_thermal_temperature_range = smu7_get_thermal_temperature_range,
	.odn_edit_dpm_table = smu7_odn_edit_dpm_table,
	.set_power_limit = smu7_set_power_limit,
	.get_power_profile_mode = smu7_get_power_profile_mode,
	.set_power_profile_mode = smu7_set_power_profile_mode,
	.get_performance_level = smu7_get_performance_level,
	.get_asic_baco_capability = smu7_baco_get_capability,
	.get_asic_baco_state = smu7_baco_get_state,
	.set_asic_baco_state = smu7_baco_set_state,
	.power_off_asic = smu7_power_off_asic,
};

uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
		uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);

	PP_ASSERT_WITH_CODE((clock >= min),
			"Engine clock can't satisfy stutter requirement!",
			return 0);
	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}

int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		hwmgr->pptable_func = &pptable_funcs;
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		hwmgr->pptable_func = &pptable_v1_0_funcs;

	return 0;
}