/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pci.h>

#include "smumgr.h"
#include "pp_debug.h"
#include "ci_smumgr.h"
#include "ppsmc.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"
#include "smu7_smumgr.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "processpptables.h"

#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

#define SMC_RAM_END 0x40000

#define CISLAND_MINIMUM_ENGINE_CLOCK 800
#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5

static const struct ci_pt_defaults defaults_hawaii_xt = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt = {
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};


static const struct ci_pt_defaults defaults_saturn_xt = {
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
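
/*
 * Each ci_pt_defaults table above supplies the per-ASIC power-tune
 * defaults consumed by the ci_populate_*() helpers below: the leading
 * scalars feed the SVI load-line, TDC and DTE fields of the PM fuse
 * table, and the two 15-entry arrays are copied into BAPMTI_R/BAPMTI_RC
 * (15 presumably being SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES *
 * SMU7_DTE_SINKS). The table in effect is selected by PCI device ID in
 * ci_initialize_power_tune_defaults().
 */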

static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
					uint32_t smc_addr, uint32_t limit)
{
	if ((0 != (3 & smc_addr))
		|| ((smc_addr + 3) >= limit)) {
		pr_err("smc_addr invalid\n");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	return 0;
}

static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	if ((3 & smc_start_address)
		|| ((smc_start_address + byte_count) >= limit)) {
		pr_err("smc_start_address invalid\n");
		return -EINVAL;
	}

	addr = smc_start_address;

	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {

		data = 0;

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
	}

	return 0;
}


static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}

static bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_PC_C)));
}

static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
				uint32_t *value, uint32_t limit)
{
	int result;

	result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (result)
		return result;

	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
	return 0;
}

static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret;

	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("\n failed to send message %x ret is %d\n", msg, ret);

	return 0;
}

static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					uint16_t msg, uint32_t parameter)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(hwmgr, msg);
}
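
/*
 * SMC mailbox handshake used above: clear SMC_RESP_0, write the message
 * ID to SMC_MESSAGE_0 (for parameterized messages, SMC_MSG_ARG_0 is
 * written first), then poll SMC_RESP_0 until the firmware posts a
 * non-zero status; 1 means success. Note that ci_send_msg_to_smc()
 * only logs a failure and still returns 0, so callers cannot tell a
 * rejected message apart from a successful one.
 */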

static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	dev_id = adev->pdev->device;

	switch (dev_id) {
	case 0x67BA:
	case 0x67B1:
		smu_data->power_tune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67B8:
	case 0x66B0:
		smu_data->power_tune_defaults = &defaults_hawaii_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		smu_data->power_tune_defaults = &defaults_saturn_xt;
		break;
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
	default:
		smu_data->power_tune_defaults = &defaults_bonaire_xt;
		break;
	}
}

static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
	struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
	uint32_t clock, uint32_t *vol)
{
	uint32_t i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*vol = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	/* No entry is at or above the requested clock: fall back to the
	 * voltage of the highest entry.
	 */
	*vol = allowed_clock_voltage_table->entries[i - 1].v;
	return 0;
}

static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t ref_clock;
	uint32_t ref_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
	ref_clock = atomctrl_get_reference_clock(hwmgr);
	ref_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PDIV_A, dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup */
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation */
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		struct pp_atomctrl_internal_ss_info ss_info;
		uint32_t vco_freq = clock * dividers.uc_pll_post_div;

		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
				vco_freq, &ss_info)) {
			uint32_t clk_s = ref_clock * 5 /
					(ref_divider * ss_info.speed_spectrum_rate);
			uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
					fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
		}
	}

	sclk->SclkFrequency = clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (uint8_t)dividers.pll_post_divider;

	return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
				const struct phm_phase_shedding_limits_table *pl,
					uint32_t sclk, uint32_t *p_shed)
{
	unsigned int i;

	/* use the minimum phase shedding */
	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (sclk < pl->entries[i].Sclk) {
			*p_shed = i;
			break;
		}
	}
}

static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
			uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);

	if (clock < min) {
		pr_info("Engine clock can't satisfy stutter requirement!\n");
		return 0;
	}
	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}
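
/*
 * The deep-sleep divider search above walks divider IDs down from
 * CISLAND_MAX_DEEPSLEEP_DIVIDER_ID, returning the largest ID whose
 * divided clock (clock >> i) still meets the minimum engine clock,
 * i.e. the deepest sleep divider that keeps SCLK at or above the floor.
 */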

static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);


	result = ci_calculate_sclk_params(hwmgr, clock, level);

	/* populate graphics levels */
	result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
			(uint32_t *)(&level->MinVddc));
	if (result) {
		pr_err("vdd_dep_on_sclk table is NULL\n");
		return result;
	}

	level->SclkFrequency = clock;
	level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(hwmgr,
				hwmgr->dyn_state.vddc_phase_shed_limits_table,
				clock,
				&level->MinVddcPhases);

	level->ActivityLevel = data->current_profile_setting.sclk_activity;
	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	/* this level can be used for throttling. */
	level->EnabledForThrottle = 1;
	level->UpH = data->current_profile_setting.sclk_up_hyst;
	level->DownH = data->current_profile_setting.sclk_down_hyst;
	level->VoltageDownH = 0;
	level->PowerThrottle = 0;


	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId =
				ci_get_sleep_divider_id_from_clock(clock,
						CISLAND_MINIMUM_ENGINE_CLOCK);

	/* Default to slow; the highest DPM level will be set to
	 * PPSMC_DISPLAY_WATERMARK_HIGH later.
	 */
	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (0 == result) {
		level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	}

	return result;
}

static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result = 0;
	uint32_t array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
			SMU7_MAX_LEVELS_GRAPHICS;
	struct SMU7_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = ci_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				&levels[i]);
		if (result)
			return result;
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}

	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	result = ci_copy_bytes_to_smc(hwmgr, array,
				(u8 *)levels, array_size,
				SMC_RAM_END);

	return result;

}

static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->tdc_vddc_throttle_release_limit_perc;
	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (ci_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
				return -EINVAL);
	else
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	uint16_t tmp;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
	else
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);

	return 0;
}

static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
			    "The CAC Leakage table does not exist!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
			    "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
			    "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

	for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
			hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
		} else {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
		}
	}

	return 0;
}

static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *vid = smu_data->power_tune_table.VddCVid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
		"There should never be more than 8 entries for VddcVid!!!",
		return -EINVAL);

	for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
	smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint16_t HiSidd;
	uint16_t LoSidd;
	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;

	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);

	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);

	return 0;
}

static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;
	int ret = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END)) {
			pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
			return -EINVAL;
		}

		/* DW0 - DW3 */
		ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
		/* DW4 - DW5 */
		ret |= ci_populate_vddc_vid(hwmgr);
		/* DW6 */
		ret |= ci_populate_svi_load_line(hwmgr);
		/* DW7 */
		ret |= ci_populate_tdc_limit(hwmgr);
		/* DW8 */
		ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);

		ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);

		ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);

		ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
		if (ret)
			return ret;

		ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
	}
	return ret;
}
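
/*
 * Scaling conventions used by the PM-fuse helpers above: power and
 * temperature limits are carried to the SMC in what appears to be 8.8
 * fixed point (hence the "* 256"), voltages are scaled by VOLTAGE_SCALE,
 * and every multi-byte field is byte-swapped to the SMC's layout with
 * the CONVERT_FROM_HOST_TO_SMC_US/UL macros before the whole
 * SMU7_Discrete_PmFuses image is copied into SMC RAM.
 */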

static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	const uint16_t *def1, *def2;
	int i, j, k;

	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	dpm_table->DTETjOffset = 0;
	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
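
/*
 * def1/def2 above walk the flat bapmti_r/bapmti_rc arrays from the
 * selected ci_pt_defaults table in sink-major order, so the flat array
 * length must equal SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES *
 * SMU7_DTE_SINKS (the 15-entry arrays at the top of this file).
 */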

static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
		uint16_t *lo)
{
	uint16_t v_index;
	bool vol_found = false;
	*hi = tab->value * VOLTAGE_SCALE;
	*lo = tab->value * VOLTAGE_SCALE;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
			"The SCLK/VDDC Dependency Table does not exist.\n",
			return -EINVAL);

	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
		pr_warn("CAC Leakage Table does not exist, using vddc.\n");
		return 0;
	}

	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
			vol_found = true;
			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
			} else {
				pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
			}
			break;
		}
	}

	if (!vol_found) {
		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
				vol_found = true;
				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
				} else {
					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
				}
				break;
			}
		}

		if (!vol_found)
			pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
	}

	return 0;
}

static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab,
		SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
	int result;

	result = ci_get_std_voltage_value_sidd(hwmgr, tab,
			&smc_voltage_tab->StdVoltageHiSidd,
			&smc_voltage_tab->StdVoltageLoSidd);
	if (result) {
		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
	}

	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);

	return 0;
}

static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	unsigned int count;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	table->VddcLevelCount = data->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddc_voltage_table.entries[count]),
				&(table->VddcLevel[count]));
		PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);

		/* GPIO voltage control */
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
			table->VddcLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
			table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
		} else {
			table->VddcLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);

	return 0;
}

static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->VddciLevelCount = data->vddci_voltage_table.count;

	for (count = 0; count < table->VddciLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddci_voltage_table.entries[count]),
				&(table->VddciLevel[count]));
		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
			table->VddciLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
			table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
		} else {
			table->VddciLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);

	return 0;
}

static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->MvddLevelCount = data->mvdd_voltage_table.count;

	for (count = 0; count < table->MvddLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->mvdd_voltage_table.entries[count]),
				&table->MvddLevel[count]);
		PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
			table->MvddLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
			table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
		} else {
			table->MvddLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);

	return 0;
}


static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
	SMU7_Discrete_DpmTable *table)
{
	int result;

	result = ci_populate_smc_vddc_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDC voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_vdd_ci_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDCI voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_mvdd_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate MVDD voltage table to SMC", return -EINVAL);

	return 0;
}

static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU7_Discrete_Ulv *state)
{
	uint32_t voltage_response_time, ulv_voltage;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);

	if (ulv_voltage == 0) {
		data->ulv_supported = false;
		return 0;
	}

	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffset = 0;
		else
			/* used in SMIO Mode. not implemented for now. this is backup only for CI. */
			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
	} else {
		/* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffsetVid = 0;
		else  /* used in SVI2 Mode */
			state->VddcOffsetVid = (uint8_t)(
					(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
						* VOLTAGE_VID_OFFSET_SCALE2
						/ VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = 1;

	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}

static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
		 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(hwmgr, ulv_level);
}
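
/*
 * The SVI2 branch above converts the millivolt delta between the lowest
 * SCLK/VDDC dependency entry and the ULV voltage into VID steps via
 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1; assuming the
 * usual smu7 definitions (100 and 625 respectively), that divides the
 * millivolt delta by 6.25 mV per VID step.
 */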

static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t i;

	/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}

static int ci_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
			MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	if (data->is_memory_gddr5) {
		mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
				MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
				MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1 */
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);


	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}

static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint8_t mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
	}

	return mc_para_index;
}

static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	uint8_t mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);

	return mc_para_index;
}
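
/*
 * Worked example for the ratio helpers above, with memory_clock in the
 * 10 kHz units used throughout this file: in strobe mode a 300 MHz MCLK
 * (30000) maps to (30000 - 10000) / 2500 = index 8, while out of strobe
 * mode a 400 MHz MCLK (40000) is below the 65000 floor and clamps to
 * index 0. The resulting index is later compared against the ratio read
 * from MC_SEQ_MISC7 when choosing the DLL state.
 */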

static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr,
		const struct phm_phase_shedding_limits_table *pl,
		uint32_t memory_clock, uint32_t *p_shed)
{
	unsigned int i;

	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (memory_clock < pl->entries[i].Mclk) {
			*p_shed = i;
			break;
		}
	}

	return 0;
}

static int ci_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *memory_level
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int result = 0;
	bool dll_state_on;
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddci_dependency_on_mclk,
				memory_clock,
				&memory_level->MinVddci);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.mvdd_dependency_on_mclk,
				memory_clock,
				&memory_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinMvdd voltage value from memory MVDD voltage dependency table", return result);
	}

	memory_level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control) {
		ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
				memory_clock, &memory_level->MinVddcPhases);
	}

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
	memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
	memory_level->VoltageDownH = 0;

	/* Indicates maximum activity level for this performance level. */
	memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later. */
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
	data->display_timing.vrefresh = hwmgr->display_config->vrefresh;

	/* stutter mode not supported on ci */

	/* decide strobe mode */
	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
			(memory_clock <= mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio */
	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			else
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
		} else
			dll_state_on = data->dll_default_on;
	} else {
		memory_level->StrobeRatio =
			ci_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = ci_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	if (0 == result) {
		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
		/* MCLK frequency in units of 10KHz */
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level. */
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}
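
/*
 * DLL on/off for strobe mode above is derived from fused straps: bits
 * 16-19 of MC_SEQ_MISC7 appear to hold the highest strobe-capable
 * frequency ratio, and bit 1 of MC_SEQ_MISC5/MC_SEQ_MISC6 selects the
 * DLL state for ratios at/above or below that point. (Reading of the
 * bitfields as inferred from the code; the register layout is not
 * documented in this file.)
 */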

static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero", return -EINVAL);
		result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
			&(smu_data->smc_state_table.MemoryLevel[i]));
		if (0 != result)
			return result;
	}

	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	dev_id = adev->pdev->device;

	/* Device-specific quirk for 0x67B0/0x67B1: keep level 1 VDDCI/MVDD
	 * at the level 0 floor, presumably a board-specific workaround.
	 */
	if ((dpm_table->mclk_table.count >= 2)
		&& ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
		smu_data->smc_state_table.MemoryLevel[1].MinVddci =
				smu_data->smc_state_table.MemoryLevel[0].MinVddci;
		smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
				smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
	}
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	result = ci_copy_bytes_to_smc(hwmgr,
		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}

static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
					SMU7_Discrete_VoltageLevel *voltage)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t i = 0;

	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
		/* find the first mvdd entry whose clock is at least the request */
		for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
			if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
				/* Always round to higher voltage. */
				voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
			"MVDD Voltage is outside the supported range.", return -EINVAL);

	} else {
		return -EINVAL;
	}

	return 0;
}

static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
					SMU7_Discrete_DpmTable *table)
{
	int result = 0;
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;

	SMU7_Discrete_VoltageLevel voltage_level;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;


	/* The ACPI state should not do DPM on DC (or ever). */
	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (data->acpi_vddc)
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
	/* assign zero for now */
	table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
		table->ACPILevel.SclkFrequency, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
		"Error retrieving Engine Clock dividers from VBIOS.", return result);

	/* divider ID for required SCLK */
	table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
			CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
			CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
	spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
			CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	/* For various features to be enabled/disabled while this level is active. */
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
	/* SCLK frequency in units of 10KHz */
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);


	/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; */
	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
		table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
	else {
		if (data->acpi_vddci != 0)
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
	}

	if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd =
			PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
	else
		table->MemoryACPILevel.MinMvdd = 0;

	/* Force reset on DLL */
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);

	/* Disable DLL in ACPIState */
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);

	/* Enable DLL bypass signal */
	dll_cntl = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK0_BYPASS, 0);
	dll_cntl = PHM_SET_FIELD(dll_cntl,
		DLL_CNTL, MRDCK1_BYPASS, 0);

	table->MemoryACPILevel.DllCntl =
		PP_HOST_TO_SMC_UL(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl =
		PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
	table->MemoryACPILevel.MpllDqFuncCntl =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
	table->MemoryACPILevel.MpllSs1 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
	table->MemoryACPILevel.MpllSs2 =
		PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	/* Indicates maximum activity level for this performance level. */
	table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);

	table->MemoryACPILevel.StutterEnable = 0;
	table->MemoryACPILevel.StrobeEnable = 0;
	table->MemoryACPILevel.EdcReadEnable = 0;
	table->MemoryACPILevel.EdcWriteEnable = 0;
	table->MemoryACPILevel.RttEnable = 0;

	return result;
}
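
/*
 * Summary of the ACPI level built above: the SPLL is powered off and
 * held in reset with SCLK_MUX_SEL forced to 4 (presumably bypassing the
 * PLL to the reference clock, consistent with SclkFrequency being set
 * to the reference clock), and on the memory side the DLLs are reset
 * and powered down, making this the lowest-power state the SMC can
 * select.
 */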

static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
					SMU7_Discrete_DpmTable *table)
{
	int result = 0;
	uint8_t count;
	struct pp_atomctrl_clock_dividers_vi dividers;
	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	table->UvdLevelCount = (uint8_t)(uvd_table->count);

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
					uvd_table->entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
					uvd_table->entries[count].dclk;
		table->UvdLevel[count].MinVddc =
					uvd_table->entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].VclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Vclk clock", return result);

		table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;

		result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
				table->UvdLevel[count].DclkFrequency, &dividers);
		PP_ASSERT_WITH_CODE((0 == result),
				"can not find divide id for Dclk clock", return result);

		table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
	}

	return result;
}
1558 static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr, 1559 SMU7_Discrete_DpmTable *table) 1560 { 1561 int result = -EINVAL; 1562 uint8_t count; 1563 struct pp_atomctrl_clock_dividers_vi dividers; 1564 struct phm_vce_clock_voltage_dependency_table *vce_table = 1565 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 1566 1567 table->VceLevelCount = (uint8_t)(vce_table->count); 1568 table->VceBootLevel = 0; 1569 1570 for (count = 0; count < table->VceLevelCount; count++) { 1571 table->VceLevel[count].Frequency = vce_table->entries[count].evclk; 1572 table->VceLevel[count].MinVoltage = 1573 vce_table->entries[count].v * VOLTAGE_SCALE; 1574 table->VceLevel[count].MinPhases = 1; 1575 1576 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, 1577 table->VceLevel[count].Frequency, &dividers); 1578 PP_ASSERT_WITH_CODE((0 == result), 1579 "cannot find divider id for VCE engine clock", 1580 return result); 1581 1582 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; 1583 1584 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); 1585 CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage); 1586 } 1587 return result; 1588 } 1589 1590 static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr, 1591 SMU7_Discrete_DpmTable *table) 1592 { 1593 int result = -EINVAL; 1594 uint8_t count; 1595 struct pp_atomctrl_clock_dividers_vi dividers; 1596 struct phm_acp_clock_voltage_dependency_table *acp_table = 1597 hwmgr->dyn_state.acp_clock_voltage_dependency_table; 1598 1599 table->AcpLevelCount = (uint8_t)(acp_table->count); 1600 table->AcpBootLevel = 0; 1601 1602 for (count = 0; count < table->AcpLevelCount; count++) { 1603 table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk; 1604 table->AcpLevel[count].MinVoltage = acp_table->entries[count].v; 1605 table->AcpLevel[count].MinPhases = 1; 1606 1607 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, 1608 table->AcpLevel[count].Frequency, &dividers); 1609 PP_ASSERT_WITH_CODE((0 == result), 1610 "cannot find divider id for ACP engine clock", return result); 1611 1612 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; 1613 1614 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); 1615 CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage); 1616 } 1617 return result; 1618 } 1619 1620 static int ci_populate_memory_timing_parameters( 1621 struct pp_hwmgr *hwmgr, 1622 uint32_t engine_clock, 1623 uint32_t memory_clock, 1624 struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs 1625 ) 1626 { 1627 uint32_t dramTiming; 1628 uint32_t dramTiming2; 1629 uint32_t burstTime; 1630 int result; 1631 1632 result = atomctrl_set_engine_dram_timings_rv770(hwmgr, 1633 engine_clock, memory_clock); 1634 1635 PP_ASSERT_WITH_CODE(result == 0, 1636 "Error calling VBIOS to set DRAM_TIMING.", return result); 1637 1638 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); 1639 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); 1640 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); 1641 1642 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); 1643 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); 1644 arb_regs->McArbBurstTime = (uint8_t)burstTime; 1645 1646 return 0; 1647 } 1648 1649 static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) 1650 { 1651 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1652 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 1653 int result = 0; 1654 SMU7_Discrete_MCArbDramTimingTable arb_regs; 1655 uint32_t i, j; 1656 1657 memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable)); 1658
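/* Build one DRAM timing entry for every SCLK/MCLK combination: arb_regs.entries[i][j] is indexed by SCLK level i and MCLK level j, and the whole matrix is uploaded to SMC RAM in a single copy below. */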
1659 for (i = 0; i < data->dpm_table.sclk_table.count; i++) { 1660 for (j = 0; j < data->dpm_table.mclk_table.count; j++) { 1661 result = ci_populate_memory_timing_parameters 1662 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, 1663 data->dpm_table.mclk_table.dpm_levels[j].value, 1664 &arb_regs.entries[i][j]); 1665 1666 if (0 != result) 1667 break; 1668 } 1669 } 1670 1671 if (0 == result) { 1672 result = ci_copy_bytes_to_smc( 1673 hwmgr, 1674 smu_data->arb_table_start, 1675 (uint8_t *)&arb_regs, 1676 sizeof(SMU7_Discrete_MCArbDramTimingTable), 1677 SMC_RAM_END 1678 ); 1679 } 1680 1681 return result; 1682 } 1683 1684 static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr, 1685 SMU7_Discrete_DpmTable *table) 1686 { 1687 int result = 0; 1688 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1689 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 1690 1691 table->GraphicsBootLevel = 0; 1692 table->MemoryBootLevel = 0; 1693 1694 /* find boot level from dpm table */ 1695 result = phm_find_boot_level(&(data->dpm_table.sclk_table), 1696 data->vbios_boot_state.sclk_bootup_value, 1697 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel)); 1698 1699 if (0 != result) { 1700 smu_data->smc_state_table.GraphicsBootLevel = 0; 1701 pr_err("VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!\n"); 1702 result = 0; 1703 } 1704 1705 result = phm_find_boot_level(&(data->dpm_table.mclk_table), 1706 data->vbios_boot_state.mclk_bootup_value, 1707 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel)); 1708 1709 if (0 != result) { 1710 smu_data->smc_state_table.MemoryBootLevel = 0; 1711 pr_err("VBIOS did not find boot memory clock value in dependency table. Using Memory DPM level 0!\n"); 1712 result = 0; 1713 } 1714 1715 table->BootVddc = data->vbios_boot_state.vddc_bootup_value; 1716 table->BootVddci = data->vbios_boot_state.vddci_bootup_value; 1717 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; 1718 1719 return result; 1720 } 1721 1722 static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr, 1723 SMU7_Discrete_MCRegisters *mc_reg_table) 1724 { 1725 const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend; 1726 1727 uint32_t i, j; 1728 1729 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) { 1730 if (smu_data->mc_reg_table.validflag & 1 << j) { 1731 PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE, 1732 "Index of mc_reg_table->address[] array out of bounds", return -EINVAL); 1733 mc_reg_table->address[i].s0 = 1734 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0); 1735 mc_reg_table->address[i].s1 = 1736 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1); 1737 i++; 1738 } 1739 } 1740 1741 mc_reg_table->last = (uint8_t)i; 1742 1743 return 0; 1744 } 1745 1746 static void ci_convert_mc_registers( 1747 const struct ci_mc_reg_entry *entry, 1748 SMU7_Discrete_MCRegisterSet *data, 1749 uint32_t num_entries, uint32_t valid_flag) 1750 { 1751 uint32_t i, j; 1752 1753 for (i = 0, j = 0; j < num_entries; j++) { 1754 if (valid_flag & 1 << j) { 1755 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]); 1756 i++; 1757 } 1758 } 1759 } 1760 1761 static int ci_convert_mc_reg_table_entry_to_smc( 1762 struct pp_hwmgr *hwmgr, 1763 const uint32_t memory_clock, 1764 SMU7_Discrete_MCRegisterSet *mc_reg_table_data 1765 ) 1766 { 1767 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 1768 uint32_t i = 0; 1769 1770 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) 
{ 1771 if (memory_clock <= 1772 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) { 1773 break; 1774 } 1775 } 1776 1777 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0)) 1778 --i; 1779 1780 ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i], 1781 mc_reg_table_data, smu_data->mc_reg_table.last, 1782 smu_data->mc_reg_table.validflag); 1783 1784 return 0; 1785 } 1786 1787 static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, 1788 SMU7_Discrete_MCRegisters *mc_regs) 1789 { 1790 int result = 0; 1791 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1792 int res; 1793 uint32_t i; 1794 1795 for (i = 0; i < data->dpm_table.mclk_table.count; i++) { 1796 res = ci_convert_mc_reg_table_entry_to_smc( 1797 hwmgr, 1798 data->dpm_table.mclk_table.dpm_levels[i].value, 1799 &mc_regs->data[i] 1800 ); 1801 1802 if (0 != res) 1803 result = res; 1804 } 1805 1806 return result; 1807 } 1808 1809 static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) 1810 { 1811 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 1812 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1813 uint32_t address; 1814 int32_t result; 1815 1816 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) 1817 return 0; 1818 1819 1820 memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters)); 1821 1822 result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs)); 1823 1824 if (result != 0) 1825 return result; 1826 1827 address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]); 1828 1829 return ci_copy_bytes_to_smc(hwmgr, address, 1830 (uint8_t *)&smu_data->mc_regs.data[0], 1831 sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, 1832 SMC_RAM_END); 1833 } 1834 1835 static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) 1836 { 1837 int result; 1838 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 1839 1840 memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters)); 1841 result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); 1842 PP_ASSERT_WITH_CODE(0 == result, 1843 "Failed to initialize MCRegTable for the MC register addresses!", return result;); 1844 1845 result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs); 1846 PP_ASSERT_WITH_CODE(0 == result, 1847 "Failed to initialize MCRegTable for driver state!", return result;); 1848 1849 return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start, 1850 (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END); 1851 } 1852 1853 static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr) 1854 { 1855 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1856 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 1857 uint8_t count, level; 1858 1859 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); 1860 1861 for (level = 0; level < count; level++) { 1862 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk 1863 >= data->vbios_boot_state.sclk_bootup_value) { 1864 smu_data->smc_state_table.GraphicsBootLevel = level; 1865 break; 1866 } 1867 } 1868 1869 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); 1870 1871 for (level = 0; level < count; level++) { 1872 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk 1873 >= data->vbios_boot_state.mclk_bootup_value) { 1874 smu_data->smc_state_table.MemoryBootLevel = level; 1875 break; 1876 } 
1877 } 1878 1879 return 0; 1880 } 1881 1882 static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, 1883 SMU7_Discrete_DpmTable *table) 1884 { 1885 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1886 1887 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) 1888 table->SVI2Enable = 1; 1889 else 1890 table->SVI2Enable = 0; 1891 return 0; 1892 } 1893 1894 static int ci_start_smc(struct pp_hwmgr *hwmgr) 1895 { 1896 /* set SMC instruction start point at 0x0 */ 1897 ci_program_jump_on_start(hwmgr); 1898 1899 /* enable SMC clock */ 1900 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); 1901 1902 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); 1903 1904 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, 1905 INTERRUPTS_ENABLED, 1); 1906 1907 return 0; 1908 } 1909 1910 static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table) 1911 { 1912 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1913 uint16_t config; 1914 1915 config = VR_SVI2_PLANE_1; 1916 table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); 1917 1918 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { 1919 config = VR_SVI2_PLANE_2; 1920 table->VRConfig |= config; 1921 } else { 1922 pr_info("VDDC should be on SVI2 controller!"); 1923 } 1924 1925 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { 1926 config = VR_SVI2_PLANE_2; 1927 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); 1928 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { 1929 config = VR_SMIO_PATTERN_1; 1930 table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); 1931 } 1932 1933 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { 1934 config = VR_SMIO_PATTERN_2; 1935 table->VRConfig |= (config << VRCONF_MVDD_SHIFT); 1936 } 1937 1938 return 0; 1939 } 1940 1941 static int ci_init_smc_table(struct pp_hwmgr *hwmgr) 1942 { 1943 int result; 1944 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1945 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 1946 SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table); 1947 struct pp_atomctrl_gpio_pin_assignment gpio_pin; 1948 u32 i; 1949 1950 ci_initialize_power_tune_defaults(hwmgr); 1951 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table)); 1952 1953 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) 1954 ci_populate_smc_voltage_tables(hwmgr, table); 1955 1956 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1957 PHM_PlatformCaps_AutomaticDCTransition)) 1958 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 1959 1960 1961 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1962 PHM_PlatformCaps_StepVddc)) 1963 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 1964 1965 if (data->is_memory_gddr5) 1966 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 1967 1968 if (data->ulv_supported) { 1969 result = ci_populate_ulv_state(hwmgr, &(table->Ulv)); 1970 PP_ASSERT_WITH_CODE(0 == result, 1971 "Failed to initialize ULV state!", return result); 1972 1973 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 1974 ixCG_ULV_PARAMETER, 0x40035); 1975 } 1976 1977 result = ci_populate_all_graphic_levels(hwmgr); 1978 PP_ASSERT_WITH_CODE(0 == result, 1979 "Failed to initialize Graphics Level!", return result); 1980 1981 result = ci_populate_all_memory_levels(hwmgr); 1982 PP_ASSERT_WITH_CODE(0 == result, 1983 "Failed to initialize Memory Level!", return result); 1984 1985
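/* The remaining per-block level tables (link, ACPI, VCE, ACP, UVD) are filled in next, before the aggregate DPM table is copied into SMC RAM. */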
result = ci_populate_smc_link_level(hwmgr, table); 1986 PP_ASSERT_WITH_CODE(0 == result, 1987 "Failed to initialize Link Level!", return result); 1988 1989 result = ci_populate_smc_acpi_level(hwmgr, table); 1990 PP_ASSERT_WITH_CODE(0 == result, 1991 "Failed to initialize ACPI Level!", return result); 1992 1993 result = ci_populate_smc_vce_level(hwmgr, table); 1994 PP_ASSERT_WITH_CODE(0 == result, 1995 "Failed to initialize VCE Level!", return result); 1996 1997 result = ci_populate_smc_acp_level(hwmgr, table); 1998 PP_ASSERT_WITH_CODE(0 == result, 1999 "Failed to initialize ACP Level!", return result); 2000 2001 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state), we only 2002 * need to populate the ARB settings for the initial state. */ 2003 result = ci_program_memory_timing_parameters(hwmgr); 2004 PP_ASSERT_WITH_CODE(0 == result, 2005 "Failed to write ARB settings for the initial state.", return result); 2006 2007 result = ci_populate_smc_uvd_level(hwmgr, table); 2008 PP_ASSERT_WITH_CODE(0 == result, 2009 "Failed to initialize UVD Level!", return result); 2010 2011 table->UvdBootLevel = 0; 2012 table->VceBootLevel = 0; 2013 table->AcpBootLevel = 0; 2014 table->SamuBootLevel = 0; 2015 2016 table->GraphicsBootLevel = 0; 2017 table->MemoryBootLevel = 0; 2018 2019 result = ci_populate_smc_boot_level(hwmgr, table); 2020 PP_ASSERT_WITH_CODE(0 == result, 2021 "Failed to initialize Boot Level!", return result); 2022 2023 result = ci_populate_smc_initial_state(hwmgr); 2024 PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); 2025 2026 result = ci_populate_bapm_parameters_in_dpm_table(hwmgr); 2027 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); 2028 2029 table->UVDInterval = 1; 2030 table->VCEInterval = 1; 2031 table->ACPInterval = 1; 2032 table->SAMUInterval = 1; 2033 table->GraphicsVoltageChangeEnable = 1; 2034 table->GraphicsThermThrottleEnable = 1; 2035 table->GraphicsInterval = 1; 2036 table->VoltageInterval = 1; 2037 table->ThermalInterval = 1; 2038 2039 table->TemperatureLimitHigh = 2040 (data->thermal_temp_setting.temperature_high * 2041 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2042 table->TemperatureLimitLow = 2043 (data->thermal_temp_setting.temperature_low * 2044 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 2045 2046 table->MemoryVoltageChangeEnable = 1; 2047 table->MemoryInterval = 1; 2048 table->VoltageResponseTime = 0; 2049 table->VddcVddciDelta = 4000; 2050 table->PhaseResponseTime = 0; 2051 table->MemoryThermThrottleEnable = 1; 2052 2053 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count), 2054 "There must be 1 or more PCIE levels defined in PPTable.", 2055 return -EINVAL); 2056 2057 table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count; 2058 table->PCIeGenInterval = 1; 2059 2060 result = ci_populate_vr_config(hwmgr, table); 2061 PP_ASSERT_WITH_CODE(0 == result, 2062 "Failed to populate VRConfig setting!", return result); 2063 data->vr_config = table->VRConfig; 2064 2065 ci_populate_smc_svi2_config(hwmgr, table); 2066 2067 for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++) 2068 CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]); 2069 2070 table->ThermGpio = 17; 2071 table->SclkStepSize = 0x4000; 
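/* Query the VBIOS for a VRHot GPIO pin; the RegulatorHot capability is advertised only when such a pin exists. */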
2072 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { 2073 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; 2074 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 2075 PHM_PlatformCaps_RegulatorHot); 2076 } else { 2077 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; 2078 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 2079 PHM_PlatformCaps_RegulatorHot); 2080 } 2081 2082 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; 2083 2084 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); 2085 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); 2086 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); 2087 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); 2088 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); 2089 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); 2090 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); 2091 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); 2092 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); 2093 table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta); 2094 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); 2095 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); 2096 2097 table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); 2098 table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE); 2099 table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); 2100 2101 /* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */ 2102 result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start + 2103 offsetof(SMU7_Discrete_DpmTable, SystemFlags), 2104 (uint8_t *)&(table->SystemFlags), 2105 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController), 2106 SMC_RAM_END); 2107 2108 PP_ASSERT_WITH_CODE(0 == result, 2109 "Failed to upload dpm data to SMC memory!", return result;); 2110 2111 result = ci_populate_initial_mc_reg_table(hwmgr); 2112 PP_ASSERT_WITH_CODE((0 == result), 2113 "Failed to populate initial MC Reg table!", return result); 2114 2115 result = ci_populate_pm_fuses(hwmgr); 2116 PP_ASSERT_WITH_CODE(0 == result, 2117 "Failed to populate PM fuses to SMC memory!", return result); 2118 2119 ci_start_smc(hwmgr); 2120 2121 return 0; 2122 } 2123 2124 static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) 2125 { 2126 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend); 2127 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; 2128 uint32_t duty100; 2129 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; 2130 uint16_t fdo_min, slope1, slope2; 2131 uint32_t reference_clock; 2132 int res; 2133 uint64_t tmp64; 2134 2135 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) 2136 return 0; 2137 2138 if (hwmgr->thermal_controller.fanInfo.bNoFan) { 2139 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 2140 PHM_PlatformCaps_MicrocodeFanControl); 2141 return 0; 2142 } 2143 2144 if (0 == ci_data->fan_table_start) { 2145 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); 2146 return 0; 2147 } 2148 2149 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); 2150 2151 if (0 == duty100) { 2152 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); 2153 return 0; 2154 } 2155 2156 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; 2157 do_div(tmp64, 10000); 2158 fdo_min = (uint16_t)tmp64; 2159
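/* Derive the two fan-curve slopes (PWM delta over temperature delta) for the TMin..TMed and TMed..THigh segments; the x16 factor and the +50/100 arithmetic apparently round the result into the SMC's fixed-point format. */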
2160 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; 2161 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; 2162 2163 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; 2164 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; 2165 2166 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); 2167 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); 2168 2169 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); 2170 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); 2171 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); 2172 2173 fan_table.Slope1 = cpu_to_be16(slope1); 2174 fan_table.Slope2 = cpu_to_be16(slope2); 2175 2176 fan_table.FdoMin = cpu_to_be16(fdo_min); 2177 2178 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); 2179 2180 fan_table.HystUp = cpu_to_be16(1); 2181 2182 fan_table.HystSlope = cpu_to_be16(1); 2183 2184 fan_table.TempRespLim = cpu_to_be16(5); 2185 2186 reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); 2187 2188 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); 2189 2190 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); 2191 2192 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); 2193 2194 res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); 2195 2196 return res; 2197 } 2198 2199 static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) 2200 { 2201 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2202 2203 if (data->need_update_smu7_dpm_table & 2204 (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK)) 2205 return ci_program_memory_timing_parameters(hwmgr); 2206 2207 return 0; 2208 } 2209 2210 static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr) 2211 { 2212 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2213 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 2214 2215 int result = 0; 2216 uint32_t low_sclk_interrupt_threshold = 0; 2217 2218 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 2219 PHM_PlatformCaps_SclkThrottleLowNotification) 2220 && (data->low_sclk_interrupt_threshold != 0)) { 2221 low_sclk_interrupt_threshold = 2222 data->low_sclk_interrupt_threshold; 2223 2224 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); 2225 2226 result = ci_copy_bytes_to_smc( 2227 hwmgr, 2228 smu_data->dpm_table_start + 2229 offsetof(SMU7_Discrete_DpmTable, 2230 LowSclkInterruptT), 2231 (uint8_t *)&low_sclk_interrupt_threshold, 2232 sizeof(uint32_t), 2233 SMC_RAM_END); 2234 } 2235 2236 result = ci_update_and_upload_mc_reg_table(hwmgr); 2237 2238 PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result); 2239 2240 result = ci_program_mem_timing_parameters(hwmgr); 2241 PP_ASSERT_WITH_CODE((result == 0), 2242 "Failed to program memory timing parameters!", 2243 ); 2244 2245 return result; 2246 } 2247 2248 static uint32_t ci_get_offsetof(uint32_t type, uint32_t member) 2249 {
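/* Map generic SMU member identifiers onto byte offsets within the CI-specific SMC structures; unknown members fall through to the pr_debug below and return 0. */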
2250 switch (type) { 2251 case SMU_SoftRegisters: 2252 switch (member) { 2253 case HandshakeDisables: 2254 return offsetof(SMU7_SoftRegisters, HandshakeDisables); 2255 case VoltageChangeTimeout: 2256 return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout); 2257 case AverageGraphicsActivity: 2258 return offsetof(SMU7_SoftRegisters, AverageGraphicsA); 2259 case AverageMemoryActivity: 2260 return offsetof(SMU7_SoftRegisters, AverageMemoryA); 2261 case PreVBlankGap: 2262 return offsetof(SMU7_SoftRegisters, PreVBlankGap); 2263 case VBlankTimeout: 2264 return offsetof(SMU7_SoftRegisters, VBlankTimeout); 2265 case DRAM_LOG_ADDR_H: 2266 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H); 2267 case DRAM_LOG_ADDR_L: 2268 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L); 2269 case DRAM_LOG_PHY_ADDR_H: 2270 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H); 2271 case DRAM_LOG_PHY_ADDR_L: 2272 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L); 2273 case DRAM_LOG_BUFF_SIZE: 2274 return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE); 2275 } 2276 break; 2277 case SMU_Discrete_DpmTable: 2278 switch (member) { 2279 case LowSclkInterruptThreshold: 2280 return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT); 2281 } 2282 break; 2283 } 2284 pr_debug("can't get the offset of type %x member %x\n", type, member); 2285 return 0; 2286 } 2287 2288 static uint32_t ci_get_mac_definition(uint32_t value) 2289 { 2290 switch (value) { 2291 case SMU_MAX_LEVELS_GRAPHICS: 2292 return SMU7_MAX_LEVELS_GRAPHICS; 2293 case SMU_MAX_LEVELS_MEMORY: 2294 return SMU7_MAX_LEVELS_MEMORY; 2295 case SMU_MAX_LEVELS_LINK: 2296 return SMU7_MAX_LEVELS_LINK; 2297 case SMU_MAX_ENTRIES_SMIO: 2298 return SMU7_MAX_ENTRIES_SMIO; 2299 case SMU_MAX_LEVELS_VDDC: 2300 return SMU7_MAX_LEVELS_VDDC; 2301 case SMU_MAX_LEVELS_VDDCI: 2302 return SMU7_MAX_LEVELS_VDDCI; 2303 case SMU_MAX_LEVELS_MVDD: 2304 return SMU7_MAX_LEVELS_MVDD; 2305 } 2306 2307 pr_debug("can't get the mac of %x\n", value); 2308 return 0; 2309 } 2310 2311 static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr) 2312 { 2313 uint32_t byte_count, start_addr; 2314 uint8_t *src; 2315 uint32_t data; 2316 2317 struct cgs_firmware_info info = {0}; 2318 2319 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info); 2320 2321 hwmgr->is_kicker = info.is_kicker; 2322 hwmgr->smu_version = info.version; 2323 byte_count = info.image_size; 2324 src = (uint8_t *)info.kptr; 2325 start_addr = info.ucode_start_address; 2326 2327 if (byte_count > SMC_RAM_END) { 2328 pr_err("SMC address is beyond the SMC RAM area.\n"); 2329 return -EINVAL; 2330 } 2331 2332 cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr); 2333 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); 2334 2335 for (; byte_count >= 4; byte_count -= 4) { 2336 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 2337 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); 2338 src += 4; 2339 } 2340 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); 2341 2342 if (0 != byte_count) { 2343 pr_err("SMC size must be divisible by 4\n"); 2344 return -EINVAL; 2345 } 2346 2347 return 0; 2348 } 2349 2350 static int ci_upload_firmware(struct pp_hwmgr *hwmgr) 2351 { 2352 if (ci_is_smc_ram_running(hwmgr)) { 2353 pr_info("smc is running, no need to load smc firmware\n"); 2354 return 0; 2355 } 2356 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, 2357 boot_seq_done, 1); 2358 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 
SMC_SYSCON_MISC_CNTL, 2359 pre_fetcher_en, 1); 2360 2361 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); 2362 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); 2363 return ci_load_smc_ucode(hwmgr); 2364 } 2365 2366 static int ci_process_firmware_header(struct pp_hwmgr *hwmgr) 2367 { 2368 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2369 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend); 2370 2371 uint32_t tmp = 0; 2372 int result; 2373 bool error = false; 2374 2375 if (ci_upload_firmware(hwmgr)) 2376 return -EINVAL; 2377 2378 result = ci_read_smc_sram_dword(hwmgr, 2379 SMU7_FIRMWARE_HEADER_LOCATION + 2380 offsetof(SMU7_Firmware_Header, DpmTable), 2381 &tmp, SMC_RAM_END); 2382 2383 if (0 == result) 2384 ci_data->dpm_table_start = tmp; 2385 2386 error |= (0 != result); 2387 2388 result = ci_read_smc_sram_dword(hwmgr, 2389 SMU7_FIRMWARE_HEADER_LOCATION + 2390 offsetof(SMU7_Firmware_Header, SoftRegisters), 2391 &tmp, SMC_RAM_END); 2392 2393 if (0 == result) { 2394 data->soft_regs_start = tmp; 2395 ci_data->soft_regs_start = tmp; 2396 } 2397 2398 error |= (0 != result); 2399 2400 result = ci_read_smc_sram_dword(hwmgr, 2401 SMU7_FIRMWARE_HEADER_LOCATION + 2402 offsetof(SMU7_Firmware_Header, mcRegisterTable), 2403 &tmp, SMC_RAM_END); 2404 2405 if (0 == result) 2406 ci_data->mc_reg_table_start = tmp; 2407 2408 result = ci_read_smc_sram_dword(hwmgr, 2409 SMU7_FIRMWARE_HEADER_LOCATION + 2410 offsetof(SMU7_Firmware_Header, FanTable), 2411 &tmp, SMC_RAM_END); 2412 2413 if (0 == result) 2414 ci_data->fan_table_start = tmp; 2415 2416 error |= (0 != result); 2417 2418 result = ci_read_smc_sram_dword(hwmgr, 2419 SMU7_FIRMWARE_HEADER_LOCATION + 2420 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), 2421 &tmp, SMC_RAM_END); 2422 2423 if (0 == result) 2424 ci_data->arb_table_start = tmp; 2425 2426 error |= (0 != result); 2427 2428 result = ci_read_smc_sram_dword(hwmgr, 2429 SMU7_FIRMWARE_HEADER_LOCATION + 2430 offsetof(SMU7_Firmware_Header, Version), 2431 &tmp, SMC_RAM_END); 2432 2433 if (0 == result) 2434 hwmgr->microcode_version_info.SMC = tmp; 2435 2436 error |= (0 != result); 2437 2438 return error ? 
1 : 0; 2439 } 2440 2441 static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr) 2442 { 2443 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); 2444 } 2445 2446 static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg) 2447 { 2448 bool result = true; 2449 2450 switch (in_reg) { 2451 case mmMC_SEQ_RAS_TIMING: 2452 *out_reg = mmMC_SEQ_RAS_TIMING_LP; 2453 break; 2454 2455 case mmMC_SEQ_DLL_STBY: 2456 *out_reg = mmMC_SEQ_DLL_STBY_LP; 2457 break; 2458 2459 case mmMC_SEQ_G5PDX_CMD0: 2460 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; 2461 break; 2462 2463 case mmMC_SEQ_G5PDX_CMD1: 2464 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; 2465 break; 2466 2467 case mmMC_SEQ_G5PDX_CTRL: 2468 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; 2469 break; 2470 2471 case mmMC_SEQ_CAS_TIMING: 2472 *out_reg = mmMC_SEQ_CAS_TIMING_LP; 2473 break; 2474 2475 case mmMC_SEQ_MISC_TIMING: 2476 *out_reg = mmMC_SEQ_MISC_TIMING_LP; 2477 break; 2478 2479 case mmMC_SEQ_MISC_TIMING2: 2480 *out_reg = mmMC_SEQ_MISC_TIMING2_LP; 2481 break; 2482 2483 case mmMC_SEQ_PMG_DVS_CMD: 2484 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; 2485 break; 2486 2487 case mmMC_SEQ_PMG_DVS_CTL: 2488 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; 2489 break; 2490 2491 case mmMC_SEQ_RD_CTL_D0: 2492 *out_reg = mmMC_SEQ_RD_CTL_D0_LP; 2493 break; 2494 2495 case mmMC_SEQ_RD_CTL_D1: 2496 *out_reg = mmMC_SEQ_RD_CTL_D1_LP; 2497 break; 2498 2499 case mmMC_SEQ_WR_CTL_D0: 2500 *out_reg = mmMC_SEQ_WR_CTL_D0_LP; 2501 break; 2502 2503 case mmMC_SEQ_WR_CTL_D1: 2504 *out_reg = mmMC_SEQ_WR_CTL_D1_LP; 2505 break; 2506 2507 case mmMC_PMG_CMD_EMRS: 2508 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; 2509 break; 2510 2511 case mmMC_PMG_CMD_MRS: 2512 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; 2513 break; 2514 2515 case mmMC_PMG_CMD_MRS1: 2516 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; 2517 break; 2518 2519 case mmMC_SEQ_PMG_TIMING: 2520 *out_reg = mmMC_SEQ_PMG_TIMING_LP; 2521 break; 2522 2523 case mmMC_PMG_CMD_MRS2: 2524 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; 2525 break; 2526 2527 case mmMC_SEQ_WR_CTL_2: 2528 *out_reg = mmMC_SEQ_WR_CTL_2_LP; 2529 break; 2530 2531 default: 2532 result = false; 2533 break; 2534 } 2535 2536 return result; 2537 } 2538 2539 static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) 2540 { 2541 uint32_t i; 2542 uint16_t address; 2543 2544 for (i = 0; i < table->last; i++) { 2545 table->mc_reg_address[i].s0 = 2546 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) 2547 ? 
address : table->mc_reg_address[i].s1; 2548 } 2549 return 0; 2550 } 2551 2552 static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, 2553 struct ci_mc_reg_table *ni_table) 2554 { 2555 uint8_t i, j; 2556 2557 PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), 2558 "Invalid VramInfo table.", return -EINVAL); 2559 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), 2560 "Invalid VramInfo table.", return -EINVAL); 2561 2562 for (i = 0; i < table->last; i++) 2563 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; 2564 2565 ni_table->last = table->last; 2566 2567 for (i = 0; i < table->num_entries; i++) { 2568 ni_table->mc_reg_table_entry[i].mclk_max = 2569 table->mc_reg_table_entry[i].mclk_max; 2570 for (j = 0; j < table->last; j++) { 2571 ni_table->mc_reg_table_entry[i].mc_data[j] = 2572 table->mc_reg_table_entry[i].mc_data[j]; 2573 } 2574 } 2575 2576 ni_table->num_entries = table->num_entries; 2577 2578 return 0; 2579 } 2580 2581 static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr, 2582 struct ci_mc_reg_table *table) 2583 { 2584 uint8_t i, j, k; 2585 uint32_t temp_reg; 2586 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2587 2588 for (i = 0, j = table->last; i < table->last; i++) { 2589 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), 2590 "Invalid VramInfo table.", return -EINVAL); 2591 2592 switch (table->mc_reg_address[i].s1) { 2593 2594 case mmMC_SEQ_MISC1: 2595 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); 2596 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; 2597 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; 2598 for (k = 0; k < table->num_entries; k++) { 2599 table->mc_reg_table_entry[k].mc_data[j] = 2600 ((temp_reg & 0xffff0000)) | 2601 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); 2602 } 2603 j++; 2604 2605 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), 2606 "Invalid VramInfo table.", return -EINVAL); 2607 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); 2608 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; 2609 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; 2610 for (k = 0; k < table->num_entries; k++) { 2611 table->mc_reg_table_entry[k].mc_data[j] = 2612 (temp_reg & 0xffff0000) | 2613 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 2614 2615 if (!data->is_memory_gddr5) 2616 table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 2617 } 2618 j++; 2619 2620 if (!data->is_memory_gddr5) { 2621 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), 2622 "Invalid VramInfo table.", return -EINVAL); 2623 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; 2624 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; 2625 for (k = 0; k < table->num_entries; k++) { 2626 table->mc_reg_table_entry[k].mc_data[j] = 2627 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; 2628 } 2629 j++; 2630 } 2631 2632 break; 2633 2634 case mmMC_SEQ_RESERVE_M: 2635 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); 2636 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; 2637 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; 2638 for (k = 0; k < table->num_entries; k++) { 2639 table->mc_reg_table_entry[k].mc_data[j] = 2640 (temp_reg & 0xffff0000) | 2641 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 2642 } 2643 j++; 2644 break; 2645 2646 default: 2647 break; 2648 } 2649 2650 } 2651 2652 table->last = j; 2653 2654 return 0; 2655 } 2656 2657 static int ci_set_valid_flag(struct 
ci_mc_reg_table *table) 2658 { 2659 uint8_t i, j; 2660 2661 for (i = 0; i < table->last; i++) { 2662 for (j = 1; j < table->num_entries; j++) { 2663 if (table->mc_reg_table_entry[j-1].mc_data[i] != 2664 table->mc_reg_table_entry[j].mc_data[i]) { 2665 table->validflag |= (1 << i); 2666 break; 2667 } 2668 } 2669 } 2670 2671 return 0; 2672 } 2673 2674 static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) 2675 { 2676 int result; 2677 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 2678 pp_atomctrl_mc_reg_table *table; 2679 struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table; 2680 uint8_t module_index = ci_get_memory_modile_index(hwmgr); 2681 2682 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); 2683 2684 if (NULL == table) 2685 return -ENOMEM; 2686 2687 /* Program additional LP registers that are no longer programmed by VBIOS */ 2688 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); 2689 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); 2690 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); 2691 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); 2692 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); 2693 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); 2694 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); 2695 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); 2696 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); 2697 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); 2698 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); 2699 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); 2700 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); 2701 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); 2702 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); 2703 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); 2704 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); 2705 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); 2706 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); 2707 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); 2708 2709 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); 2710 2711 if (0 == result) 2712 result = ci_copy_vbios_smc_reg_table(table, ni_table); 2713 2714 if (0 == result) { 2715 ci_set_s0_mc_reg_index(ni_table); 2716 result = ci_set_mc_special_registers(hwmgr, 
ni_table); 2717 } 2718 2719 if (0 == result) 2720 ci_set_valid_flag(ni_table); 2721 2722 kfree(table); 2723 2724 return result; 2725 } 2726 2727 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) 2728 { 2729 return ci_is_smc_ram_running(hwmgr); 2730 } 2731 2732 static int ci_smu_init(struct pp_hwmgr *hwmgr) 2733 { 2734 struct ci_smumgr *ci_priv = NULL; 2735 2736 ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL); 2737 2738 if (ci_priv == NULL) 2739 return -ENOMEM; 2740 2741 hwmgr->smu_backend = ci_priv; 2742 2743 return 0; 2744 } 2745 2746 static int ci_smu_fini(struct pp_hwmgr *hwmgr) 2747 { 2748 kfree(hwmgr->smu_backend); 2749 hwmgr->smu_backend = NULL; 2750 return 0; 2751 } 2752 2753 static int ci_start_smu(struct pp_hwmgr *hwmgr) 2754 { 2755 return 0; 2756 } 2757 2758 static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr, 2759 void *profile_setting) 2760 { 2761 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 2762 struct ci_smumgr *smu_data = (struct ci_smumgr *) 2763 (hwmgr->smu_backend); 2764 struct profile_mode_setting *setting; 2765 struct SMU7_Discrete_GraphicsLevel *levels = 2766 smu_data->smc_state_table.GraphicsLevel; 2767 uint32_t array = smu_data->dpm_table_start + 2768 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); 2769 2770 uint32_t mclk_array = smu_data->dpm_table_start + 2771 offsetof(SMU7_Discrete_DpmTable, MemoryLevel); 2772 struct SMU7_Discrete_MemoryLevel *mclk_levels = 2773 smu_data->smc_state_table.MemoryLevel; 2774 uint32_t i; 2775 uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp; 2776 2777 if (profile_setting == NULL) 2778 return -EINVAL; 2779 2780 setting = (struct profile_mode_setting *)profile_setting; 2781 2782 if (setting->bupdate_sclk) { 2783 if (!data->sclk_dpm_key_disabled) 2784 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); 2785 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { 2786 if (levels[i].ActivityLevel != 2787 cpu_to_be16(setting->sclk_activity)) { 2788 levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity); 2789 2790 clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i) 2791 + offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel); 2792 offset = clk_activity_offset & ~0x3; 2793 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset)); 2794 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t)); 2795 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp)); 2796 2797 } 2798 if (levels[i].UpH != setting->sclk_up_hyst || 2799 levels[i].DownH != setting->sclk_down_hyst) { 2800 levels[i].UpH = setting->sclk_up_hyst; 2801 levels[i].DownH = setting->sclk_down_hyst; 2802 up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i) 2803 + offsetof(SMU7_Discrete_GraphicsLevel, UpH); 2804 down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i) 2805 + offsetof(SMU7_Discrete_GraphicsLevel, DownH); 2806 offset = up_hyst_offset & ~0x3; 2807 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset)); 2808 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t)); 2809 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t)); 2810 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp)); 2811 } 2812 } 2813 if (!data->sclk_dpm_key_disabled) 2814 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); 2815 } 
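/* Apply the same freeze/patch/unfreeze sequence to the memory (MCLK) levels. */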
2816 2817 if (setting->bupdate_mclk) { 2818 if (!data->mclk_dpm_key_disabled) 2819 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); 2820 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { 2821 if (mclk_levels[i].ActivityLevel != 2822 cpu_to_be16(setting->mclk_activity)) { 2823 mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity); 2824 2825 clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i) 2826 + offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel); 2827 offset = clk_activity_offset & ~0x3; 2828 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset)); 2829 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t)); 2830 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp)); 2831 2832 } 2833 if (mclk_levels[i].UpH != setting->mclk_up_hyst || 2834 mclk_levels[i].DownH != setting->mclk_down_hyst) { 2835 mclk_levels[i].UpH = setting->mclk_up_hyst; 2836 mclk_levels[i].DownH = setting->mclk_down_hyst; 2837 up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i) 2838 + offsetof(SMU7_Discrete_MemoryLevel, UpH); 2839 down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i) 2840 + offsetof(SMU7_Discrete_MemoryLevel, DownH); 2841 offset = up_hyst_offset & ~0x3; 2842 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset)); 2843 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t)); 2844 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t)); 2845 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp)); 2846 } 2847 } 2848 if (!data->mclk_dpm_key_disabled) 2849 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); 2850 } 2851 return 0; 2852 } 2853 2854 static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr) 2855 { 2856 struct amdgpu_device *adev = hwmgr->adev; 2857 struct smu7_hwmgr *data = hwmgr->backend; 2858 struct ci_smumgr *smu_data = hwmgr->smu_backend; 2859 struct phm_uvd_clock_voltage_dependency_table *uvd_table = 2860 hwmgr->dyn_state.uvd_clock_voltage_dependency_table; 2861 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 2862 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 2863 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 2864 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 2865 uint32_t max_vddc = adev->pm.ac_power ? 
hwmgr->dyn_state.max_clock_voltage_on_ac.vddc : 2866 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc; 2867 int32_t i; 2868 2869 if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0) 2870 smu_data->smc_state_table.UvdBootLevel = 0; 2871 else 2872 smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1; 2873 2874 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475, 2875 UvdBootLevel, smu_data->smc_state_table.UvdBootLevel); 2876 2877 data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; 2878 2879 for (i = uvd_table->count - 1; i >= 0; i--) { 2880 if (uvd_table->entries[i].v <= max_vddc) 2881 data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; 2882 if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM)) 2883 break; 2884 } 2885 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, 2886 data->dpm_level_enable_mask.uvd_dpm_enable_mask, 2887 NULL); 2888 2889 return 0; 2890 } 2891 2892 static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr) 2893 { 2894 struct amdgpu_device *adev = hwmgr->adev; 2895 struct smu7_hwmgr *data = hwmgr->backend; 2896 struct phm_vce_clock_voltage_dependency_table *vce_table = 2897 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 2898 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 2899 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 2900 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 2901 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 2902 uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc : 2903 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc; 2904 int32_t i; 2905 2906 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475, 2907 VceBootLevel, 0); /* temp hard code to level 0, vce can set min evclk*/ 2908 2909 data->dpm_level_enable_mask.vce_dpm_enable_mask = 0; 2910 2911 for (i = vce_table->count - 1; i >= 0; i--) { 2912 if (vce_table->entries[i].v <= max_vddc) 2913 data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; 2914 if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM)) 2915 break; 2916 } 2917 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, 2918 data->dpm_level_enable_mask.vce_dpm_enable_mask, 2919 NULL); 2920 2921 return 0; 2922 } 2923 2924 static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) 2925 { 2926 switch (type) { 2927 case SMU_UVD_TABLE: 2928 ci_update_uvd_smc_table(hwmgr); 2929 break; 2930 case SMU_VCE_TABLE: 2931 ci_update_vce_smc_table(hwmgr); 2932 break; 2933 default: 2934 break; 2935 } 2936 return 0; 2937 } 2938 2939 static void ci_reset_smc(struct pp_hwmgr *hwmgr) 2940 { 2941 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 2942 SMC_SYSCON_RESET_CNTL, 2943 rst_reg, 1); 2944 } 2945 2946 2947 static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr) 2948 { 2949 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, 2950 SMC_SYSCON_CLOCK_CNTL_0, 2951 ck_disable, 1); 2952 } 2953 2954 static int ci_stop_smc(struct pp_hwmgr *hwmgr) 2955 { 2956 ci_reset_smc(hwmgr); 2957 ci_stop_smc_clock(hwmgr); 2958 2959 return 0; 2960 } 2961 2962 const struct pp_smumgr_func ci_smu_funcs = { 2963 .name = "ci_smu", 2964 .smu_init = ci_smu_init, 2965 .smu_fini = ci_smu_fini, 2966 .start_smu = ci_start_smu, 2967 .check_fw_load_finish = NULL, 2968 .request_smu_load_fw = NULL, 2969 .request_smu_load_specific_fw = NULL, 2970 .send_msg_to_smc = ci_send_msg_to_smc, 2971 .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter, 2972 .get_argument = 
smu7_get_argument, 2973 .download_pptable_settings = NULL, 2974 .upload_pptable_settings = NULL, 2975 .get_offsetof = ci_get_offsetof, 2976 .process_firmware_header = ci_process_firmware_header, 2977 .init_smc_table = ci_init_smc_table, 2978 .update_sclk_threshold = ci_update_sclk_threshold, 2979 .thermal_setup_fan_table = ci_thermal_setup_fan_table, 2980 .populate_all_graphic_levels = ci_populate_all_graphic_levels, 2981 .populate_all_memory_levels = ci_populate_all_memory_levels, 2982 .get_mac_definition = ci_get_mac_definition, 2983 .initialize_mc_reg_table = ci_initialize_mc_reg_table, 2984 .is_dpm_running = ci_is_dpm_running, 2985 .update_dpm_settings = ci_update_dpm_settings, 2986 .update_smc_table = ci_update_smc_table, 2987 .stop_smc = ci_stop_smc, 2988 }; 2989