/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_11_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_smc.bin");

#define SMU11_VOLTAGE_SCALE 4

#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500  /* 500 ms */

#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL			0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE

#define mmTHM_BACO_CNTL_ARCT			0xA7
#define mmTHM_BACO_CNTL_ARCT_BASE_IDX		0

int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[SMU_FW_NAME_LEN];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	if (amdgpu_sriov_vf(adev) &&
	    ((adev->asic_type == CHIP_NAVI12) ||
	     (adev->asic_type == CHIP_SIENNA_CICHLID)))
		return 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	case CHIP_SIENNA_CICHLID:
		chip_name = "sienna_cichlid";
		break;
	case CHIP_NAVY_FLOUNDER:
		chip_name = "navy_flounder";
		break;
	case CHIP_DIMGREY_CAVEFISH:
		chip_name = "dimgrey_cavefish";
		break;
	case CHIP_BEIGE_GOBY:
		chip_name = "beige_goby";
		break;
	default:
		dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
		return -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

void smu_v11_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;
	adev->pm.fw_version = 0;
}

int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

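	/*
	 * Copy the ucode payload dword by dword into MP1 SRAM via
	 * WREG32_PCIE, then pulse the MP1 public reset below and poll
	 * MP1_FIRMWARE_FLAGS until the firmware reports
	 * INTERRUPTS_ENABLED. Note the loop starts at index 1 and stops
	 * one dword short of the end, so the first and last dwords of
	 * the ucode array are not written.
	 */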
	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}

int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
		(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

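	/*
	 * Worked example with illustrative values: a reported smu_version
	 * of 0x003A5200 unpacks as major = 0x003A (58), minor = 0x52 (82),
	 * debug = 0x00, i.e. SMU firmware 58.82.0.
	 */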
	if (smu->is_apu)
		adev->pm.fw_version = smu_version;

	switch (smu->adev->asic_type) {
	case CHIP_ARCTURUS:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	case CHIP_NAVI10:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case CHIP_NAVI12:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
		break;
	case CHIP_NAVI14:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	case CHIP_SIENNA_CICHLID:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
		break;
	case CHIP_NAVY_FLOUNDER:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
		break;
	case CHIP_VANGOGH:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
		break;
	case CHIP_BEIGE_GOBY:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
		break;
	case CHIP_CYAN_SKILLFISH:
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish;
		break;
	default:
		dev_err(smu->adev->dev, "smu unsupported ASIC type: %d\n", smu->adev->asic_type);
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. An if_version mismatch is not critical as our fw is designed
	 *    to be backward compatible.
	 * 2. New fw usually brings some optimizations. But those are
	 *    visible only with the paired driver.
	 * Considering the above, we just leave the user a warning message
	 * instead of halting driver load.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_version, smu_major, smu_minor, smu_debug);
		dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

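/*
 * PPTable source selection, as implemented below: a v2.x SMC firmware
 * header may carry a single embedded soft pptable (v2.0) or an array of
 * entries matched against the pp_table_id from the VBIOS (v2.1);
 * otherwise the pptable is fetched from the VBIOS powerplayinfo data
 * table.
 */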
int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	if (!amdgpu_sriov_vf(adev)) {
		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		version_major = le16_to_cpu(hdr->header.header_version_major);
		version_minor = le16_to_cpu(hdr->header.header_version_minor);
		if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
			dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
			switch (version_minor) {
			case 0:
				ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
				break;
			case 1:
				ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
								 smu->smu_table.boot_values.pp_table_id);
				break;
			default:
				ret = -EINVAL;
				break;
			}
			if (ret)
				return ret;
			goto out;
		}
	}

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
					     (uint8_t **)&table);
	if (ret)
		return ret;
	size = atom_table_size;

out:
	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Arcturus does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}

		smu_table->user_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->user_overdrive_table) {
			ret = -ENOMEM;
			goto err4_out;
		}
	}

	return 0;

err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

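/*
 * Note on the teardown below: kfree(NULL) is a no-op, so the frees can
 * run unconditionally; the pointers are reset to NULL afterwards so a
 * repeated fini cannot double-free.
 */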
int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->user_overdrive_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	kfree(smu_table->clocks_table);
	smu_table->gpu_metrics_table = NULL;
	smu_table->user_overdrive_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	smu_table->clocks_table = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	size_t size = smu->adev->asic_type == CHIP_VANGOGH ?
			sizeof(struct smu_11_5_power_context) :
			sizeof(struct smu_11_0_power_context);

	smu_power->power_context = kzalloc(size, GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = size;

	return 0;
}

int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
					    uint8_t clk_id,
					    uint8_t syspll_id,
					    uint32_t *clk_freq)
{
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
	int ret, index;

	input.clk_id = clk_id;
	input.syspll_id = syspll_id;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	*clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}

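/*
 * Worked example for the conversion above (illustrative numbers): a
 * SOCCLK of 960 MHz comes back from the ATOM table as 960,000,000 Hz;
 * dividing by 10000 stores 96,000, i.e. the same 10 kHz units as the
 * bootup_*_in10khz VBIOS fields consumed below.
 */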
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
		break;
	case 3:
	case 4:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.socclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dcefclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_ECLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.eclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_VCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.vclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dclk);

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2))
		smu_v11_0_atom_get_smu_clockinfo(smu->adev,
						 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
						 (uint8_t)SMU11_SYSPLL1_2_ID,
						 &smu->smu_table.boot_values.fclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL3_1_LCLK_ID,
					 (uint8_t)SMU11_SYSPLL3_1_ID,
					 &smu->smu_table.boot_values.lclk);

	return 0;
}

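/*
 * SMU messages carry a single 32-bit parameter, so the 64-bit addresses
 * below are handed over as two messages: upper_32_bits() via the
 * *AddrHigh message and lower_32_bits() via the *AddrLow message. The
 * same pattern repeats for the driver and tool table setters further
 * down.
 */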
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetSystemVirtualDramAddrHigh,
					      address_high,
					      NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetSystemVirtualDramAddrLow,
					      address_low,
					      NULL);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK failed!");

	return ret;
}

int smu_v11_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	struct amdgpu_device *adev = smu->adev;

	/*
	 * Navy Flounder, Dimgrey Cavefish and Beige Goby do not support
	 * changing the display count currently.
	 */
	if (adev->asic_type >= CHIP_NAVY_FLOUNDER &&
	    adev->asic_type <= CHIP_BEIGE_GOBY)
		return 0;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NumOfDisplays,
					       count,
					       NULL);
}

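/*
 * The allowed-feature bitmap is 64 bits wide and is likewise split in
 * two below: feature_mask[1] goes out via SetAllowedFeaturesMaskHigh
 * and feature_mask[0] via SetAllowedFeaturesMaskLow.
 */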
int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) {
		ret = -EINVAL;
		goto failed;
	}

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		goto failed;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					      feature_mask[0], NULL);
	if (ret)
		goto failed;

failed:
	return ret;
}

int smu_v11_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
				   SMU_MSG_DisableAllSmuFeatures), NULL);
	if (ret)
		return ret;

	bitmap_zero(feature->enabled, feature->feature_num);
	bitmap_zero(feature->supported, feature->feature_num);

	if (en) {
		ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
		if (ret)
			return ret;

		bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
			    feature->feature_num);
		bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
			    feature->feature_num);
	}

	return ret;
}

int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);

	return ret;
}

static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;
	int ret = 0;

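	/*
	 * Boot values are kept in 10 kHz units (see the *_in10khz VBIOS
	 * fields above), so the divisions by 100 below yield MHz.
	 */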
	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v11_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 */
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      (0 << 24) | (power_src << 16),
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 * BIT 0-15: PowerLimit
	 */
	n &= 0xFFFF;
	n |= 0 << 24;
	n |= (power_src) << 16;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = n;

	return 0;
}

static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_ReenableAcDcInterrupt,
				    NULL);
}

static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v11_0_ack_ac_dc_interrupt(smu);

	return ret;
}

void smu_v11_0_interrupt_work(struct smu_context *smu)
{
	if (smu_v11_0_ack_ac_dc_interrupt(smu))
		dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n");
}

int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (smu->smu_table.thermal_controller_type) {
		ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
		if (ret)
			return ret;
	}

	/*
	 * After init, interrupts (e.g. AC/DC) may have been triggered
	 * and missed before the driver registered for them.
	 */
	return smu_v11_0_process_pending_interrupt(smu);
}

int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

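/*
 * The telemetry VID is decoded as (6200 - vid * 25) / SMU11_VOLTAGE_SCALE,
 * which with a scale of 4 yields millivolts in 6.25 mV steps from a
 * 1550 mV ceiling: vid 0x00 -> 1550 mV, vid 0x40 (64) -> 1150 mV.
 */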
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_AUTO;
	else
		return smu->user_dpm_profile.fan_mode;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

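/*
 * The PWM duty cycle below is rescaled from the 0-255 user range onto
 * the hardware FMAX_DUTY100 range: duty = speed * duty100 / 255. For
 * example (illustrative values), duty100 = 100 and speed = 128 give
 * duty = 50, i.e. roughly 50% fan duty.
 */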
int
smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	speed = MIN(speed, 255);

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 255);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	/*
	 * The crystal_clock_freq used for the fan speed RPM calculation
	 * is always 25 MHz, so hardcode it as 2500 (in 10 kHz units).
	 */
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_period;

	/*
	 * To prevent possible overheating, some ASICs have a minimum
	 * fan speed requirement:
	 * - For some NV10 SKUs, the fan speed cannot be set lower than
	 *   700 RPM.
	 * - For some Sienna Cichlid SKUs, the fan speed cannot be set
	 *   lower than 500 RPM.
	 */
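	/*
	 * The tachometer period follows from the 25 MHz reference above:
	 * tach_period = 60 * 2500 * 10000 / (8 * rpm). For example, a
	 * request of 1500 RPM programs 1,500,000,000 / 12,000 = 125,000.
	 */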
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
}

int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
				uint32_t *speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	/*
	 * On pre Sienna Cichlid ASICs, a 0 RPM state may not be correctly
	 * detected from the registers. To work around this, report the
	 * fan speed as 0 PWM if that is what the user last requested.
	 */
	if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
	     && !smu->user_dpm_profile.fan_speed_pwm) {
		*speed = 0;
		return 0;
	}

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
			     CG_THERMAL_STATUS, FDO_PWM_DUTY);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)duty * 255;
	do_div(tmp64, duty100);
	*speed = MIN((uint32_t)tmp64, 255);

	return 0;
}

int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
				uint32_t *speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_status;
	uint64_t tmp64;

	/*
	 * On pre Sienna Cichlid ASICs, a 0 RPM state may not be correctly
	 * detected from the registers. To work around this, report the
	 * fan speed as 0 RPM if that is what the user last requested.
	 */
	if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
	     && !smu->user_dpm_profile.fan_speed_rpm) {
		*speed = 0;
		return 0;
	}

	tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;

	tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS);
	if (tach_status) {
		do_div(tmp64, tach_status);
		*speed = (uint32_t)tmp64;
	} else {
		dev_warn_once(adev->dev, "Got zero output on CG_TACH_STATUS reading!\n");
		*speed = 0;
	}

	return 0;
}

int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		if (!ret)
			ret = smu_v11_0_set_fan_speed_pwm(smu, 255);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetXgmiMode,
					       pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
					       NULL);
}

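/*
 * The THM alert thresholds programmed below are in whole degrees
 * Celsius: the min bound is converted from the finer-grained units of
 * smu->thermal_range (SMU_TEMPERATURE_UNITS_PER_CENTIGRADES), and both
 * bounds are written into DIG_THERM_INTL/INTH, which only take the low
 * 8 bits (hence the & 0xff).
 */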
static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = &adev->smu;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			  smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			   smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */

#define SMUIO_11_0__SRCID__SMUIO_GPIO19		83

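/*
 * 0xFE is the software interrupt ID used for SMC-to-host notifications:
 * the enable path above programs it into MP1_SMN_IH_SW_INT, and the
 * handler below matches MP1 interrupts against the same src_id.
 */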
static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = &adev->smu;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for the SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			dev_emerg(adev->dev, "ERROR: GPU over temperature range (SW CTF) detected!\n");
			/*
			 * SW CTF just occurred.
			 * Try to do a graceful shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
			orderly_poweroff(true);
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: GPU unknown THM interrupt src id (%d)\n",
				  src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault (aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == 0xfe) {
			/* ACK the SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case 0x3:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				schedule_work(&smu->interrupt_work);
				break;
			case 0x4:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				schedule_work(&smu->interrupt_work);
				break;
			case 0x7:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.set = smu_v11_0_set_irq_state,
	.process = smu_v11_0_irq_process,
};

int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register the CTF (GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				0xfe,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

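/*
 * The cached max sustainable clocks are in MHz (see
 * smu_v11_0_init_max_sustainable_clocks() above); the DC interface
 * below expects kHz, hence the multiplications by 1000.
 */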
int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
}

int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
				      enum smu_v11_0_baco_seq baco_seq)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}

bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
		return false;

	/* Arcturus does not support this bit mask */
	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	return true;
}

enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	enum smu_baco_state baco_state;

	mutex_lock(&smu_baco->mutex);
	baco_state = smu_baco->state;
	mutex_unlock(&smu_baco->mutex);

	return baco_state;
}

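/*
 * Sequence selection for BACO (commonly expanded as "Bus Active, Chip
 * Off") vs BAMACO (a variant commonly described as additionally keeping
 * memory powered): runtime-pm mode 2 requests the BAMACO sequence,
 * everything else plain BACO.
 */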
#define D3HOT_BACO_SEQUENCE 0
#define D3HOT_BAMACO_SEQUENCE 2

int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	uint32_t data;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	if (state == SMU_BACO_STATE_ENTER) {
		switch (adev->asic_type) {
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
		case CHIP_DIMGREY_CAVEFISH:
		case CHIP_BEIGE_GOBY:
			if (amdgpu_runtime_pm == 2)
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      SMU_MSG_EnterBaco,
								      D3HOT_BAMACO_SEQUENCE,
								      NULL);
			else
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      SMU_MSG_EnterBaco,
								      D3HOT_BACO_SEQUENCE,
								      NULL);
			break;
		default:
			if (!ras || !adev->ras_enabled ||
			    adev->gmc.xgmi.pending_reset) {
				if (adev->asic_type == CHIP_ARCTURUS) {
					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
					data |= 0x80000000;
					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
				} else {
					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
					data |= 0x80000000;
					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
				}

				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
			} else {
				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
			}
			break;
		}

	} else {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
		if (ret)
			goto out;

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}
	if (ret)
		goto out;

	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);
	return ret;
}

int smu_v11_0_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	return ret;
}

int smu_v11_0_baco_exit(struct smu_context *smu)
{
	return smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
}

int smu_v11_0_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
	if (!ret)
		msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LightSBR, enable ? 1 : 0, NULL);

	return ret;
}

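/*
 * Common parameter encoding for the DPM frequency queries below: the
 * clock ID is placed in the upper 16 bits of the message parameter,
 * leaving the lower 16 bits for a level index or frequency where one is
 * needed (see smu_v11_0_get_dpm_freq_by_index() and the soft/hard range
 * setters).
 */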
int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz units */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (clk_type == SMU_GFXCLK)
		amdgpu_gfx_off_ctrl(adev, false);

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	if (clk_type == SMU_GFXCLK)
		amdgpu_gfx_off_ctrl(adev, true);

	return ret;
}

int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

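/*
 * Performance level handling below: the forced levels pin a clock by
 * setting min == max, while AUTO restores the full min..max range from
 * the DPM tables; zero min/max pairs are simply left untouched.
 */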
int smu_v11_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	struct smu_11_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_11_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_11_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_11_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	/*
	 * Separate MCLK and SOCCLK soft min/max settings are not allowed
	 * on Arcturus.
	 */
	if (adev->asic_type == CHIP_ARCTURUS) {
		mclk_min = mclk_max = 0;
		socclk_min = socclk_max = 0;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max);
		if (ret)
			return ret;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max);
		if (ret)
			return ret;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v11_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,
					       pwr_source,
					       NULL);
}

int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
				    enum smu_clk_type clk_type,
				    uint16_t level,
				    uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      value);
	if (ret)
		return ret;

	/*
	 * BIT31: 0 - fine grained DPM, 1 - discrete DPM.
	 * Discrete DPM is not supported for now, so mask the flag off.
	 */
	*value = *value & 0x7fffffff;

	return ret;
}

int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
				  enum smu_clk_type clk_type,
				  uint32_t *value)
{
	/* level 0xff asks the SMU for the number of levels */
	return smu_v11_0_get_dpm_freq_by_index(smu,
					       clk_type,
					       0xff,
					       value);
}

int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_11_0_dpm_table *single_dpm_table)
{
	int ret = 0;
	uint32_t clk;
	int i;

	ret = smu_v11_0_get_dpm_level_count(smu,
					    clk_type,
					    &single_dpm_table->count);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
		return ret;
	}

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v11_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      i,
						      &clk);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
			return ret;
		}

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

		/* not else-if: with a single level, min and max coincide */
		if (i == 0)
			single_dpm_table->min = clk;
		if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;
	}

	return 0;
}
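/*
 * Sketch (hypothetical backend code): an ASIC-specific set_default_dpm_table()
 * callback could fill its per-clock tables with the helper above. The
 * "dpm_context" below stands for that ASIC's struct smu_11_0_dpm_context:
 *
 *	struct smu_11_0_dpm_table *gfx_table =
 *		&dpm_context->dpm_tables.gfx_table;
 *	int ret;
 *
 *	ret = smu_v11_0_set_single_dpm_table(smu, SMU_GFXCLK, gfx_table);
 *	if (ret)
 *		return ret;
 *
 * On success, gfx_table->min and gfx_table->max hold the level 0 and
 * level (count - 1) clocks respectively.
 */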
int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
				  enum smu_clk_type clk_type,
				  uint32_t *min_value,
				  uint32_t *max_value)
{
	uint32_t level_count = 0;
	int ret = 0;

	if (!min_value && !max_value)
		return -EINVAL;

	if (min_value) {
		/* by default, use the level 0 clock as the min value */
		ret = smu_v11_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      0,
						      min_value);
		if (ret)
			return ret;
	}

	if (max_value) {
		ret = smu_v11_0_get_dpm_level_count(smu,
						    clk_type,
						    &level_count);
		if (ret)
			return ret;

		ret = smu_v11_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      level_count - 1,
						      max_value);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}

uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
{
	uint32_t width_level;

	width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
	if (width_level > LINK_WIDTH_MAX)
		width_level = 0;

	return link_width[width_level];
}

int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}

uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
{
	uint32_t speed_level;

	speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
	if (speed_level > LINK_SPEED_MAX)
		speed_level = 0;

	return link_speed[speed_level];
}

int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
			      bool enablement)
{
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);

	return ret;
}
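/*
 * The deep sleep control below applies one pattern per clock domain: check
 * smu_cmn_feature_is_supported() and only then toggle the feature through
 * smu_cmn_feature_set_enabled(). A condensed, table-driven sketch of that
 * pattern (the ds_features array is illustrative, not part of the driver):
 *
 *	static const int ds_features[] = {
 *		SMU_FEATURE_DS_GFXCLK_BIT,
 *		SMU_FEATURE_DS_UCLK_BIT,
 *	};
 *	int i, ret;
 *
 *	for (i = 0; i < ARRAY_SIZE(ds_features); i++) {
 *		if (!smu_cmn_feature_is_supported(smu, ds_features[i]))
 *			continue;
 *		ret = smu_cmn_feature_set_enabled(smu, ds_features[i],
 *						  enablement);
 *		if (ret)
 *			return ret;
 *	}
 */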
"enable" : "disable"); 2204 return ret; 2205 } 2206 } 2207 2208 return ret; 2209 } 2210 2211 int smu_v11_0_restore_user_od_settings(struct smu_context *smu) 2212 { 2213 struct smu_table_context *table_context = &smu->smu_table; 2214 void *user_od_table = table_context->user_overdrive_table; 2215 int ret = 0; 2216 2217 ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)user_od_table, true); 2218 if (ret) 2219 dev_err(smu->adev->dev, "Failed to import overdrive table!\n"); 2220 2221 return ret; 2222 } 2223