1 /* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23 #include <linux/firmware.h> 24 #include <linux/module.h> 25 #include <linux/pci.h> 26 #include <linux/reboot.h> 27 28 #define SMU_11_0_PARTIAL_PPTABLE 29 #define SWSMU_CODE_LAYER_L3 30 31 #include "amdgpu.h" 32 #include "amdgpu_smu.h" 33 #include "atomfirmware.h" 34 #include "amdgpu_atomfirmware.h" 35 #include "amdgpu_atombios.h" 36 #include "smu_v11_0.h" 37 #include "soc15_common.h" 38 #include "atom.h" 39 #include "amdgpu_ras.h" 40 #include "smu_cmn.h" 41 42 #include "asic_reg/thm/thm_11_0_2_offset.h" 43 #include "asic_reg/thm/thm_11_0_2_sh_mask.h" 44 #include "asic_reg/mp/mp_11_0_offset.h" 45 #include "asic_reg/mp/mp_11_0_sh_mask.h" 46 #include "asic_reg/smuio/smuio_11_0_0_offset.h" 47 #include "asic_reg/smuio/smuio_11_0_0_sh_mask.h" 48 49 /* 50 * DO NOT use these for err/warn/info/debug messages. 
51 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 52 * They are more MGPU friendly. 53 */ 54 #undef pr_err 55 #undef pr_warn 56 #undef pr_info 57 #undef pr_debug 58 59 MODULE_FIRMWARE("amdgpu/arcturus_smc.bin"); 60 MODULE_FIRMWARE("amdgpu/navi10_smc.bin"); 61 MODULE_FIRMWARE("amdgpu/navi14_smc.bin"); 62 MODULE_FIRMWARE("amdgpu/navi12_smc.bin"); 63 MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin"); 64 MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin"); 65 MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin"); 66 MODULE_FIRMWARE("amdgpu/beige_goby_smc.bin"); 67 68 #define SMU11_VOLTAGE_SCALE 4 69 70 #define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms 71 72 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 73 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L 74 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 75 #define smnPCIE_LC_SPEED_CNTL 0x11140290 76 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 77 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE 78 79 #define mmTHM_BACO_CNTL_ARCT 0xA7 80 #define mmTHM_BACO_CNTL_ARCT_BASE_IDX 0 81 82 int smu_v11_0_init_microcode(struct smu_context *smu) 83 { 84 struct amdgpu_device *adev = smu->adev; 85 const char *chip_name; 86 char fw_name[SMU_FW_NAME_LEN]; 87 int err = 0; 88 const struct smc_firmware_header_v1_0 *hdr; 89 const struct common_firmware_header *header; 90 struct amdgpu_firmware_info *ucode = NULL; 91 92 if (amdgpu_sriov_vf(adev) && 93 ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) || 94 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)))) 95 return 0; 96 97 switch (adev->ip_versions[MP1_HWIP][0]) { 98 case IP_VERSION(11, 0, 0): 99 chip_name = "navi10"; 100 break; 101 case IP_VERSION(11, 0, 5): 102 chip_name = "navi14"; 103 break; 104 case IP_VERSION(11, 0, 9): 105 chip_name = "navi12"; 106 break; 107 case IP_VERSION(11, 0, 7): 108 chip_name = "sienna_cichlid"; 109 break; 110 case IP_VERSION(11, 0, 11): 111 chip_name = "navy_flounder"; 112 
break; 113 case IP_VERSION(11, 0, 12): 114 chip_name = "dimgrey_cavefish"; 115 break; 116 case IP_VERSION(11, 0, 13): 117 chip_name = "beige_goby"; 118 break; 119 case IP_VERSION(11, 0, 2): 120 chip_name = "arcturus"; 121 break; 122 default: 123 dev_err(adev->dev, "Unsupported IP version 0x%x\n", 124 adev->ip_versions[MP1_HWIP][0]); 125 return -EINVAL; 126 } 127 128 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); 129 130 err = request_firmware(&adev->pm.fw, fw_name, adev->dev); 131 if (err) 132 goto out; 133 err = amdgpu_ucode_validate(adev->pm.fw); 134 if (err) 135 goto out; 136 137 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 138 amdgpu_ucode_print_smc_hdr(&hdr->header); 139 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); 140 141 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 142 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 143 ucode->ucode_id = AMDGPU_UCODE_ID_SMC; 144 ucode->fw = adev->pm.fw; 145 header = (const struct common_firmware_header *)ucode->fw->data; 146 adev->firmware.fw_size += 147 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 148 } 149 150 out: 151 if (err) { 152 DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n", 153 fw_name); 154 release_firmware(adev->pm.fw); 155 adev->pm.fw = NULL; 156 } 157 return err; 158 } 159 160 void smu_v11_0_fini_microcode(struct smu_context *smu) 161 { 162 struct amdgpu_device *adev = smu->adev; 163 164 release_firmware(adev->pm.fw); 165 adev->pm.fw = NULL; 166 adev->pm.fw_version = 0; 167 } 168 169 int smu_v11_0_load_microcode(struct smu_context *smu) 170 { 171 struct amdgpu_device *adev = smu->adev; 172 const uint32_t *src; 173 const struct smc_firmware_header_v1_0 *hdr; 174 uint32_t addr_start = MP1_SRAM; 175 uint32_t i; 176 uint32_t smc_fw_size; 177 uint32_t mp1_fw_flags; 178 179 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 180 src = (const uint32_t *)(adev->pm.fw->data + 181 
le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 182 smc_fw_size = hdr->header.ucode_size_bytes; 183 184 for (i = 1; i < smc_fw_size/4 - 1; i++) { 185 WREG32_PCIE(addr_start, src[i]); 186 addr_start += 4; 187 } 188 189 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), 190 1 & MP1_SMN_PUB_CTRL__RESET_MASK); 191 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), 192 1 & ~MP1_SMN_PUB_CTRL__RESET_MASK); 193 194 for (i = 0; i < adev->usec_timeout; i++) { 195 mp1_fw_flags = RREG32_PCIE(MP1_Public | 196 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 197 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 198 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 199 break; 200 udelay(1); 201 } 202 203 if (i == adev->usec_timeout) 204 return -ETIME; 205 206 return 0; 207 } 208 209 int smu_v11_0_check_fw_status(struct smu_context *smu) 210 { 211 struct amdgpu_device *adev = smu->adev; 212 uint32_t mp1_fw_flags; 213 214 mp1_fw_flags = RREG32_PCIE(MP1_Public | 215 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 216 217 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 218 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 219 return 0; 220 221 return -EIO; 222 } 223 224 int smu_v11_0_check_fw_version(struct smu_context *smu) 225 { 226 struct amdgpu_device *adev = smu->adev; 227 uint32_t if_version = 0xff, smu_version = 0xff; 228 uint16_t smu_major; 229 uint8_t smu_minor, smu_debug; 230 int ret = 0; 231 232 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 233 if (ret) 234 return ret; 235 236 smu_major = (smu_version >> 16) & 0xffff; 237 smu_minor = (smu_version >> 8) & 0xff; 238 smu_debug = (smu_version >> 0) & 0xff; 239 if (smu->is_apu) 240 adev->pm.fw_version = smu_version; 241 242 switch (adev->ip_versions[MP1_HWIP][0]) { 243 case IP_VERSION(11, 0, 0): 244 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10; 245 break; 246 case IP_VERSION(11, 0, 9): 247 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12; 248 break; 249 case 
IP_VERSION(11, 0, 5): 250 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14; 251 break; 252 case IP_VERSION(11, 0, 7): 253 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid; 254 break; 255 case IP_VERSION(11, 0, 11): 256 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder; 257 break; 258 case IP_VERSION(11, 5, 0): 259 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH; 260 break; 261 case IP_VERSION(11, 0, 12): 262 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish; 263 break; 264 case IP_VERSION(11, 0, 13): 265 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby; 266 break; 267 case IP_VERSION(11, 0, 8): 268 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish; 269 break; 270 case IP_VERSION(11, 0, 2): 271 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT; 272 break; 273 default: 274 dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n", 275 adev->ip_versions[MP1_HWIP][0]); 276 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV; 277 break; 278 } 279 280 /* 281 * 1. if_version mismatch is not critical as our fw is designed 282 * to be backward compatible. 283 * 2. New fw usually brings some optimizations. But that's visible 284 * only on the paired driver. 285 * Considering above, we just leave user a warning message instead 286 * of halt driver loading. 
287 */ 288 if (if_version != smu->smc_driver_if_version) { 289 dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " 290 "smu fw version = 0x%08x (%d.%d.%d)\n", 291 smu->smc_driver_if_version, if_version, 292 smu_version, smu_major, smu_minor, smu_debug); 293 dev_warn(smu->adev->dev, "SMU driver if version not matched\n"); 294 } 295 296 return ret; 297 } 298 299 static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) 300 { 301 struct amdgpu_device *adev = smu->adev; 302 uint32_t ppt_offset_bytes; 303 const struct smc_firmware_header_v2_0 *v2; 304 305 v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data; 306 307 ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes); 308 *size = le32_to_cpu(v2->ppt_size_bytes); 309 *table = (uint8_t *)v2 + ppt_offset_bytes; 310 311 return 0; 312 } 313 314 static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, 315 uint32_t *size, uint32_t pptable_id) 316 { 317 struct amdgpu_device *adev = smu->adev; 318 const struct smc_firmware_header_v2_1 *v2_1; 319 struct smc_soft_pptable_entry *entries; 320 uint32_t pptable_count = 0; 321 int i = 0; 322 323 v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data; 324 entries = (struct smc_soft_pptable_entry *) 325 ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset)); 326 pptable_count = le32_to_cpu(v2_1->pptable_count); 327 for (i = 0; i < pptable_count; i++) { 328 if (le32_to_cpu(entries[i].id) == pptable_id) { 329 *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); 330 *size = le32_to_cpu(entries[i].ppt_size_bytes); 331 break; 332 } 333 } 334 335 if (i == pptable_count) 336 return -EINVAL; 337 338 return 0; 339 } 340 341 int smu_v11_0_setup_pptable(struct smu_context *smu) 342 { 343 struct amdgpu_device *adev = smu->adev; 344 const struct smc_firmware_header_v1_0 *hdr; 345 int ret, index; 346 uint32_t size = 0; 347 uint16_t atom_table_size; 348 uint8_t 
frev, crev; 349 void *table; 350 uint16_t version_major, version_minor; 351 352 if (!amdgpu_sriov_vf(adev)) { 353 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 354 version_major = le16_to_cpu(hdr->header.header_version_major); 355 version_minor = le16_to_cpu(hdr->header.header_version_minor); 356 if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) { 357 dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id); 358 switch (version_minor) { 359 case 0: 360 ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size); 361 break; 362 case 1: 363 ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size, 364 smu->smu_table.boot_values.pp_table_id); 365 break; 366 default: 367 ret = -EINVAL; 368 break; 369 } 370 if (ret) 371 return ret; 372 goto out; 373 } 374 } 375 376 dev_info(adev->dev, "use vbios provided pptable\n"); 377 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 378 powerplayinfo); 379 380 ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev, 381 (uint8_t **)&table); 382 if (ret) 383 return ret; 384 size = atom_table_size; 385 386 out: 387 if (!smu->smu_table.power_play_table) 388 smu->smu_table.power_play_table = table; 389 if (!smu->smu_table.power_play_table_size) 390 smu->smu_table.power_play_table_size = size; 391 392 return 0; 393 } 394 395 int smu_v11_0_init_smc_tables(struct smu_context *smu) 396 { 397 struct smu_table_context *smu_table = &smu->smu_table; 398 struct smu_table *tables = smu_table->tables; 399 int ret = 0; 400 401 smu_table->driver_pptable = 402 kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL); 403 if (!smu_table->driver_pptable) { 404 ret = -ENOMEM; 405 goto err0_out; 406 } 407 408 smu_table->max_sustainable_clocks = 409 kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL); 410 if (!smu_table->max_sustainable_clocks) { 411 ret = -ENOMEM; 412 goto err1_out; 413 } 414 415 /* Arcturus does not support 
OVERDRIVE */ 416 if (tables[SMU_TABLE_OVERDRIVE].size) { 417 smu_table->overdrive_table = 418 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 419 if (!smu_table->overdrive_table) { 420 ret = -ENOMEM; 421 goto err2_out; 422 } 423 424 smu_table->boot_overdrive_table = 425 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 426 if (!smu_table->boot_overdrive_table) { 427 ret = -ENOMEM; 428 goto err3_out; 429 } 430 431 smu_table->user_overdrive_table = 432 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 433 if (!smu_table->user_overdrive_table) { 434 ret = -ENOMEM; 435 goto err4_out; 436 } 437 438 } 439 440 return 0; 441 442 err4_out: 443 kfree(smu_table->boot_overdrive_table); 444 err3_out: 445 kfree(smu_table->overdrive_table); 446 err2_out: 447 kfree(smu_table->max_sustainable_clocks); 448 err1_out: 449 kfree(smu_table->driver_pptable); 450 err0_out: 451 return ret; 452 } 453 454 int smu_v11_0_fini_smc_tables(struct smu_context *smu) 455 { 456 struct smu_table_context *smu_table = &smu->smu_table; 457 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 458 459 kfree(smu_table->gpu_metrics_table); 460 kfree(smu_table->user_overdrive_table); 461 kfree(smu_table->boot_overdrive_table); 462 kfree(smu_table->overdrive_table); 463 kfree(smu_table->max_sustainable_clocks); 464 kfree(smu_table->driver_pptable); 465 kfree(smu_table->clocks_table); 466 smu_table->gpu_metrics_table = NULL; 467 smu_table->user_overdrive_table = NULL; 468 smu_table->boot_overdrive_table = NULL; 469 smu_table->overdrive_table = NULL; 470 smu_table->max_sustainable_clocks = NULL; 471 smu_table->driver_pptable = NULL; 472 smu_table->clocks_table = NULL; 473 kfree(smu_table->hardcode_pptable); 474 smu_table->hardcode_pptable = NULL; 475 476 kfree(smu_table->metrics_table); 477 kfree(smu_table->watermarks_table); 478 smu_table->metrics_table = NULL; 479 smu_table->watermarks_table = NULL; 480 smu_table->metrics_time = 0; 481 482 kfree(smu_dpm->dpm_context); 483 
kfree(smu_dpm->golden_dpm_context); 484 kfree(smu_dpm->dpm_current_power_state); 485 kfree(smu_dpm->dpm_request_power_state); 486 smu_dpm->dpm_context = NULL; 487 smu_dpm->golden_dpm_context = NULL; 488 smu_dpm->dpm_context_size = 0; 489 smu_dpm->dpm_current_power_state = NULL; 490 smu_dpm->dpm_request_power_state = NULL; 491 492 return 0; 493 } 494 495 int smu_v11_0_init_power(struct smu_context *smu) 496 { 497 struct amdgpu_device *adev = smu->adev; 498 struct smu_power_context *smu_power = &smu->smu_power; 499 size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ? 500 sizeof(struct smu_11_5_power_context) : 501 sizeof(struct smu_11_0_power_context); 502 503 smu_power->power_context = kzalloc(size, GFP_KERNEL); 504 if (!smu_power->power_context) 505 return -ENOMEM; 506 smu_power->power_context_size = size; 507 508 return 0; 509 } 510 511 int smu_v11_0_fini_power(struct smu_context *smu) 512 { 513 struct smu_power_context *smu_power = &smu->smu_power; 514 515 kfree(smu_power->power_context); 516 smu_power->power_context = NULL; 517 smu_power->power_context_size = 0; 518 519 return 0; 520 } 521 522 static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev, 523 uint8_t clk_id, 524 uint8_t syspll_id, 525 uint32_t *clk_freq) 526 { 527 struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; 528 struct atom_get_smu_clock_info_output_parameters_v3_1 *output; 529 int ret, index; 530 531 input.clk_id = clk_id; 532 input.syspll_id = syspll_id; 533 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; 534 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, 535 getsmuclockinfo); 536 537 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, 538 (uint32_t *)&input); 539 if (ret) 540 return -EINVAL; 541 542 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; 543 *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; 544 545 return 0; 546 } 547 548 int 
smu_v11_0_get_vbios_bootup_values(struct smu_context *smu) 549 { 550 int ret, index; 551 uint16_t size; 552 uint8_t frev, crev; 553 struct atom_common_table_header *header; 554 struct atom_firmware_info_v3_3 *v_3_3; 555 struct atom_firmware_info_v3_1 *v_3_1; 556 557 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 558 firmwareinfo); 559 560 ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 561 (uint8_t **)&header); 562 if (ret) 563 return ret; 564 565 if (header->format_revision != 3) { 566 dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n"); 567 return -EINVAL; 568 } 569 570 switch (header->content_revision) { 571 case 0: 572 case 1: 573 case 2: 574 v_3_1 = (struct atom_firmware_info_v3_1 *)header; 575 smu->smu_table.boot_values.revision = v_3_1->firmware_revision; 576 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; 577 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; 578 smu->smu_table.boot_values.socclk = 0; 579 smu->smu_table.boot_values.dcefclk = 0; 580 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; 581 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; 582 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; 583 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; 584 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; 585 smu->smu_table.boot_values.pp_table_id = 0; 586 smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability; 587 break; 588 case 3: 589 case 4: 590 default: 591 v_3_3 = (struct atom_firmware_info_v3_3 *)header; 592 smu->smu_table.boot_values.revision = v_3_3->firmware_revision; 593 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; 594 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; 595 smu->smu_table.boot_values.socclk = 0; 596 smu->smu_table.boot_values.dcefclk = 0; 597 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; 598 
smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; 599 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; 600 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; 601 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; 602 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; 603 smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability; 604 } 605 606 smu->smu_table.boot_values.format_revision = header->format_revision; 607 smu->smu_table.boot_values.content_revision = header->content_revision; 608 609 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 610 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID, 611 (uint8_t)0, 612 &smu->smu_table.boot_values.socclk); 613 614 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 615 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID, 616 (uint8_t)0, 617 &smu->smu_table.boot_values.dcefclk); 618 619 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 620 (uint8_t)SMU11_SYSPLL0_ECLK_ID, 621 (uint8_t)0, 622 &smu->smu_table.boot_values.eclk); 623 624 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 625 (uint8_t)SMU11_SYSPLL0_VCLK_ID, 626 (uint8_t)0, 627 &smu->smu_table.boot_values.vclk); 628 629 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 630 (uint8_t)SMU11_SYSPLL0_DCLK_ID, 631 (uint8_t)0, 632 &smu->smu_table.boot_values.dclk); 633 634 if ((smu->smu_table.boot_values.format_revision == 3) && 635 (smu->smu_table.boot_values.content_revision >= 2)) 636 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 637 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID, 638 (uint8_t)SMU11_SYSPLL1_2_ID, 639 &smu->smu_table.boot_values.fclk); 640 641 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 642 (uint8_t)SMU11_SYSPLL3_1_LCLK_ID, 643 (uint8_t)SMU11_SYSPLL3_1_ID, 644 &smu->smu_table.boot_values.lclk); 645 646 return 0; 647 } 648 649 int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) 650 { 651 struct smu_table_context *smu_table = &smu->smu_table; 652 struct smu_table *memory_pool = &smu_table->memory_pool; 653 int ret = 0; 654 uint64_t address; 655 
uint32_t address_low, address_high; 656 657 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL) 658 return ret; 659 660 address = (uintptr_t)memory_pool->cpu_addr; 661 address_high = (uint32_t)upper_32_bits(address); 662 address_low = (uint32_t)lower_32_bits(address); 663 664 ret = smu_cmn_send_smc_msg_with_param(smu, 665 SMU_MSG_SetSystemVirtualDramAddrHigh, 666 address_high, 667 NULL); 668 if (ret) 669 return ret; 670 ret = smu_cmn_send_smc_msg_with_param(smu, 671 SMU_MSG_SetSystemVirtualDramAddrLow, 672 address_low, 673 NULL); 674 if (ret) 675 return ret; 676 677 address = memory_pool->mc_address; 678 address_high = (uint32_t)upper_32_bits(address); 679 address_low = (uint32_t)lower_32_bits(address); 680 681 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh, 682 address_high, NULL); 683 if (ret) 684 return ret; 685 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow, 686 address_low, NULL); 687 if (ret) 688 return ret; 689 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize, 690 (uint32_t)memory_pool->size, NULL); 691 if (ret) 692 return ret; 693 694 return ret; 695 } 696 697 int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) 698 { 699 int ret; 700 701 ret = smu_cmn_send_smc_msg_with_param(smu, 702 SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); 703 if (ret) 704 dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!"); 705 706 return ret; 707 } 708 709 int smu_v11_0_set_driver_table_location(struct smu_context *smu) 710 { 711 struct smu_table *driver_table = &smu->smu_table.driver_table; 712 int ret = 0; 713 714 if (driver_table->mc_address) { 715 ret = smu_cmn_send_smc_msg_with_param(smu, 716 SMU_MSG_SetDriverDramAddrHigh, 717 upper_32_bits(driver_table->mc_address), 718 NULL); 719 if (!ret) 720 ret = smu_cmn_send_smc_msg_with_param(smu, 721 SMU_MSG_SetDriverDramAddrLow, 722 lower_32_bits(driver_table->mc_address), 723 NULL); 724 } 725 726 return 
ret; 727 } 728 729 int smu_v11_0_set_tool_table_location(struct smu_context *smu) 730 { 731 int ret = 0; 732 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; 733 734 if (tool_table->mc_address) { 735 ret = smu_cmn_send_smc_msg_with_param(smu, 736 SMU_MSG_SetToolsDramAddrHigh, 737 upper_32_bits(tool_table->mc_address), 738 NULL); 739 if (!ret) 740 ret = smu_cmn_send_smc_msg_with_param(smu, 741 SMU_MSG_SetToolsDramAddrLow, 742 lower_32_bits(tool_table->mc_address), 743 NULL); 744 } 745 746 return ret; 747 } 748 749 int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) 750 { 751 struct amdgpu_device *adev = smu->adev; 752 753 /* Navy_Flounder/Dimgrey_Cavefish do not support to change 754 * display num currently 755 */ 756 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) || 757 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) || 758 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) || 759 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) 760 return 0; 761 762 return smu_cmn_send_smc_msg_with_param(smu, 763 SMU_MSG_NumOfDisplays, 764 count, 765 NULL); 766 } 767 768 769 int smu_v11_0_set_allowed_mask(struct smu_context *smu) 770 { 771 struct smu_feature *feature = &smu->smu_feature; 772 int ret = 0; 773 uint32_t feature_mask[2]; 774 775 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) { 776 ret = -EINVAL; 777 goto failed; 778 } 779 780 bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); 781 782 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, 783 feature_mask[1], NULL); 784 if (ret) 785 goto failed; 786 787 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow, 788 feature_mask[0], NULL); 789 if (ret) 790 goto failed; 791 792 failed: 793 return ret; 794 } 795 796 int smu_v11_0_system_features_control(struct smu_context *smu, 797 bool en) 798 { 799 struct smu_feature *feature = &smu->smu_feature; 
800 uint32_t feature_mask[2]; 801 int ret = 0; 802 803 ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures : 804 SMU_MSG_DisableAllSmuFeatures), NULL); 805 if (ret) 806 return ret; 807 808 bitmap_zero(feature->enabled, feature->feature_num); 809 bitmap_zero(feature->supported, feature->feature_num); 810 811 if (en) { 812 ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2); 813 if (ret) 814 return ret; 815 816 bitmap_copy(feature->enabled, (unsigned long *)&feature_mask, 817 feature->feature_num); 818 bitmap_copy(feature->supported, (unsigned long *)&feature_mask, 819 feature->feature_num); 820 } 821 822 return ret; 823 } 824 825 int smu_v11_0_notify_display_change(struct smu_context *smu) 826 { 827 int ret = 0; 828 829 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && 830 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) 831 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); 832 833 return ret; 834 } 835 836 static int 837 smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, 838 enum smu_clk_type clock_select) 839 { 840 int ret = 0; 841 int clk_id; 842 843 if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) || 844 (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0)) 845 return 0; 846 847 clk_id = smu_cmn_to_asic_specific_index(smu, 848 CMN2ASIC_MAPPING_CLK, 849 clock_select); 850 if (clk_id < 0) 851 return -EINVAL; 852 853 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq, 854 clk_id << 16, clock); 855 if (ret) { 856 dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!"); 857 return ret; 858 } 859 860 if (*clock != 0) 861 return 0; 862 863 /* if DC limit is zero, return AC limit */ 864 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, 865 clk_id << 16, clock); 866 if (ret) { 867 dev_err(smu->adev->dev, "[GetMaxSustainableClock] 
failed to get max AC clock from SMC!"); 868 return ret; 869 } 870 871 return 0; 872 } 873 874 int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) 875 { 876 struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks = 877 smu->smu_table.max_sustainable_clocks; 878 int ret = 0; 879 880 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100; 881 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100; 882 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100; 883 max_sustainable_clocks->display_clock = 0xFFFFFFFF; 884 max_sustainable_clocks->phy_clock = 0xFFFFFFFF; 885 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; 886 887 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 888 ret = smu_v11_0_get_max_sustainable_clock(smu, 889 &(max_sustainable_clocks->uclock), 890 SMU_UCLK); 891 if (ret) { 892 dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!", 893 __func__); 894 return ret; 895 } 896 } 897 898 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 899 ret = smu_v11_0_get_max_sustainable_clock(smu, 900 &(max_sustainable_clocks->soc_clock), 901 SMU_SOCCLK); 902 if (ret) { 903 dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!", 904 __func__); 905 return ret; 906 } 907 } 908 909 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { 910 ret = smu_v11_0_get_max_sustainable_clock(smu, 911 &(max_sustainable_clocks->dcef_clock), 912 SMU_DCEFCLK); 913 if (ret) { 914 dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!", 915 __func__); 916 return ret; 917 } 918 919 ret = smu_v11_0_get_max_sustainable_clock(smu, 920 &(max_sustainable_clocks->display_clock), 921 SMU_DISPCLK); 922 if (ret) { 923 dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!", 924 __func__); 925 return ret; 926 } 927 ret = smu_v11_0_get_max_sustainable_clock(smu, 928 &(max_sustainable_clocks->phy_clock), 929 SMU_PHYCLK); 930 if (ret) 
{ 931 dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!", 932 __func__); 933 return ret; 934 } 935 ret = smu_v11_0_get_max_sustainable_clock(smu, 936 &(max_sustainable_clocks->pixel_clock), 937 SMU_PIXCLK); 938 if (ret) { 939 dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!", 940 __func__); 941 return ret; 942 } 943 } 944 945 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) 946 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; 947 948 return 0; 949 } 950 951 int smu_v11_0_get_current_power_limit(struct smu_context *smu, 952 uint32_t *power_limit) 953 { 954 int power_src; 955 int ret = 0; 956 957 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) 958 return -EINVAL; 959 960 power_src = smu_cmn_to_asic_specific_index(smu, 961 CMN2ASIC_MAPPING_PWR, 962 smu->adev->pm.ac_power ? 963 SMU_POWER_SOURCE_AC : 964 SMU_POWER_SOURCE_DC); 965 if (power_src < 0) 966 return -EINVAL; 967 968 /* 969 * BIT 24-31: ControllerId (only PPT0 is supported for now) 970 * BIT 16-23: PowerSource 971 */ 972 ret = smu_cmn_send_smc_msg_with_param(smu, 973 SMU_MSG_GetPptLimit, 974 (0 << 24) | (power_src << 16), 975 power_limit); 976 if (ret) 977 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__); 978 979 return ret; 980 } 981 982 int smu_v11_0_set_power_limit(struct smu_context *smu, 983 enum smu_ppt_limit_type limit_type, 984 uint32_t limit) 985 { 986 int power_src; 987 int ret = 0; 988 uint32_t limit_param; 989 990 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 991 return -EINVAL; 992 993 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 994 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 995 return -EOPNOTSUPP; 996 } 997 998 power_src = smu_cmn_to_asic_specific_index(smu, 999 CMN2ASIC_MAPPING_PWR, 1000 smu->adev->pm.ac_power ? 
					SMU_POWER_SOURCE_AC :
					SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 * BIT 0-15: PowerLimit
	 */
	limit_param = (limit & 0xFFFF);
	limit_param |= 0 << 24;
	limit_param |= (power_src) << 16;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	/* Cache the applied limit for later queries. */
	smu->current_power_limit = limit;

	return 0;
}

/* Ask the SMC to re-arm the AC/DC transition interrupt. */
static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_ReenableAcDcInterrupt,
				    NULL);
}

/*
 * Acknowledge any AC/DC event that may have fired before the driver
 * registered its interrupt handler. Only relevant when DC mode is
 * controlled by GPIO and the ACDC feature is enabled.
 */
static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v11_0_ack_ac_dc_interrupt(smu);

	return ret;
}

/* Deferred-work handler: ack the AC/DC interrupt outside IRQ context. */
void smu_v11_0_interrupt_work(struct smu_context *smu)
{
	if (smu_v11_0_ack_ac_dc_interrupt(smu))
		dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n");
}

/*
 * Enable the thermal alert interrupt source (if a thermal controller is
 * present) and replay any interrupt missed during init.
 */
int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (smu->smu_table.thermal_controller_type) {
		ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
		if (ret)
			return ret;
	}

	/*
	 * After init there might have been missed interrupts triggered
	 * before driver registers for interrupt (Ex. AC/DC).
	 */
	return smu_v11_0_process_pending_interrupt(smu);
}

/* Drop the thermal alert interrupt reference taken in enable_thermal_alert. */
int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

/*
 * Convert an SVI0 telemetry VID code to a voltage value.
 * Formula: (6200 - vid * 25) / SMU11_VOLTAGE_SCALE — presumably yielding
 * millivolts from 6.25mV VID steps; verify against the SVI2 spec.
 */
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

/*
 * Read the current GFX rail voltage from the SVI0 telemetry plane 0
 * register and return it via *value.
 */
int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;

}

/*
 * Handle a display clock/voltage request from the display stack by
 * translating the pp clock type to an SMU clock and programming a hard
 * minimum frequency. The requested frequency arrives in kHz and is
 * converted to MHz for the SMC.
 */
int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	/* Only act when DCEFCLK or UCLK DPM is actually enabled. */
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
		smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		/* UCLK switching may be disabled (e.g. by display config). */
		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v11_0_set_hard_freq_limited_range(smu,
clk_select, clk_freq, 0); 1136 1137 if(clk_select == SMU_UCLK) 1138 smu->hard_min_uclk_req_from_dal = clk_freq; 1139 } 1140 1141 failed: 1142 return ret; 1143 } 1144 1145 int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) 1146 { 1147 int ret = 0; 1148 struct amdgpu_device *adev = smu->adev; 1149 1150 switch (adev->ip_versions[MP1_HWIP][0]) { 1151 case IP_VERSION(11, 0, 0): 1152 case IP_VERSION(11, 0, 5): 1153 case IP_VERSION(11, 0, 9): 1154 case IP_VERSION(11, 0, 7): 1155 case IP_VERSION(11, 0, 11): 1156 case IP_VERSION(11, 0, 12): 1157 case IP_VERSION(11, 0, 13): 1158 case IP_VERSION(11, 5, 0): 1159 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 1160 return 0; 1161 if (enable) 1162 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); 1163 else 1164 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); 1165 break; 1166 default: 1167 break; 1168 } 1169 1170 return ret; 1171 } 1172 1173 uint32_t 1174 smu_v11_0_get_fan_control_mode(struct smu_context *smu) 1175 { 1176 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) 1177 return AMD_FAN_CTRL_AUTO; 1178 else 1179 return smu->user_dpm_profile.fan_mode; 1180 } 1181 1182 static int 1183 smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control) 1184 { 1185 int ret = 0; 1186 1187 if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) 1188 return 0; 1189 1190 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control); 1191 if (ret) 1192 dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!", 1193 __func__, (auto_fan_control ? 
"Start" : "Stop")); 1194 1195 return ret; 1196 } 1197 1198 static int 1199 smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) 1200 { 1201 struct amdgpu_device *adev = smu->adev; 1202 1203 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, 1204 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), 1205 CG_FDO_CTRL2, TMIN, 0)); 1206 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, 1207 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), 1208 CG_FDO_CTRL2, FDO_PWM_MODE, mode)); 1209 1210 return 0; 1211 } 1212 1213 int 1214 smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed) 1215 { 1216 struct amdgpu_device *adev = smu->adev; 1217 uint32_t duty100, duty; 1218 uint64_t tmp64; 1219 1220 speed = MIN(speed, 255); 1221 1222 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), 1223 CG_FDO_CTRL1, FMAX_DUTY100); 1224 if (!duty100) 1225 return -EINVAL; 1226 1227 tmp64 = (uint64_t)speed * duty100; 1228 do_div(tmp64, 255); 1229 duty = (uint32_t)tmp64; 1230 1231 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0, 1232 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0), 1233 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); 1234 1235 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); 1236 } 1237 1238 int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, 1239 uint32_t speed) 1240 { 1241 struct amdgpu_device *adev = smu->adev; 1242 /* 1243 * crystal_clock_freq used for fan speed rpm calculation is 1244 * always 25Mhz. So, hardcode it as 2500(in 10K unit). 1245 */ 1246 uint32_t crystal_clock_freq = 2500; 1247 uint32_t tach_period; 1248 1249 /* 1250 * To prevent from possible overheat, some ASICs may have requirement 1251 * for minimum fan speed: 1252 * - For some NV10 SKU, the fan speed cannot be set lower than 1253 * 700 RPM. 1254 * - For some Sienna Cichlid SKU, the fan speed cannot be set 1255 * lower than 500 RPM. 
1256 */ 1257 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1258 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL, 1259 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL), 1260 CG_TACH_CTRL, TARGET_PERIOD, 1261 tach_period)); 1262 1263 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM); 1264 } 1265 1266 int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu, 1267 uint32_t *speed) 1268 { 1269 struct amdgpu_device *adev = smu->adev; 1270 uint32_t duty100, duty; 1271 uint64_t tmp64; 1272 1273 /* 1274 * For pre Sienna Cichlid ASICs, the 0 RPM may be not correctly 1275 * detected via register retrieving. To workaround this, we will 1276 * report the fan speed as 0 PWM if user just requested such. 1277 */ 1278 if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM) 1279 && !smu->user_dpm_profile.fan_speed_pwm) { 1280 *speed = 0; 1281 return 0; 1282 } 1283 1284 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), 1285 CG_FDO_CTRL1, FMAX_DUTY100); 1286 duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS), 1287 CG_THERMAL_STATUS, FDO_PWM_DUTY); 1288 if (!duty100) 1289 return -EINVAL; 1290 1291 tmp64 = (uint64_t)duty * 255; 1292 do_div(tmp64, duty100); 1293 *speed = MIN((uint32_t)tmp64, 255); 1294 1295 return 0; 1296 } 1297 1298 int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu, 1299 uint32_t *speed) 1300 { 1301 struct amdgpu_device *adev = smu->adev; 1302 uint32_t crystal_clock_freq = 2500; 1303 uint32_t tach_status; 1304 uint64_t tmp64; 1305 1306 /* 1307 * For pre Sienna Cichlid ASICs, the 0 RPM may be not correctly 1308 * detected via register retrieving. To workaround this, we will 1309 * report the fan speed as 0 RPM if user just requested such. 
1310 */ 1311 if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM) 1312 && !smu->user_dpm_profile.fan_speed_rpm) { 1313 *speed = 0; 1314 return 0; 1315 } 1316 1317 tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000; 1318 1319 tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS); 1320 if (tach_status) { 1321 do_div(tmp64, tach_status); 1322 *speed = (uint32_t)tmp64; 1323 } else { 1324 dev_warn_once(adev->dev, "Got zero output on CG_TACH_STATUS reading!\n"); 1325 *speed = 0; 1326 } 1327 1328 return 0; 1329 } 1330 1331 int 1332 smu_v11_0_set_fan_control_mode(struct smu_context *smu, 1333 uint32_t mode) 1334 { 1335 int ret = 0; 1336 1337 switch (mode) { 1338 case AMD_FAN_CTRL_NONE: 1339 ret = smu_v11_0_auto_fan_control(smu, 0); 1340 if (!ret) 1341 ret = smu_v11_0_set_fan_speed_pwm(smu, 255); 1342 break; 1343 case AMD_FAN_CTRL_MANUAL: 1344 ret = smu_v11_0_auto_fan_control(smu, 0); 1345 break; 1346 case AMD_FAN_CTRL_AUTO: 1347 ret = smu_v11_0_auto_fan_control(smu, 1); 1348 break; 1349 default: 1350 break; 1351 } 1352 1353 if (ret) { 1354 dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__); 1355 return -EINVAL; 1356 } 1357 1358 return ret; 1359 } 1360 1361 int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, 1362 uint32_t pstate) 1363 { 1364 return smu_cmn_send_smc_msg_with_param(smu, 1365 SMU_MSG_SetXgmiMode, 1366 pstate ? 
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, 1367 NULL); 1368 } 1369 1370 static int smu_v11_0_set_irq_state(struct amdgpu_device *adev, 1371 struct amdgpu_irq_src *source, 1372 unsigned tyep, 1373 enum amdgpu_interrupt_state state) 1374 { 1375 struct smu_context *smu = &adev->smu; 1376 uint32_t low, high; 1377 uint32_t val = 0; 1378 1379 switch (state) { 1380 case AMDGPU_IRQ_STATE_DISABLE: 1381 /* For THM irqs */ 1382 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); 1383 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1); 1384 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1); 1385 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); 1386 1387 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); 1388 1389 /* For MP1 SW irqs */ 1390 val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); 1391 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 1392 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val); 1393 1394 break; 1395 case AMDGPU_IRQ_STATE_ENABLE: 1396 /* For THM irqs */ 1397 low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, 1398 smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES); 1399 high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, 1400 smu->thermal_range.software_shutdown_temp); 1401 1402 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); 1403 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); 1404 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 1405 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); 1406 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); 1407 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); 1408 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); 1409 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); 1410 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); 1411 1412 val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); 1413 val |= (1 << 
THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); 1414 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); 1415 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val); 1416 1417 /* For MP1 SW irqs */ 1418 val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT); 1419 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 1420 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 1421 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val); 1422 1423 val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); 1424 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 1425 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val); 1426 1427 break; 1428 default: 1429 break; 1430 } 1431 1432 return 0; 1433 } 1434 1435 #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ 1436 #define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ 1437 1438 #define SMUIO_11_0__SRCID__SMUIO_GPIO19 83 1439 1440 static int smu_v11_0_irq_process(struct amdgpu_device *adev, 1441 struct amdgpu_irq_src *source, 1442 struct amdgpu_iv_entry *entry) 1443 { 1444 struct smu_context *smu = &adev->smu; 1445 uint32_t client_id = entry->client_id; 1446 uint32_t src_id = entry->src_id; 1447 /* 1448 * ctxid is used to distinguish different 1449 * events for SMCToHost interrupt. 1450 */ 1451 uint32_t ctxid = entry->src_data[0]; 1452 uint32_t data; 1453 1454 if (client_id == SOC15_IH_CLIENTID_THM) { 1455 switch (src_id) { 1456 case THM_11_0__SRCID__THM_DIG_THERM_L2H: 1457 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); 1458 /* 1459 * SW CTF just occurred. 1460 * Try to do a graceful shutdown to prevent further damage. 
1461 */ 1462 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); 1463 orderly_poweroff(true); 1464 break; 1465 case THM_11_0__SRCID__THM_DIG_THERM_H2L: 1466 dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); 1467 break; 1468 default: 1469 dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", 1470 src_id); 1471 break; 1472 } 1473 } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) { 1474 dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n"); 1475 /* 1476 * HW CTF just occurred. Shutdown to prevent further damage. 1477 */ 1478 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n"); 1479 orderly_poweroff(true); 1480 } else if (client_id == SOC15_IH_CLIENTID_MP1) { 1481 if (src_id == 0xfe) { 1482 /* ACK SMUToHost interrupt */ 1483 data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); 1484 data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); 1485 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data); 1486 1487 switch (ctxid) { 1488 case 0x3: 1489 dev_dbg(adev->dev, "Switched to AC mode!\n"); 1490 schedule_work(&smu->interrupt_work); 1491 break; 1492 case 0x4: 1493 dev_dbg(adev->dev, "Switched to DC mode!\n"); 1494 schedule_work(&smu->interrupt_work); 1495 break; 1496 case 0x7: 1497 /* 1498 * Increment the throttle interrupt counter 1499 */ 1500 atomic64_inc(&smu->throttle_int_counter); 1501 1502 if (!atomic_read(&adev->throttling_logging_enabled)) 1503 return 0; 1504 1505 if (__ratelimit(&adev->throttling_logging_rs)) 1506 schedule_work(&smu->throttling_logging_work); 1507 1508 break; 1509 } 1510 } 1511 } 1512 1513 return 0; 1514 } 1515 1516 static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs = 1517 { 1518 .set = smu_v11_0_set_irq_state, 1519 .process = smu_v11_0_irq_process, 1520 }; 1521 1522 int smu_v11_0_register_irq_handler(struct smu_context *smu) 1523 { 1524 struct amdgpu_device *adev = smu->adev; 1525 struct 
amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	/* SMC-to-host interrupt uses the fixed source id 0xfe. */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				0xfe,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

/*
 * Export the cached max sustainable clocks to the display stack, converted
 * from MHz to kHz. DSC/DPP/fabric clocks are not tracked here and report 0.
 */
int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
		struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
			(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
			(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
			(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
			(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
			(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
			(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

/* Notify the SMC of an azalia (HD audio) D3 PME event. */
int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
}

/* Arm the requested D3 (BACO/BAMACO) entry sequence in the SMC. */
int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
				      enum smu_v11_0_baco_seq baco_seq)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}

/*
 * Whether BACO can be used: requires platform support, no SR-IOV, and --
 * when the BACO feature bit exists -- that it is actually enabled.
 */
bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
		return false;

	/* Arcturus does not support this bit mask */
	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	return true;
}

/* Read the driver-tracked BACO state under the BACO mutex. */
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	enum smu_baco_state baco_state;

	mutex_lock(&smu_baco->mutex);
	baco_state = smu_baco->state;
	mutex_unlock(&smu_baco->mutex);

	return baco_state;
}

#define D3HOT_BACO_SEQUENCE 0
#define D3HOT_BAMACO_SEQUENCE 2

/*
 * Transition into or out of BACO.
 *
 * Sienna Cichlid family parts select BACO vs BAMACO based on the
 * amdgpu_runtime_pm module parameter. Other parts take the legacy path:
 * when RAS is not active, bit 31 of THM_BACO_CNTL (ARCT variant register on
 * Arcturus) is set before sending EnterBaco with param 0; with RAS active,
 * EnterBaco is sent with param 1 instead. On exit, vbios scratch registers
 * 6/7 are cleared so the coming ASIC reinit starts clean.
 *
 * NOTE(review): the exact meaning of THM_BACO_CNTL bit 31 and the 0/1
 * EnterBaco params is not visible here -- confirm against the PPSMC spec.
 */
int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	uint32_t data;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	if (state == SMU_BACO_STATE_ENTER) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			if (amdgpu_runtime_pm == 2)
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      SMU_MSG_EnterBaco,
								      D3HOT_BAMACO_SEQUENCE,
								      NULL);
			else
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      SMU_MSG_EnterBaco,
								      D3HOT_BACO_SEQUENCE,
								      NULL);
			break;
		default:
			if (!ras || !adev->ras_enabled ||
			    adev->gmc.xgmi.pending_reset) {
				if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {
					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
					data |= 0x80000000;
					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
				} else {
					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
					data |= 0x80000000;
					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
				}

				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
			} else {
				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
			}
			break;
		}

	} else {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
		if (ret)
			goto out;

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}
	if (ret)
		goto out;

	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);
	return ret;
}

/* Enter BACO and give the transition a short settle time. */
int smu_v11_0_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	return ret;
}

/* Exit BACO. */
int smu_v11_0_baco_exit(struct smu_context *smu)
{
	return smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
}

/*
 * Request a mode-1 reset from the SMC and wait the documented settle time
 * before letting the caller proceed.
 */
int smu_v11_0_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
	if (!ret)
		msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

/* Enable/disable light secondary bus reset (SBR) handling in the SMC. */
int smu_v11_0_set_light_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LightSBR, enable ?
1 : 0, NULL); 1732 1733 return ret; 1734 } 1735 1736 1737 int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, 1738 uint32_t *min, uint32_t *max) 1739 { 1740 int ret = 0, clk_id = 0; 1741 uint32_t param = 0; 1742 uint32_t clock_limit; 1743 1744 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { 1745 switch (clk_type) { 1746 case SMU_MCLK: 1747 case SMU_UCLK: 1748 clock_limit = smu->smu_table.boot_values.uclk; 1749 break; 1750 case SMU_GFXCLK: 1751 case SMU_SCLK: 1752 clock_limit = smu->smu_table.boot_values.gfxclk; 1753 break; 1754 case SMU_SOCCLK: 1755 clock_limit = smu->smu_table.boot_values.socclk; 1756 break; 1757 default: 1758 clock_limit = 0; 1759 break; 1760 } 1761 1762 /* clock in Mhz unit */ 1763 if (min) 1764 *min = clock_limit / 100; 1765 if (max) 1766 *max = clock_limit / 100; 1767 1768 return 0; 1769 } 1770 1771 clk_id = smu_cmn_to_asic_specific_index(smu, 1772 CMN2ASIC_MAPPING_CLK, 1773 clk_type); 1774 if (clk_id < 0) { 1775 ret = -EINVAL; 1776 goto failed; 1777 } 1778 param = (clk_id & 0xffff) << 16; 1779 1780 if (max) { 1781 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max); 1782 if (ret) 1783 goto failed; 1784 } 1785 1786 if (min) { 1787 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); 1788 if (ret) 1789 goto failed; 1790 } 1791 1792 failed: 1793 return ret; 1794 } 1795 1796 int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, 1797 enum smu_clk_type clk_type, 1798 uint32_t min, 1799 uint32_t max) 1800 { 1801 struct amdgpu_device *adev = smu->adev; 1802 int ret = 0, clk_id = 0; 1803 uint32_t param; 1804 1805 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1806 return 0; 1807 1808 clk_id = smu_cmn_to_asic_specific_index(smu, 1809 CMN2ASIC_MAPPING_CLK, 1810 clk_type); 1811 if (clk_id < 0) 1812 return clk_id; 1813 1814 if (clk_type == SMU_GFXCLK) 1815 amdgpu_gfx_off_ctrl(adev, false); 1816 1817 if (max > 0) { 1818 param = (uint32_t)((clk_id << 16) 
| (max & 0xffff)); 1819 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, 1820 param, NULL); 1821 if (ret) 1822 goto out; 1823 } 1824 1825 if (min > 0) { 1826 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 1827 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, 1828 param, NULL); 1829 if (ret) 1830 goto out; 1831 } 1832 1833 out: 1834 if (clk_type == SMU_GFXCLK) 1835 amdgpu_gfx_off_ctrl(adev, true); 1836 1837 return ret; 1838 } 1839 1840 int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu, 1841 enum smu_clk_type clk_type, 1842 uint32_t min, 1843 uint32_t max) 1844 { 1845 int ret = 0, clk_id = 0; 1846 uint32_t param; 1847 1848 if (min <= 0 && max <= 0) 1849 return -EINVAL; 1850 1851 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1852 return 0; 1853 1854 clk_id = smu_cmn_to_asic_specific_index(smu, 1855 CMN2ASIC_MAPPING_CLK, 1856 clk_type); 1857 if (clk_id < 0) 1858 return clk_id; 1859 1860 if (max > 0) { 1861 param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 1862 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, 1863 param, NULL); 1864 if (ret) 1865 return ret; 1866 } 1867 1868 if (min > 0) { 1869 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 1870 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, 1871 param, NULL); 1872 if (ret) 1873 return ret; 1874 } 1875 1876 return ret; 1877 } 1878 1879 int smu_v11_0_set_performance_level(struct smu_context *smu, 1880 enum amd_dpm_forced_level level) 1881 { 1882 struct smu_11_0_dpm_context *dpm_context = 1883 smu->smu_dpm.dpm_context; 1884 struct smu_11_0_dpm_table *gfx_table = 1885 &dpm_context->dpm_tables.gfx_table; 1886 struct smu_11_0_dpm_table *mem_table = 1887 &dpm_context->dpm_tables.uclk_table; 1888 struct smu_11_0_dpm_table *soc_table = 1889 &dpm_context->dpm_tables.soc_table; 1890 struct smu_umd_pstate_table *pstate_table = 1891 &smu->pstate_table; 1892 struct amdgpu_device *adev = smu->adev; 1893 uint32_t sclk_min 
= 0, sclk_max = 0; 1894 uint32_t mclk_min = 0, mclk_max = 0; 1895 uint32_t socclk_min = 0, socclk_max = 0; 1896 int ret = 0; 1897 1898 switch (level) { 1899 case AMD_DPM_FORCED_LEVEL_HIGH: 1900 sclk_min = sclk_max = gfx_table->max; 1901 mclk_min = mclk_max = mem_table->max; 1902 socclk_min = socclk_max = soc_table->max; 1903 break; 1904 case AMD_DPM_FORCED_LEVEL_LOW: 1905 sclk_min = sclk_max = gfx_table->min; 1906 mclk_min = mclk_max = mem_table->min; 1907 socclk_min = socclk_max = soc_table->min; 1908 break; 1909 case AMD_DPM_FORCED_LEVEL_AUTO: 1910 sclk_min = gfx_table->min; 1911 sclk_max = gfx_table->max; 1912 mclk_min = mem_table->min; 1913 mclk_max = mem_table->max; 1914 socclk_min = soc_table->min; 1915 socclk_max = soc_table->max; 1916 break; 1917 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1918 sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; 1919 mclk_min = mclk_max = pstate_table->uclk_pstate.standard; 1920 socclk_min = socclk_max = pstate_table->socclk_pstate.standard; 1921 break; 1922 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1923 sclk_min = sclk_max = pstate_table->gfxclk_pstate.min; 1924 break; 1925 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1926 mclk_min = mclk_max = pstate_table->uclk_pstate.min; 1927 break; 1928 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1929 sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak; 1930 mclk_min = mclk_max = pstate_table->uclk_pstate.peak; 1931 socclk_min = socclk_max = pstate_table->socclk_pstate.peak; 1932 break; 1933 case AMD_DPM_FORCED_LEVEL_MANUAL: 1934 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1935 return 0; 1936 default: 1937 dev_err(adev->dev, "Invalid performance level %d\n", level); 1938 return -EINVAL; 1939 } 1940 1941 /* 1942 * Separate MCLK and SOCCLK soft min/max settings are not allowed 1943 * on Arcturus. 
1944 */ 1945 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) { 1946 mclk_min = mclk_max = 0; 1947 socclk_min = socclk_max = 0; 1948 } 1949 1950 if (sclk_min && sclk_max) { 1951 ret = smu_v11_0_set_soft_freq_limited_range(smu, 1952 SMU_GFXCLK, 1953 sclk_min, 1954 sclk_max); 1955 if (ret) 1956 return ret; 1957 } 1958 1959 if (mclk_min && mclk_max) { 1960 ret = smu_v11_0_set_soft_freq_limited_range(smu, 1961 SMU_MCLK, 1962 mclk_min, 1963 mclk_max); 1964 if (ret) 1965 return ret; 1966 } 1967 1968 if (socclk_min && socclk_max) { 1969 ret = smu_v11_0_set_soft_freq_limited_range(smu, 1970 SMU_SOCCLK, 1971 socclk_min, 1972 socclk_max); 1973 if (ret) 1974 return ret; 1975 } 1976 1977 return ret; 1978 } 1979 1980 int smu_v11_0_set_power_source(struct smu_context *smu, 1981 enum smu_power_src_type power_src) 1982 { 1983 int pwr_source; 1984 1985 pwr_source = smu_cmn_to_asic_specific_index(smu, 1986 CMN2ASIC_MAPPING_PWR, 1987 (uint32_t)power_src); 1988 if (pwr_source < 0) 1989 return -EINVAL; 1990 1991 return smu_cmn_send_smc_msg_with_param(smu, 1992 SMU_MSG_NotifyPowerSource, 1993 pwr_source, 1994 NULL); 1995 } 1996 1997 int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu, 1998 enum smu_clk_type clk_type, 1999 uint16_t level, 2000 uint32_t *value) 2001 { 2002 int ret = 0, clk_id = 0; 2003 uint32_t param; 2004 2005 if (!value) 2006 return -EINVAL; 2007 2008 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 2009 return 0; 2010 2011 clk_id = smu_cmn_to_asic_specific_index(smu, 2012 CMN2ASIC_MAPPING_CLK, 2013 clk_type); 2014 if (clk_id < 0) 2015 return clk_id; 2016 2017 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); 2018 2019 ret = smu_cmn_send_smc_msg_with_param(smu, 2020 SMU_MSG_GetDpmFreqByIndex, 2021 param, 2022 value); 2023 if (ret) 2024 return ret; 2025 2026 /* 2027 * BIT31: 0 - Fine grained DPM, 1 - Dicrete DPM 2028 * now, we un-support it 2029 */ 2030 *value = *value & 0x7fffffff; 2031 2032 return ret; 2033 } 2034 2035 int 
smu_v11_0_get_dpm_level_count(struct smu_context *smu, 2036 enum smu_clk_type clk_type, 2037 uint32_t *value) 2038 { 2039 return smu_v11_0_get_dpm_freq_by_index(smu, 2040 clk_type, 2041 0xff, 2042 value); 2043 } 2044 2045 int smu_v11_0_set_single_dpm_table(struct smu_context *smu, 2046 enum smu_clk_type clk_type, 2047 struct smu_11_0_dpm_table *single_dpm_table) 2048 { 2049 int ret = 0; 2050 uint32_t clk; 2051 int i; 2052 2053 ret = smu_v11_0_get_dpm_level_count(smu, 2054 clk_type, 2055 &single_dpm_table->count); 2056 if (ret) { 2057 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__); 2058 return ret; 2059 } 2060 2061 for (i = 0; i < single_dpm_table->count; i++) { 2062 ret = smu_v11_0_get_dpm_freq_by_index(smu, 2063 clk_type, 2064 i, 2065 &clk); 2066 if (ret) { 2067 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__); 2068 return ret; 2069 } 2070 2071 single_dpm_table->dpm_levels[i].value = clk; 2072 single_dpm_table->dpm_levels[i].enabled = true; 2073 2074 if (i == 0) 2075 single_dpm_table->min = clk; 2076 else if (i == single_dpm_table->count - 1) 2077 single_dpm_table->max = clk; 2078 } 2079 2080 return 0; 2081 } 2082 2083 int smu_v11_0_get_dpm_level_range(struct smu_context *smu, 2084 enum smu_clk_type clk_type, 2085 uint32_t *min_value, 2086 uint32_t *max_value) 2087 { 2088 uint32_t level_count = 0; 2089 int ret = 0; 2090 2091 if (!min_value && !max_value) 2092 return -EINVAL; 2093 2094 if (min_value) { 2095 /* by default, level 0 clock value as min value */ 2096 ret = smu_v11_0_get_dpm_freq_by_index(smu, 2097 clk_type, 2098 0, 2099 min_value); 2100 if (ret) 2101 return ret; 2102 } 2103 2104 if (max_value) { 2105 ret = smu_v11_0_get_dpm_level_count(smu, 2106 clk_type, 2107 &level_count); 2108 if (ret) 2109 return ret; 2110 2111 ret = smu_v11_0_get_dpm_freq_by_index(smu, 2112 clk_type, 2113 level_count - 1, 2114 max_value); 2115 if (ret) 2116 return ret; 2117 } 2118 2119 return ret; 2120 } 2121 2122 int 
smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu) 2123 { 2124 struct amdgpu_device *adev = smu->adev; 2125 2126 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 2127 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 2128 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 2129 } 2130 2131 uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu) 2132 { 2133 uint32_t width_level; 2134 2135 width_level = smu_v11_0_get_current_pcie_link_width_level(smu); 2136 if (width_level > LINK_WIDTH_MAX) 2137 width_level = 0; 2138 2139 return link_width[width_level]; 2140 } 2141 2142 int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu) 2143 { 2144 struct amdgpu_device *adev = smu->adev; 2145 2146 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 2147 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 2148 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 2149 } 2150 2151 uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu) 2152 { 2153 uint32_t speed_level; 2154 2155 speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu); 2156 if (speed_level > LINK_SPEED_MAX) 2157 speed_level = 0; 2158 2159 return link_speed[speed_level]; 2160 } 2161 2162 int smu_v11_0_gfx_ulv_control(struct smu_context *smu, 2163 bool enablement) 2164 { 2165 int ret = 0; 2166 2167 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT)) 2168 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement); 2169 2170 return ret; 2171 } 2172 2173 int smu_v11_0_deep_sleep_control(struct smu_context *smu, 2174 bool enablement) 2175 { 2176 struct amdgpu_device *adev = smu->adev; 2177 int ret = 0; 2178 2179 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) { 2180 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement); 2181 if (ret) { 2182 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? 
"enable" : "disable"); 2183 return ret; 2184 } 2185 } 2186 2187 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) { 2188 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement); 2189 if (ret) { 2190 dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable"); 2191 return ret; 2192 } 2193 } 2194 2195 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) { 2196 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement); 2197 if (ret) { 2198 dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable"); 2199 return ret; 2200 } 2201 } 2202 2203 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) { 2204 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement); 2205 if (ret) { 2206 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable"); 2207 return ret; 2208 } 2209 } 2210 2211 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) { 2212 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement); 2213 if (ret) { 2214 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable"); 2215 return ret; 2216 } 2217 } 2218 2219 return ret; 2220 } 2221 2222 int smu_v11_0_restore_user_od_settings(struct smu_context *smu) 2223 { 2224 struct smu_table_context *table_context = &smu->smu_table; 2225 void *user_od_table = table_context->user_overdrive_table; 2226 int ret = 0; 2227 2228 ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)user_od_table, true); 2229 if (ret) 2230 dev_err(smu->adev->dev, "Failed to import overdrive table!\n"); 2231 2232 return ret; 2233 } 2234