/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_11_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_smc.bin");

#define SMU11_VOLTAGE_SCALE 4

#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500  //500ms

#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL			0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE

#define mmTHM_BACO_CNTL_ARCT			0xA7
#define mmTHM_BACO_CNTL_ARCT_BASE_IDX		0

static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t data, loop = 0;

	do {
		usleep_range(1000, 1100);
		data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
	} while ((data & 0x100) && (++loop < 100));
}

int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	char ucode_prefix[30];
	char fw_name[SMU_FW_NAME_LEN];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	if (amdgpu_sriov_vf(adev) &&
	    ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) ||
	     (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7))))
		return 0;

	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);

	err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err)
		amdgpu_ucode_release(&adev->pm.fw);
	return err;
}

void smu_v11_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	amdgpu_ucode_release(&adev->pm.fw);
	adev->pm.fw_version = 0;
}

int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}
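
	/*
	 * With the image staged in MP1 SRAM, pulse the MP1 processor
	 * through reset (assert then deassert via MP1_SMN_PUB_CTRL) and
	 * wait for the freshly loaded firmware to report its interrupts
	 * as enabled.
	 */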
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}

int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
		(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu)
		adev->pm.fw_version = smu_version;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case IP_VERSION(11, 0, 9):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
		break;
	case IP_VERSION(11, 0, 5):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	case IP_VERSION(11, 0, 7):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
		break;
	case IP_VERSION(11, 0, 11):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
		break;
	case IP_VERSION(11, 5, 0):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
		break;
	case IP_VERSION(11, 0, 12):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
		break;
	case IP_VERSION(11, 0, 13):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
		break;
	case IP_VERSION(11, 0, 8):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish;
		break;
	case IP_VERSION(11, 0, 2):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	default:
		dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
			adev->ip_versions[MP1_HWIP][0]);
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering the above, we just leave the user a verbal message
	 * instead of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_info(smu->adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	if (!amdgpu_sriov_vf(adev)) {
		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		version_major = le16_to_cpu(hdr->header.header_version_major);
		version_minor = le16_to_cpu(hdr->header.header_version_minor);
		if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
			dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
			switch (version_minor) {
			case 0:
				ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
				break;
			case 1:
				ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
								 smu->smu_table.boot_values.pp_table_id);
				break;
			default:
				ret = -EINVAL;
				break;
			}
			if (ret)
				return ret;
			goto out;
		}
	}

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
					     (uint8_t **)&table);
	if (ret)
		return ret;
	size = atom_table_size;

out:
	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}
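
/*
 * Allocate the driver-side copies of the SMC tables. The error paths
 * below unwind in strict reverse order of allocation: each errN_out
 * label frees everything that was allocated before the failing kzalloc.
 */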
int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Arcturus does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}

		smu_table->user_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->user_overdrive_table) {
			ret = -ENOMEM;
			goto err4_out;
		}
	}

	return 0;

err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->user_overdrive_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	kfree(smu_table->clocks_table);
	smu_table->gpu_metrics_table = NULL;
	smu_table->user_overdrive_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	smu_table->clocks_table = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->driver_smu_config_table);
	kfree(smu_table->ecc_table);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->driver_smu_config_table = NULL;
	smu_table->ecc_table = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}
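
/*
 * Vangogh (MP1 11.5.0) uses its own power-context layout
 * (smu_11_5_power_context); every other SMU v11 ASIC uses the common
 * smu_11_0_power_context, hence the size selection below.
 */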
int smu_v11_0_init_power(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_power_context *smu_power = &smu->smu_power;
	size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ?
		      sizeof(struct smu_11_5_power_context) :
		      sizeof(struct smu_11_0_power_context);

	smu_power->power_context = kzalloc(size, GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = size;

	return 0;
}

int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
					    uint8_t clk_id,
					    uint8_t syspll_id,
					    uint32_t *clk_freq)
{
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
	int ret, index;

	input.clk_id = clk_id;
	input.syspll_id = syspll_id;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	*clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}

int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
		break;
	case 3:
	case 4:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.socclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dcefclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_ECLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.eclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_VCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.vclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dclk);

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2))
		smu_v11_0_atom_get_smu_clockinfo(smu->adev,
						 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
						 (uint8_t)SMU11_SYSPLL1_2_ID,
						 &smu->smu_table.boot_values.fclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL3_1_LCLK_ID,
					 (uint8_t)SMU11_SYSPLL3_1_ID,
					 &smu->smu_table.boot_values.lclk);

	return 0;
}
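
/*
 * The SMU only takes 32-bit message parameters, so each 64-bit address
 * below (the CPU virtual address of the pool and its MC address) is
 * handed over as a high/low pair of messages, followed by the pool size.
 */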
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetSystemVirtualDramAddrHigh,
					      address_high,
					      NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetSystemVirtualDramAddrLow,
					      address_low,
					      NULL);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");

	return ret;
}

int smu_v11_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	struct amdgpu_device *adev = smu->adev;

	/* Navy_Flounder/Dimgrey_Cavefish do not support changing the
	 * display count currently
	 */
	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) ||
	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ||
	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) ||
	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
		return 0;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NumOfDisplays,
					       count,
					       NULL);
}
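
/*
 * The allowed-feature bitmap is 64 bits wide; it is converted to two
 * 32-bit words and sent to the SMU as the MaskHigh/MaskLow message pair.
 */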
int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) {
		ret = -EINVAL;
		goto failed;
	}

	bitmap_to_arr32(feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		goto failed;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					      feature_mask[0], NULL);
	if (ret)
		goto failed;

failed:
	return ret;
}

int smu_v11_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					  SMU_MSG_DisableAllSmuFeatures), NULL);
}

int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);

	return ret;
}

static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					      clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
		smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
				__func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
				__func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v11_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 */
	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetPptLimit,
					      (0 << 24) | (power_src << 16),
					      power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}
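
/*
 * Example of the SetPptLimit parameter encoding used below: a 220W limit
 * while on AC power (assuming the AC source maps to index 0) encodes as
 * (0 << 24) | (0 << 16) | 220 = 0x000000DC.
 */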
int smu_v11_0_set_power_limit(struct smu_context *smu,
			      enum smu_ppt_limit_type limit_type,
			      uint32_t limit)
{
	int power_src;
	int ret = 0;
	uint32_t limit_param;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	power_src = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_PWR,
						   smu->adev->pm.ac_power ?
						   SMU_POWER_SOURCE_AC :
						   SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 * BIT 0-15: PowerLimit
	 */
	limit_param = (limit & 0xFFFF);
	limit_param |= 0 << 24;
	limit_param |= (power_src) << 16;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = limit;

	return 0;
}

static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				    SMU_MSG_ReenableAcDcInterrupt,
				    NULL);
}

static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v11_0_ack_ac_dc_interrupt(smu);

	return ret;
}

void smu_v11_0_interrupt_work(struct smu_context *smu)
{
	if (smu_v11_0_ack_ac_dc_interrupt(smu))
		dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n");
}

int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (smu->smu_table.thermal_controller_type) {
		ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
		if (ret)
			return ret;
	}

	/*
	 * After init there might have been missed interrupts triggered
	 * before driver registers for interrupt (Ex. AC/DC).
	 */
	return smu_v11_0_process_pending_interrupt(smu);
}

int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		   SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_AUTO;
	else
		return smu->user_dpm_profile.fan_mode;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
			__func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

int
smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	speed = MIN(speed, 255);

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 255);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
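
/*
 * Worked example for the RPM-to-tach-period conversion below: with the
 * fixed 25MHz reference (crystal_clock_freq == 2500, in 10KHz units), a
 * request of 1500 RPM gives
 * tach_period = 60 * 2500 * 10000 / (8 * 1500) = 125000.
 */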
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	/*
	 * crystal_clock_freq used for fan speed rpm calculation is
	 * always 25Mhz. So, hardcode it as 2500(in 10K unit).
	 */
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_period;

	if (speed == 0)
		return -EINVAL;
	/*
	 * To prevent from possible overheat, some ASICs may have requirement
	 * for minimum fan speed:
	 * - For some NV10 SKU, the fan speed cannot be set lower than
	 *   700 RPM.
	 * - For some Sienna Cichlid SKU, the fan speed cannot be set
	 *   lower than 500 RPM.
	 */
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
}

int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
				uint32_t *speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	/*
	 * For pre Sienna Cichlid ASICs, the 0 RPM may be not correctly
	 * detected via register retrieving. To workaround this, we will
	 * report the fan speed as 0 PWM if user just requested such.
	 */
	if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
	    && !smu->user_dpm_profile.fan_speed_pwm) {
		*speed = 0;
		return 0;
	}

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
			     CG_THERMAL_STATUS, FDO_PWM_DUTY);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)duty * 255;
	do_div(tmp64, duty100);
	*speed = MIN((uint32_t)tmp64, 255);

	return 0;
}

int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
				uint32_t *speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_status;
	uint64_t tmp64;

	/*
	 * For pre Sienna Cichlid ASICs, the 0 RPM may be not correctly
	 * detected via register retrieving. To workaround this, we will
	 * report the fan speed as 0 RPM if user just requested such.
	 */
	if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
	    && !smu->user_dpm_profile.fan_speed_rpm) {
		*speed = 0;
		return 0;
	}

	tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;

	tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS);
	if (tach_status) {
		do_div(tmp64, tach_status);
		*speed = (uint32_t)tmp64;
	} else {
		dev_warn_once(adev->dev, "Got zero output on CG_TACH_STATUS reading!\n");
		*speed = 0;
	}

	return 0;
}

int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		if (!ret)
			ret = smu_v11_0_set_fan_speed_pwm(smu, 255);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetXgmiMode,
					       pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
					       NULL);
}

static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			  smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
			   smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}
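
/*
 * Three interrupt clients feed the handler below: THM raises the over/
 * under temperature alerts (L2H/H2L), SMUIO GPIO19 signals a HW CTF,
 * and MP1 delivers SMCToHost events (src_id 0xfe) that are further
 * demultiplexed by ctxid.
 */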
#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */

#define SMUIO_11_0__SRCID__SMUIO_GPIO19		83

static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			schedule_delayed_work(&smu->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
				  src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == 0xfe) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case 0x3:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				schedule_work(&smu->interrupt_work);
				break;
			case 0x4:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				schedule_work(&smu->interrupt_work);
				break;
			case 0x7:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.set = smu_v11_0_set_irq_state,
	.process = smu_v11_0_irq_process,
};

int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				0xfe,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
}

int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
				      enum smu_baco_seq baco_seq)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}

bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
		return false;

	/* return true if ASIC is in BACO state already */
	if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
		return true;

	/* Arcturus does not support this bit mask */
	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	return true;
}

enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	return smu_baco->state;
}

#define D3HOT_BACO_SEQUENCE	0
#define D3HOT_BAMACO_SEQUENCE	2

int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	uint32_t data;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	if (state == SMU_BACO_STATE_ENTER) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			if (amdgpu_runtime_pm == 2)
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      SMU_MSG_EnterBaco,
								      D3HOT_BAMACO_SEQUENCE,
								      NULL);
			else
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      SMU_MSG_EnterBaco,
								      D3HOT_BACO_SEQUENCE,
								      NULL);
			break;
		default:
			if (!ras || !adev->ras_enabled ||
			    adev->gmc.xgmi.pending_reset) {
				if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {
					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
					data |= 0x80000000;
					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
				} else {
					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
					data |= 0x80000000;
					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
				}

				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
			} else {
				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
			}
			break;
		}

	} else {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
		if (ret)
			return ret;

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}

	if (!ret)
		smu_baco->state = state;

	return ret;
}

int smu_v11_0_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	return ret;
}

int smu_v11_0_baco_exit(struct smu_context *smu)
{
	int ret;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
	if (!ret) {
		/*
		 * Poll BACO exit status to ensure FW has completed
		 * BACO exit process to avoid timing issues.
		 */
		smu_v11_0_poll_baco_exit(smu);
	}

	return ret;
}

int smu_v11_0_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
	if (!ret)
		msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LightSBR, enable ? 1 : 0, NULL);

	return ret;
}

int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}
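
/*
 * The soft and hard limit messages share one parameter encoding: the
 * clock id goes in the upper 16 bits and the frequency, in MHz, in the
 * lower 16 bits.
 */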
int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v11_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	struct smu_11_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_11_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_11_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_11_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	/*
	 * Separate MCLK and SOCCLK soft min/max settings are not allowed
	 * on Arcturus.
	 */
	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {
		mclk_min = mclk_max = 0;
		socclk_min = socclk_max = 0;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max);
		if (ret)
			return ret;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max);
		if (ret)
			return ret;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max);
		if (ret)
			return ret;
	}

	return ret;
}
int smu_v11_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,
					       pwr_source,
					       NULL);
}

int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
				    enum smu_clk_type clk_type,
				    uint16_t level,
				    uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      value);
	if (ret)
		return ret;

	/*
	 * BIT31: 0 - fine-grained DPM, 1 - discrete DPM.
	 * We don't support the distinction for now, so mask the bit off.
	 */
	*value = *value & 0x7fffffff;

	return ret;
}

int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
				  enum smu_clk_type clk_type,
				  uint32_t *value)
{
	/* level index 0xff asks the firmware for the number of DPM levels */
	return smu_v11_0_get_dpm_freq_by_index(smu,
					       clk_type,
					       0xff,
					       value);
}

int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_11_0_dpm_table *single_dpm_table)
{
	int ret = 0;
	uint32_t clk;
	int i;

	ret = smu_v11_0_get_dpm_level_count(smu,
					    clk_type,
					    &single_dpm_table->count);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
		return ret;
	}

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v11_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      i,
						      &clk);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
			return ret;
		}

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

		if (i == 0)
			single_dpm_table->min = clk;
		else if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;
	}

	return 0;
}
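/*
 * Illustrative sketch, not in the original source: the raw value returned
 * by SMU_MSG_GetDpmFreqByIndex carries the clock in its low 31 bits, while
 * BIT31 flags discrete (1) versus fine-grained (0) DPM and is masked off
 * above. A hypothetical decoder making both fields explicit:
 */
static inline uint32_t smu_v11_0_dpm_freq_from_raw(uint32_t raw,
						   bool *is_discrete)
{
	if (is_discrete)
		*is_discrete = !!(raw & 0x80000000);	/* BIT31 */

	return raw & 0x7fffffff;	/* clock value in MHz */
}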
int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
				  enum smu_clk_type clk_type,
				  uint32_t *min_value,
				  uint32_t *max_value)
{
	uint32_t level_count = 0;
	int ret = 0;

	if (!min_value && !max_value)
		return -EINVAL;

	if (min_value) {
		/* by default, take the level-0 clock value as the min */
		ret = smu_v11_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      0,
						      min_value);
		if (ret)
			return ret;
	}

	if (max_value) {
		ret = smu_v11_0_get_dpm_level_count(smu,
						    clk_type,
						    &level_count);
		if (ret)
			return ret;

		ret = smu_v11_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      level_count - 1,
						      max_value);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}

uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
{
	uint32_t width_level;

	width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
	if (width_level > LINK_WIDTH_MAX)
		width_level = 0;

	return link_width[width_level];
}

int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}

uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
{
	uint32_t speed_level;

	speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
	if (speed_level > LINK_SPEED_MAX)
		speed_level = 0;

	return link_speed[speed_level];
}
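/*
 * Sketch for illustration, assuming the conventional PCIe encodings; the
 * driver's actual translation uses the link_width[]/link_speed[] tables
 * referenced above, which are defined elsewhere. The hypothetical helper
 * below only shows the idea of mapping a speed level to a generation name.
 */
static inline const char *smu_v11_0_pcie_gen_name(int speed_level)
{
	/* hypothetical mapping: speed level 0..3 -> Gen1..Gen4 */
	static const char * const names[] = { "Gen1", "Gen2", "Gen3", "Gen4" };

	if (speed_level < 0 || speed_level > 3)
		return "unknown";

	return names[speed_level];
}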
"enable" : "disable"); 2167 return ret; 2168 } 2169 } 2170 2171 return ret; 2172 } 2173 2174 int smu_v11_0_restore_user_od_settings(struct smu_context *smu) 2175 { 2176 struct smu_table_context *table_context = &smu->smu_table; 2177 void *user_od_table = table_context->user_overdrive_table; 2178 int ret = 0; 2179 2180 ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)user_od_table, true); 2181 if (ret) 2182 dev_err(smu->adev->dev, "Failed to import overdrive table!\n"); 2183 2184 return ret; 2185 } 2186 2187 void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu) 2188 { 2189 struct amdgpu_device *adev = smu->adev; 2190 2191 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82); 2192 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66); 2193 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); 2194 } 2195