/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
};

/*
 * Helper function to query firmware capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return the firmware_capability field of the firmwareinfo table on
 * success, or 0 if the table is unavailable.
 */
uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	u32 fw_cap = 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1 + */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
		}
	}

	return fw_cap;
}

/*
 * Helper function to query GPU virtualization capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if GPU virtualization is supported or false if not
 */
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}

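/*
 * Editorial note: the VBIOS and the driver exchange state through a small
 * block of BIOS scratch registers; the firmwareinfo table reports where
 * that block starts.  The offset cached below is what later scratch
 * register reads (e.g. the vram_module lookup in
 * amdgpu_atomfirmware_get_vram_info()) are based on.
 */
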
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
						struct vram_usagebyfirmware_v2_1 *fw_usage,
						int *usage_bytes)
{
	u32 start_addr, fw_size, drv_size;

	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);

	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
		  start_addr,
		  fw_size,
		  drv_size);

	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
	    (u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
		  ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
		/* Firmware requests a VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
		/* Use the default scratch size */
		*usage_bytes = 0;
	} else {
		*usage_bytes = drv_size << 10;
	}
	return 0;
}

static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
						struct vram_usagebyfirmware_v2_2 *fw_usage,
						int *usage_bytes)
{
	u32 fw_start_addr, fw_size, drv_start_addr, drv_size;

	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);

	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);

	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
		  fw_start_addr,
		  fw_size,
		  drv_start_addr,
		  drv_size);

	if (amdgpu_sriov_vf(adev) &&
	    ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
			       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Firmware requests a VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
	}

	if (amdgpu_sriov_vf(adev) &&
	    ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
				ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Driver requests a VRAM reservation for SR-IOV */
		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.drv_vram_usage_size = drv_size << 10;
	}

	*usage_bytes = 0;
	return 0;
}

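/*
 * Editorial note on the encoding used by the two helpers above: all
 * addresses and sizes in the vram_usagebyfirmware table are in KB, so
 * "<< 10" converts them to bytes, and the top bits of a start address
 * carry ATOM_VRAM_OPERATION flags rather than address bits, which is why
 * they are masked off with ~ATOM_VRAM_OPERATION_FLAGS_MASK before the
 * shift.
 */
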
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
	u16 data_offset;
	u8 frev, crev;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
		if (frev == 2 && crev == 1) {
			fw_usage_v2_1 =
				(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_1(adev,
							     fw_usage_v2_1,
							     &usage_bytes);
		} else if (frev >= 2 && crev >= 2) {
			fw_usage_v2_2 =
				(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_2(adev,
							     fw_usage_v2_2,
							     &usage_bytes);
		}
	}

	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};

static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
		case ATOM_DGPU_VRAM_TYPE_HBM3:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

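/*
 * Query the VRAM width, type and vendor from the integratedsysteminfo
 * table (APUs) or the vram_info table (dGPUs).  The width is derived as
 * channel count times per-channel width; on dGPUs the active vram_module
 * entry is selected by a module id read back from a BIOS scratch
 * register.  Any of the output pointers may be NULL.
 */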
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v11.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v21.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
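			/*
			 * Byte 2 of the BIOS scratch register at offset +4
			 * holds the index of the vram_module entry that is
			 * actually populated on the board; out-of-range
			 * values fall back to module 0 below.
			 */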
			if (frev == 3) {
				switch (crev) {
				/* v30 */
				case 0:
					vram_module = (union vram_module *)vram_info->v30.vram_module;
					mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					mem_type = vram_info->v30.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_info->v30.channel_num;
					mem_channel_width = vram_info->v30.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					break;
				default:
					return -EINVAL;
				}
			} else if (frev == 2) {
				switch (crev) {
				/* v23 */
				case 3:
					if (module_id > vram_info->v23.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v23.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v24 */
				case 4:
					if (module_id > vram_info->v24.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v24.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v10.vram_module_size);
						i++;
					}
					mem_type = vram_module->v10.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v10.channel_num;
					mem_channel_width = vram_module->v10.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v25 */
				case 5:
					if (module_id > vram_info->v25.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v25.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v11.vram_module_size);
						i++;
					}
					mem_type = vram_module->v11.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v11.channel_num;
					mem_channel_width = vram_module->v11.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v26 */
				case 6:
					if (module_id > vram_info->v26.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v26.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				default:
					return -EINVAL;
				}
			} else {
				/* invalid frev */
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * Return true if the VBIOS has ECC enabled by default (when the umc_info
 * table is available), or false if ECC is not enabled or the umc_info
 * table is not available.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;
	u32 umc_config;
	u32 umc_config1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		if (frev == 3) {
			umc_info = (union umc_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (crev) {
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
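			/*
			 * umc_info v3.3 adds umc_config1, which reports ECC
			 * capability separately from the legacy
			 * default-enable bit, so the v3.3 path checks both.
			 */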
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				ecc_default_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		}
	}

	return ecc_default_enabled;
}

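/*
 * Editorial note: the helpers below only test
 * adev->mode_info.firmware_flags, which is expected to cache the
 * firmware_capability bits reported by the VBIOS (see
 * amdgpu_atomfirmware_query_firmware_capability() above), so they never
 * touch the ATOM tables themselves.
 */
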
/*
 * Helper function to query SRAM ECC capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if the VBIOS supports SRAM ECC or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
}

/*
 * Helper function to query dynamic boot config capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if the VBIOS supports dynamic boot config or false if not
 */
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *	the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
				      u8 *i2c_address)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev,
					  &data_offset)) {
		/* support firmware_info 3.4 + */
		if ((frev == 3 && crev >= 4) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			/* The ras_rom_i2c_slave_addr should ideally
			 * be a 19-bit EEPROM address, which would be
			 * used as is by the driver; see top of
			 * amdgpu_eeprom.c.
			 *
			 * When this is the case, 0 is of course a
			 * valid RAS EEPROM address, in which case,
			 * we'll drop the first "if (firm...)" and only
			 * leave the check for the pointer.
			 *
			 * The reason this works right now is because
			 * ras_rom_i2c_slave_addr contains the EEPROM
			 * device type qualifier 1010b in the top 4
			 * bits.
			 */
			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
				if (i2c_address)
					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
				return true;
			}
		}
	}

	return false;
}

union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};

union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};

int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		if (frev == 3)
			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
		else if (frev == 4)
			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	/* if the ASIC is Navi or newer, the RLC reference clock from the
	 * VBIOS gfx_info table is used as the system clock */
	if (adev->asic_type >= CHIP_NAVI10) {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    gfx_info);
		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
						  &frev, &crev, &data_offset)) {
			union gfx_info *gfx_info = (union gfx_info *)
				(mode_info->atom_context->bios + data_offset);
			if ((frev == 3) ||
			    (frev == 2 && crev == 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
				ret = 0;
			} else if ((frev == 2) &&
				   (crev >= 2) &&
				   (crev != 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
				ret = 0;
			} else {
				BUG();
			}
		}
	}

	return ret;
}

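/*
 * Editorial note on the clock parsing above: as the field names indicate,
 * the *_10khz / *_in10khz values are in units of 10 kHz, and they are
 * stored without conversion (beyond the endian swap), so adev->clock and
 * the PLL reference frequencies end up in the same 10 kHz unit.
 */
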
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}
	}
	return -EINVAL;
}

/*
 * Helper function to query the two-stage memory training capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if two-stage memory training is supported or false if not
 */
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
}

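/*
 * Editorial note: the helper below mixes return conventions. It returns
 * the firmware-reserved framebuffer size in bytes (fw_reserved_size_in_kb
 * shifted by 10), 0 when the table cannot be parsed or the revision
 * carries no such field, and -EINVAL for an unsupported frev.
 */
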
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
					   &frev, &crev, &data_offset))
		/* failed to parse the data header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}

	return fw_reserved_fb_size;
}

/*
 * Helper function to execute the asic_init table
 *
 * @adev: amdgpu_device pointer
 * @fb_reset: flag to indicate whether the framebuffer was reset or not
 *
 * Return 0 on success, a negative error code on failure
 */
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx;
	uint8_t frev, crev;
	uint16_t data_offset;
	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
	int index;

	if (!mode_info)
		return -EINVAL;

	ctx = mode_info->atom_context;
	if (!ctx)
		return -EINVAL;

	/* query bootup sclk/mclk from the firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(ctx->bios +
						data_offset);

		bootup_sclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		bootup_mclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
	} else {
		return -EINVAL;
	}

	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    asic_init);
	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
		if (frev == 2 && crev >= 1) {
			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
			if (!fb_reset)
				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
			else
				asic_init_ps_v2_1.param.memparam.memflag = 0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1);
}