/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
};

/*
 * Helper function to query firmware capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return the firmware_capability field of the firmwareinfo table on
 * success, or 0 if the table is not available
 */
uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	u32 fw_cap = 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1 + */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
		}
	}

	return fw_cap;
}

/*
 * Helper function to query GPU virtualization capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if GPU virtualization is supported or false if not
 */
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}
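
/*
 * Note: the capability word returned by the query above is typically
 * cached once at init time in adev->mode_info.firmware_flags, which is
 * the field the boolean helpers in this file test against.  A minimal
 * sketch of that pattern (illustrative, not necessarily the exact
 * init-path code):
 *
 *	adev->mode_info.firmware_flags =
 *		amdgpu_atomfirmware_query_firmware_capability(adev);
 */
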
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
						struct vram_usagebyfirmware_v2_1 *fw_usage,
						int *usage_bytes)
{
	u32 start_addr, fw_size, drv_size;

	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);

	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
		  start_addr,
		  fw_size,
		  drv_size);

	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
	    (u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
		  ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
		/* Firmware requests VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
		/* Use the default scratch size */
		*usage_bytes = 0;
	} else {
		*usage_bytes = drv_size << 10;
	}
	return 0;
}

static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
						struct vram_usagebyfirmware_v2_2 *fw_usage,
						int *usage_bytes)
{
	u32 fw_start_addr, fw_size, drv_start_addr, drv_size;

	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);

	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);

	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
		  fw_start_addr,
		  fw_size,
		  drv_start_addr,
		  drv_size);

	if (amdgpu_sriov_vf(adev) &&
	    ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
			       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Firmware requests VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
	}

	if (amdgpu_sriov_vf(adev) &&
	    ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
				ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Driver requests VRAM reservation for SR-IOV */
		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.drv_vram_usage_size = drv_size << 10;
	}

	*usage_bytes = 0;
	return 0;
}

int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
	u16 data_offset;
	u8 frev, crev;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
		if (frev == 2 && crev == 1) {
			fw_usage_v2_1 =
				(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_1(adev,
							     fw_usage_v2_1,
							     &usage_bytes);
		} else if (frev >= 2 && crev >= 2) {
			fw_usage_v2_2 =
				(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_2(adev,
							     fw_usage_v2_2,
							     &usage_bytes);
		}
	}

	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}
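
/*
 * Worked example of the v2_1 encoding handled above (values are
 * illustrative): the top bits of start_address_in_kb carry operation
 * flags while the remaining bits are a KiB offset, so a value of
 *
 *	(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
 *	 ATOM_VRAM_OPERATION_FLAGS_SHIFT) | 0x100
 *
 * requests an SR-IOV reservation starting at 0x100 KiB.  Masking off the
 * flags and shifting left by 10 converts that KiB offset to bytes, and
 * the fw_size/drv_size fields are converted from KiB to bytes the same
 * way.
 */
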
union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
	struct atom_integrated_system_info_v2_3 v23;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
	struct atom_umc_info_v4_0 v40;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};

static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
		case ATOM_DGPU_VRAM_TYPE_HBM3:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}
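
/*
 * Helper function to query vram width, type and vendor
 *
 * @adev: amdgpu_device pointer
 * @vram_width: total memory bus width in bits, may be NULL
 * @vram_type: AMDGPU_VRAM_TYPE_* value, may be NULL
 * @vram_vendor: vendor id, may be NULL (only filled in on dGPUs)
 *
 * Parses the integratedsysteminfo table on APUs and the vram_info table
 * on dGPUs.  Return 0 on success or -EINVAL if the table revision is not
 * supported
 */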
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v11.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v21.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				case 3:
					mem_channel_number = igp_info->v23.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v23.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			if (frev == 3) {
				switch (crev) {
				/* v30 */
				case 0:
					vram_module = (union vram_module *)vram_info->v30.vram_module;
					mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					mem_type = vram_info->v30.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_info->v30.channel_num;
					mem_channel_width = vram_info->v30.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * 16;
					break;
				default:
					return -EINVAL;
				}
			} else if (frev == 2) {
				switch (crev) {
				/* v23 */
				case 3:
					if (module_id > vram_info->v23.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v23.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v24 */
				case 4:
					if (module_id > vram_info->v24.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v24.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v10.vram_module_size);
						i++;
					}
					mem_type = vram_module->v10.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v10.channel_num;
					mem_channel_width = vram_module->v10.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v25 */
				case 5:
					if (module_id > vram_info->v25.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v25.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v11.vram_module_size);
						i++;
					}
					mem_type = vram_module->v11.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v11.channel_num;
					mem_channel_width = vram_module->v11.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v26 */
				case 6:
					if (module_id > vram_info->v26.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v26.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				default:
					return -EINVAL;
				}
			} else {
				/* invalid frev */
				return -EINVAL;
			}
		}
	}

	return 0;
}
/*
 * Helper function to query memory ecc capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if the vbios enables ecc by default and the umc info table
 * is available, or false if ecc is not enabled by default or the umc info
 * table is not available
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;
	u32 umc_config;
	u32 umc_config1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
		if (frev == 3) {
			switch (crev) {
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				ecc_default_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else if (frev == 4) {
			switch (crev) {
			case 0:
				umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
				ecc_default_enabled =
					(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else {
			/* unsupported frev */
			return false;
		}
	}

	return ecc_default_enabled;
}
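
/*
 * Illustrative consumer sketch (not the exact RAS init code): the RAS
 * core consults this query, together with the sram ecc query below, when
 * deciding which error-handling features the hardware/vbios combination
 * actually supports, e.g.:
 *
 *	if (amdgpu_atomfirmware_mem_ecc_supported(adev))
 *		... treat UMC/DRAM ECC as available ...
 */
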
/*
 * Helper function to query sram ecc capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports sram ecc or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
}

/*
 * Helper function to query dynamic boot config capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports dynamic boot config or false if not
 */
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *    the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
				      u8 *i2c_address)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev,
					  &data_offset)) {
		/* support firmware_info 3.4 + */
		if ((frev == 3 && crev >= 4) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			/* The ras_rom_i2c_slave_addr should ideally
			 * be a 19-bit EEPROM address, which would be
			 * used as is by the driver; see top of
			 * amdgpu_eeprom.c.
			 *
			 * When this is the case, 0 is of course a
			 * valid RAS EEPROM address, in which case,
			 * we'll drop the first "if (firm...)" and only
			 * leave the check for the pointer.
			 *
			 * The reason this works right now is because
			 * ras_rom_i2c_slave_addr contains the EEPROM
			 * device type qualifier 1010b in the top 4
			 * bits.
			 */
			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
				if (i2c_address)
					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
				return true;
			}
		}
	}

	return false;
}
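
/*
 * For example (illustrative value): a reported ras_rom_i2c_slave_addr of
 * 0xA0 carries the EEPROM device-type qualifier 1010b in bits 7:4, so
 * every valid address is currently non-zero.  That is what lets the
 * "if (firmware_info->v34.ras_rom_i2c_slave_addr)" test above double as
 * a "was an address provided at all" check.
 */
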
union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};

union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};

int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		if (frev == 3)
			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
		else if (frev == 4)
			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	/*
	 * if asic is Navi+, the rlc reference clock is used for system clock
	 * from vbios gfx_info table
	 */
	if (adev->asic_type >= CHIP_NAVI10) {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    gfx_info);
		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
						  &frev, &crev, &data_offset)) {
			union gfx_info *gfx_info = (union gfx_info *)
				(mode_info->atom_context->bios + data_offset);
			if ((frev == 3) ||
			    (frev == 2 && crev == 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
				ret = 0;
			} else if ((frev == 2) &&
				   (crev >= 2) &&
				   (crev != 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
				ret = 0;
			} else {
				BUG();
			}
		}
	}

	return ret;
}
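
/*
 * Helper function to populate the gfx configuration (shader engine
 * layout, CU/wavefront parameters, etc.) from the vbios gfx_info table
 *
 * @adev: amdgpu_device pointer
 *
 * Return 0 on success or -EINVAL if the table is missing or its revision
 * is not supported
 */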
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}
	}
	return -EINVAL;
}

/*
 * Helper function to query two stage mem training capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if two stage mem training is supported or false if not
 */
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
}
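
/*
 * Sketch of a typical consumer (illustrative, not the exact memory
 * manager code): VRAM setup gates reservation of the two-stage BIST
 * training buffer on this capability, e.g.:
 *
 *	if (amdgpu_atomfirmware_mem_training_supported(adev))
 *		... reserve the training-data region in VRAM ...
 */
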
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
					   &frev, &crev, &data_offset))
		/* failed to parse data_header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}

	return fw_reserved_fb_size;
}

/*
 * Helper function to execute asic_init table
 *
 * @adev: amdgpu_device pointer
 * @fb_reset: flag to indicate whether fb is reset or not
 *
 * Return 0 on success, a negative error code on failure
 */
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx;
	uint8_t frev, crev;
	uint16_t data_offset;
	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
	int index;

	if (!mode_info)
		return -EINVAL;

	ctx = mode_info->atom_context;
	if (!ctx)
		return -EINVAL;

	/* query bootup sclk/mclk from firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(ctx->bios +
						data_offset);

		bootup_sclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		bootup_mclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
	} else {
		return -EINVAL;
	}

	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    asic_init);
	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
		if (frev == 2 && crev >= 1) {
			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
			if (!fb_reset)
				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
			else
				asic_init_ps_v2_1.param.memparam.memflag = 0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1);
}
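
/*
 * Illustrative caller sketch (an assumption about the surrounding
 * driver, not code from this file): on device init or after a full GPU
 * reset the driver re-runs the vbios asic_init table, with fb_reset
 * indicating whether VRAM contents were lost:
 *
 *	return amdgpu_atomfirmware_asic_init(adev, true);
 */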