/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV						\
	struct amdgpu_device *adev =				\
		((struct amdgpu_cgs_device *)cgs_device)->adev

static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
			       NULL, NULL, 0, &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, true);

		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
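/*
 * Map a buffer object into the GPU address space: pin the BO in its
 * preferred domain and return the resulting MC (GPU) address through
 * @mcaddr. The WARN_ON_ONCE flags BOs that allow more than one placement,
 * since pinning such a BO to a single fixed address is ambiguous.
 */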
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG_SE_CAC:
		return RREG32_SE_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG_SE_CAC:
		return WREG32_SE_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}
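/*
 * Return the base address of the PCI resource that backs a CGS resource
 * type. Only the MMIO register aperture and the doorbell aperture are
 * supported; FB, IO and ROM queries return -EINVAL.
 */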
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}

static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};
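/*
 * Register a client interrupt source with the amdgpu IRQ core. The
 * caller's set/handler callbacks and private data are wrapped in a
 * struct cgs_irq_params, which cgs_set_irq_state() and cgs_process_irq()
 * above unwrap when the core invokes them. On failure both allocations
 * are freed before returning.
 */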
static int amdgpu_cgs_add_irq_source(void *cgs_device,
				     unsigned client_id,
				     unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
			      unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;

	if (!adev->irq.client[client_id].sources)
		return -EINVAL;

	return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
			      unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;

	if (!adev->irq.client[client_id].sources)
		return -EINVAL;

	return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
}

static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
				(void *)adev,
				state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
				(void *)adev,
				state);
			break;
		}
	}
	return r;
}
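/*
 * Translate a CGS firmware id into the amdgpu ucode id used to index
 * adev->firmware.ucode[]. Unknown types log an error and fall through
 * to AMDGPU_UCODE_ID_MAXIMUM.
 */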
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		/* for VI, JT2 should be the same as JT1, because:
		 * 1. MEC2 and MEC1 use exactly the same FW.
		 * 2. JT2 is not patched but JT1 is.
		 */
		if (adev->asic_type >= CHIP_TOPAZ)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		else
			result = AMDGPU_UCODE_ID_CP_MEC2;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	case CGS_UCODE_ID_STORAGE:
		result = AMDGPU_UCODE_ID_STORAGE;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return 0;
	}
	/* cannot release other firmware because they are not created by cgs */
	return -EINVAL;
}

static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					    enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version = 0;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	case CGS_UCODE_ID_STORAGE:
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		break;
	}
	return fw_version;
}

static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
				      bool en)
{
	CGS_FUNC_ADEV;

	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
	    adev->gfx.rlc.funcs->exit_safe_mode == NULL)
		return 0;

	if (en)
		adev->gfx.rlc.funcs->enter_safe_mode(adev);
	else
		adev->gfx.rlc.funcs->exit_safe_mode(adev);

	return 0;
}

static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
				     bool lock)
{
	CGS_FUNC_ADEV;

	if (lock)
		mutex_lock(&adev->grbm_idx_mutex);
	else
		mutex_unlock(&adev->grbm_idx_mutex);
}
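/*
 * Fill in a cgs_firmware_info for the requested ucode. For non-SMC types
 * the data comes from the ucode already loaded into
 * adev->firmware.ucode[]; for SMC/SMC_SK the firmware file is requested
 * and validated here on first use, with the file name chosen per ASIC
 * (and per PCI device/revision id for the "kicker" _k_ variants below).
 */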
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		if (CGS_UCODE_ID_CP_MEC == type)
			info->image_size = le32_to_cpu(header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;
		const struct common_firmware_header *header;
		struct amdgpu_firmware_info *ucode = NULL;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TAHITI:
				strcpy(fw_name, "radeon/tahiti_smc.bin");
				break;
			case CHIP_PITCAIRN:
				if ((adev->pdev->revision == 0x81) &&
				    ((adev->pdev->device == 0x6810) ||
				     (adev->pdev->device == 0x6811))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/pitcairn_smc.bin");
				}
				break;
			case CHIP_VERDE:
				if (((adev->pdev->device == 0x6820) &&
				     ((adev->pdev->revision == 0x81) ||
				      (adev->pdev->revision == 0x83))) ||
				    ((adev->pdev->device == 0x6821) &&
				     ((adev->pdev->revision == 0x83) ||
				      (adev->pdev->revision == 0x87))) ||
				    ((adev->pdev->revision == 0x87) &&
				     ((adev->pdev->device == 0x6823) ||
				      (adev->pdev->device == 0x682b)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/verde_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/verde_smc.bin");
				}
				break;
			case CHIP_OLAND:
				if (((adev->pdev->revision == 0x81) &&
				     ((adev->pdev->device == 0x6600) ||
				      (adev->pdev->device == 0x6604) ||
				      (adev->pdev->device == 0x6605) ||
				      (adev->pdev->device == 0x6610))) ||
				    ((adev->pdev->revision == 0x83) &&
				     (adev->pdev->device == 0x6610))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/oland_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/oland_smc.bin");
				}
				break;
			case CHIP_HAINAN:
				if (((adev->pdev->revision == 0x81) &&
				     (adev->pdev->device == 0x6660)) ||
				    ((adev->pdev->revision == 0x83) &&
				     ((adev->pdev->device == 0x6660) ||
				      (adev->pdev->device == 0x6663) ||
				      (adev->pdev->device == 0x6665) ||
				      (adev->pdev->device == 0x6667)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/hainan_k_smc.bin");
				} else if ((adev->pdev->revision == 0xc3) &&
					   (adev->pdev->device == 0x6665)) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/banks_k_2_smc.bin");
				} else {
					strcpy(fw_name, "radeon/hainan_smc.bin");
				}
				break;
			case CHIP_BONAIRE:
				if ((adev->pdev->revision == 0x80) ||
				    (adev->pdev->revision == 0x81) ||
				    (adev->pdev->device == 0x665f)) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/bonaire_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/bonaire_smc.bin");
				}
				break;
			case CHIP_HAWAII:
				if (adev->pdev->revision == 0x80) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/hawaii_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/hawaii_smc.bin");
				}
				break;
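			/*
			 * SI (Tahiti..Hainan) and CI (Bonaire/Hawaii) parts
			 * above keep their SMC firmware under the legacy
			 * radeon/ prefix; the VI parts below use amdgpu/.
			 */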
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				} else {
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				}
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				} else {
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				}
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU) {
					if (((adev->pdev->device == 0x67ef) &&
					     ((adev->pdev->revision == 0xe0) ||
					      (adev->pdev->revision == 0xe2) ||
					      (adev->pdev->revision == 0xe5))) ||
					    ((adev->pdev->device == 0x67ff) &&
					     ((adev->pdev->revision == 0xcf) ||
					      (adev->pdev->revision == 0xef) ||
					      (adev->pdev->revision == 0xff)))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
					} else {
						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
					}
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU) {
					if ((adev->pdev->device == 0x67df) &&
					    ((adev->pdev->revision == 0xe0) ||
					     (adev->pdev->revision == 0xe3) ||
					     (adev->pdev->revision == 0xe4) ||
					     (adev->pdev->revision == 0xe5) ||
					     (adev->pdev->revision == 0xe7) ||
					     (adev->pdev->revision == 0xef))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
					} else {
						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
					}
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS12:
				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				break;
			case CHIP_VEGA10:
				if ((adev->pdev->device == 0x687f) &&
				    ((adev->pdev->revision == 0xc0) ||
				     (adev->pdev->revision == 0xc1) ||
				     (adev->pdev->revision == 0xc3)))
					strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
				else
					strcpy(fw_name, "amdgpu/vega10_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"\n", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}

			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
				ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
				ucode->fw = adev->pm.fw;
				header = (const struct common_firmware_header *)ucode->fw->data;
				adev->firmware.fw_size +=
					ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
					le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
	CGS_FUNC_ADEV;
	return amdgpu_sriov_vf(adev);
}
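/*
 * Answer a single CGS system-info query (PCI ids, PCIe caps, gating
 * flags, CU/SE topology). The caller must set sys_info->size to
 * sizeof(struct cgs_system_info); unknown queries return -ENODEV.
 */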
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
		sys_info->value = adev->pdev->subsystem_device;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
		sys_info->value = adev->pdev->subsystem_vendor;
		break;
	case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
		sys_info->value = adev->pdev->devfn;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;
	if (mode_info) {
		/* if the displays are off, vblank time is max */
		mode_info->vblank_time_us = 0xffffffff;
		/* always set the reference clock */
		mode_info->ref_clock = adev->clock.spll.reference_freq;
	}

	if (!amdgpu_device_has_dc_support(adev)) {
		struct amdgpu_crtc *amdgpu_crtc;
		struct drm_device *ddev = adev->ddev;
		struct drm_crtc *crtc;
		uint32_t line_time_us, vblank_lines;

		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
					info->display_count++;
				}
				if (mode_info != NULL &&
				    crtc->enabled && amdgpu_crtc->enabled &&
				    amdgpu_crtc->hw_mode.clock) {
					line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
						amdgpu_crtc->hw_mode.clock;
					vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
						amdgpu_crtc->hw_mode.crtc_vdisplay +
						(amdgpu_crtc->v_border * 2);
					mode_info->vblank_time_us = vblank_lines * line_time_us;
					mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
					mode_info->ref_clock = adev->clock.spll.reference_freq;
					mode_info = NULL;
				}
			}
		}
	} else {
		info->display_count = adev->pm.pm_display_cfg.num_display;
		if (mode_info != NULL) {
			mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
			mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
			mode_info->ref_clock = adev->clock.spll.reference_freq;
		}
	}
	return 0;
}
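/*
 * Record whether the CGS client's DPM implementation (e.g. powerplay)
 * is up, so the rest of the driver can gate DPM-dependent paths on
 * adev->pm.dpm_enabled.
 */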
static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

/** \brief Evaluate an ACPI namespace object; the handle or pathname must be valid
 * \param cgs_device
 * \param info input/output arguments for the control method
 * \return status
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER))
			    && (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -ENOMEM;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

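	/* copy each element of the ACPI result back into the caller's
	 * output argument list, checking that types and lengths match
	 */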
	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif

static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
				       uint32_t acpi_method,
				       uint32_t acpi_function,
				       void *pinput, void *poutput,
				       uint32_t output_count,
				       uint32_t input_size,
				       uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}
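/*
 * Dispatch tables handed to CGS clients. Clients normally do not call
 * these functions directly; cgs_common.h wraps each op in a macro that
 * dispatches through the device's ops table, roughly:
 *
 *	#define CGS_CALL(func, dev, ...) \
 *		(((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
 *
 * so that e.g. cgs_alloc_gpu_mem(dev, ...) lands in
 * amdgpu_cgs_alloc_gpu_mem() above. (This is a sketch of the wrapper
 * shape; see cgs_common.h for the exact definition.)
 */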
static const struct cgs_ops amdgpu_cgs_ops = {
	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
	.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
	.gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
	.kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
	.kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.get_pci_resource = amdgpu_cgs_get_pci_resource,
	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
	.rel_firmware = amdgpu_cgs_rel_firmware,
	.set_powergating_state = amdgpu_cgs_set_powergating_state,
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.call_acpi_method = amdgpu_cgs_call_acpi_method,
	.query_system_info = amdgpu_cgs_query_system_info,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	.add_irq_source = amdgpu_cgs_add_irq_source,
	.irq_get = amdgpu_cgs_irq_get,
	.irq_put = amdgpu_cgs_irq_put
};

struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}
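/*
 * Minimal usage sketch (illustrative only; the real call sites live in
 * the powerplay/DPM init and teardown paths):
 *
 *	struct cgs_device *cgs = amdgpu_cgs_create_device(adev);
 *	if (!cgs)
 *		return -ENOMEM;
 *	...hand cgs to the CGS client...
 *	amdgpu_cgs_destroy_device(cgs);
 */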