/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV						\
	struct amdgpu_device *adev =				\
		((struct amdgpu_cgs_device *)cgs_device)->adev

static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);

	if (IS_ERR(sg))
		return PTR_ERR(sg);

	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0)) {
		/* don't leak the BO if we can't reserve it */
		amdgpu_bo_unref(&bo);
		return ret;
	}

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

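/*
 * Counterpart to amdgpu_cgs_gmap_kmem(): unpins the backing BO and drops
 * the reference taken at creation.  A NULL handle is tolerated.
 */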
static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (WARN_ON(min_offset > max_offset))
		return -EINVAL;

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		} else {
			/* all of VRAM is CPU-visible: there is no invisible
			 * region to place the buffer in */
			return -EINVAL;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

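/*
 * Map an allocation into the GPU address space.  The pin range is taken
 * from the BO's first placement, which works because
 * amdgpu_cgs_alloc_gpu_mem() always sets up exactly one placement; the
 * WARN_ON_ONCE below guards that invariant.  A typical client sequence
 * (a sketch, assuming the cgs_* wrapper macros from cgs_common.h):
 *
 *	cgs_handle_t handle;
 *	uint64_t mc_addr;
 *
 *	cgs_alloc_gpu_mem(dev, CGS_GPU_MEM_TYPE__GART_CACHEABLE,
 *			  size, PAGE_SIZE, 0, 0, &handle);
 *	cgs_gmap_gpu_mem(dev, handle, &mc_addr);
 */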
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	/* widen before shifting so page frame numbers above 4G don't overflow */
	min_offset = (u64)obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = (u64)obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);

	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

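/*
 * Like amdgpu_cgs_read_pci_config_byte() above, the word/dword readers
 * cannot report failure through their return type: a failed config-space
 * read triggers a WARN and returns 0, so callers must treat 0 as
 * potentially invalid.
 */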
static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);

	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);

	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);

	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);

	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);

	WARN(ret, "pci_write_config_dword error");
}

static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}

static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

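/*
 * The PM request entry points below are placeholders: they accept the
 * call and report success so clients can already code against the final
 * interface, but no power-management request is actually issued yet.
 */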
static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented\n");
	return -EPERM;
}

struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

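/*
 * Forward a clockgating/powergating state change to the matching IP
 * block.  Returns the block's own result, or -1 if no valid block of
 * the requested type was found.
 */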
static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
				(void *)adev,
				state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
				(void *)adev,
				state);
			break;
		}
	}
	return r;
}

static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
		    || adev->asic_type == CHIP_POLARIS10)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return 0;
	}
	/* cannot release other firmware because it was not created by cgs */
	return -EINVAL;
}

static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					    enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		fw_version = 0;
	}
	return fw_version;
}

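/*
 * For non-SMC microcode, firmware info is served from the already loaded
 * adev->firmware.ucode[] table.  For SMC firmware, this function also
 * requests and validates the image on first use, picking the file by
 * ASIC type (and by PCI device/revision for the Topaz and Tonga
 * variants).
 */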
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU)
					strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				else if (type == CGS_UCODE_ID_SMU_SK)
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU)
					strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				else if (type == CGS_UCODE_ID_SMU_SK)
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
					le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}

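/*
 * sys_info->size acts as a simple ABI check: a caller built against a
 * different cgs_system_info layout is rejected with -ENODEV before any
 * field is interpreted.
 */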
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
		sys_info->value = adev->pdev->subsystem_device;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
		sys_info->value = adev->pdev->subsystem_vendor;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
			    crtc->enabled && amdgpu_crtc->enabled &&
			    amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				mode_info = NULL;
			}
		}
	}

	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

/** \brief evaluate acpi namespace object, handle or pathname must be valid
 * \param cgs_device
 * \param info input/output arguments for the control method
 * \return status
 */
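/*
 * Input arguments are marshalled into an acpi_object_list.  When more
 * than one output is expected, the method must return an ACPI package
 * whose element count matches info->output_count; each element is then
 * copied back into the caller's cgs_acpi_method_argument array.
 */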
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	char name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kcalloc(input.count, sizeof(union acpi_object), GFP_KERNEL);
		if (params == NULL)
			return -ENOMEM;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree(input.pointer);
	return result;
}
#else
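/*
 * Stub for kernels built without CONFIG_ACPI: fail with -EIO, the same
 * error a caller would see from a failed evaluation.
 */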
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif

static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
				       uint32_t acpi_method,
				       uint32_t acpi_function,
				       void *pinput, void *poutput,
				       uint32_t output_count,
				       uint32_t input_size,
				       uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}

static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_get_pci_resource,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_rel_firmware,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state,
	amdgpu_cgs_get_active_displays_info,
	amdgpu_cgs_notify_dpm_enabled,
	amdgpu_cgs_call_acpi_method,
	amdgpu_cgs_query_system_info,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};

struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}