/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
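
/*
 * Note on versioning (see amdgpu_vce_sw_init() below): the raw
 * ucode_version word carries major/minor/binary id fields, which get
 * repacked into adev->vce.fw_version as
 * (major << 24) | (minor << 16) | (binary_id << 8). So e.g. firmware
 * 52.0.3 yields fw_version 0x34000300, and interface checks such as
 * (fw_version >> 24) >= 52 recover the major version.
 */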

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %u.%u Binary ID: %u\n",
		 version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
			      (void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}
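
/*
 * The entity set up below is the scheduler entity the kernel itself uses
 * for VCE submissions, e.g. the destroy messages issued from
 * amdgpu_vce_free_handles() for sessions that user space leaked; it is
 * torn down again in amdgpu_vce_sw_fini() above.
 */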

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the entity used for handle management in the kernel driver
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}
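
/*
 * Power handling works in tandem with the idle work above:
 * amdgpu_vce_ring_begin_use() cancels the delayed work and, if it was not
 * pending (i.e. the block was already idle), re-enables clocks and power;
 * amdgpu_vce_ring_end_use() re-arms the work, so VCE gets powered down
 * again VCE_IDLE_TIMEOUT after the last submission has drained.
 */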

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @bo: buffer object the feedback buffer is placed in
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_bo *bo,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	addr = amdgpu_bo_gpu_offset(bo);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
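
/*
 * Message framing, as assumed by the builders above and below and enforced
 * by the command stream parser: every VCE command starts with its length
 * in bytes (header included, so at least 8 and a multiple of 4), followed
 * by the command id, then the payload dwords.
 */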

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit directly to the ring or via the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
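
/*
 * What follows is the command stream validation. The buffer checks run in
 * two passes over each IB (see amdgpu_vce_ring_parse_cs()):
 * amdgpu_vce_validate_bo() first constrains the placement of every
 * referenced BO so it cannot cross a 4GB boundary, then
 * amdgpu_vce_cs_reloc() patches the stream with the real GPU addresses.
 */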

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}
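
/*
 * Session handles are tracked in adev->vce.handles[] together with the
 * owning drm_file in adev->vce.filp[]; the ownership check below is what
 * keeps one client from submitting commands against another client's
 * encode session.
 */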

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index, or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
						   idx + 7, 0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

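		/*
		 * For the create command below, dwords idx + 8 and idx + 10
		 * are presumably the encoder width and height; the
		 * * 8 * 3 / 2 factor matches a 4:2:0 surface size estimate
		 * that the later relocation checks (context, encode and MV
		 * buffers) are scaled against.
		 */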
		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}
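
/*
 * In VM mode the addresses in the stream are virtual, so the parser below
 * only tracks session create/destroy commands for handle accounting and
 * does no buffer validation or relocation patching.
 */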

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job the IB belongs to (unused here)
 * @ib: the IB to execute
 * @flags: fence flags (unused here)
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}
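
/*
 * While the ring test above only checks that the engine consumes commands
 * (the read pointer advances past VCE_CMD_END), the IB test below
 * exercises the full path: it submits a create and a destroy message for
 * a dummy session and waits for the destroy fence to signal.
 */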

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout for the wait, in jiffies
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}