/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI		"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware, stack and heap BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
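
	/*
	 * The common firmware header packs the ucode version as 12 bits of
	 * major version, 12 bits of minor version and 8 bits of binary ID:
	 *
	 *   31          20 19           8 7         0
	 *   [    major    ][    minor    ][ binary  ]
	 *
	 * e.g. ucode_version 0x03400100 decodes to 52.1, binary ID 0.  The
	 * fields are repacked below into adev->vce.fw_version with 8 bits
	 * each, so later checks like (fw_version >> 24) >= 52 can compare
	 * directly against the major version.
	 */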
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
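	/*
	 * The VCPU BO sits in VRAM, whose contents may not survive suspend,
	 * so re-upload the firmware on resume: skip the common header in
	 * the firmware blob and copy the raw ucode to the start of the BO.
	 */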
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	dummy = ib->gpu_addr + 1024;
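
	/*
	 * VCE messages are a sequence of packets, each starting with its
	 * length in bytes (header included) followed by a command dword and
	 * the payload.  The session packet below is 0x0000000c bytes long:
	 * one dword each for length, command and session handle.
	 */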
	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED,
			       NULL, &f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the msg directly or through the scheduler
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib,
				       AMDGPU_FENCE_OWNER_UNDEFINED,
				       NULL, &f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
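
/*
 * User space references buffers in the command stream by GPU address,
 * split into a low and a high dword.  amdgpu_vce_cs_reloc() looks that
 * address up in the VM mappings of the submitting client, verifies the
 * backing BO is large enough and rewrites both dwords with the real GPU
 * offset of the buffer.
 */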
/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
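
/*
 * The parser below walks the IB packet by packet: every IB must begin
 * with a session command, a create command records the image size for
 * the session, and each buffer command is bounds-checked and patched
 * via amdgpu_vce_cs_reloc() before the stream is handed to the HW.
 */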
/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	amdgpu_vce_note_usage(p->adev);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, either way free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address the fence value is written to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
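
/*
 * The ring test only checks that the engine consumes commands: a single
 * VCE_CMD_END is committed and the read pointer is polled until it moves
 * away from the previously sampled value or the timeout expires.
 */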
/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	fence_put(fence);
	return r;
}