/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_BONAIRE	"radeon/BONAIRE_vce.bin"

MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_vce_idle_work_handler(struct work_struct *work);

/**
 * radeon_vce_init - allocate memory, load vce firmware
 *
 * @rdev: radeon_device pointer
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int radeon_vce_init(struct radeon_device *rdev)
{
	static const char *fw_version = "[ATI LIB=VCEFW,";
	static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
	unsigned long size;
	const char *fw_name, *c;
	uint8_t start, mid, end;
	int i, r;

	INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);

	switch (rdev->family) {
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	/*
	 * The firmware image carries no dedicated version field, so scan
	 * the image for the version signature string and parse the numbers
	 * following it.  The -9 leaves room for the "xx.xx.xx]" suffix.
	 */
	size = rdev->vce_fw->size - strlen(fw_version) - 9;
	c = rdev->vce_fw->data;
	for (; size > 0; --size, ++c)
		if (strncmp(c, fw_version, strlen(fw_version)) == 0)
			break;

	if (size == 0) {
		r = -EINVAL;
		goto error;
	}

	c += strlen(fw_version);
	/* start/mid/end are unsigned, so scan with %hhu */
	if (sscanf(c, "%2hhu.%2hhu.%2hhu]", &start, &mid, &end) != 3) {
		r = -EINVAL;
		goto error;
	}

	/* search for the feedback version the same way, -3 leaves room for "xx]" */

	size = rdev->vce_fw->size - strlen(fb_version) - 3;
	c = rdev->vce_fw->data;
	for (; size > 0; --size, ++c)
		if (strncmp(c, fb_version, strlen(fb_version)) == 0)
			break;

	if (size == 0) {
		r = -EINVAL;
		goto error;
	}

	c += strlen(fb_version);
	if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1) {
		r = -EINVAL;
		goto error;
	}

	DRM_INFO("Found VCE firmware/feedback version %hhu.%hhu.%hhu / %u!\n",
		 start, mid, end, rdev->vce.fb_version);

	rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);

	/* we can only work with this fw version for now */
	if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) {
		r = -EINVAL;
		goto error;
	}

	/* allocate firmware, stack and heap BO */

	size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
	       RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
		goto error;
	}

	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
		goto error;
	}

	r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->vce.gpu_addr);
	radeon_bo_unreserve(rdev->vce.vcpu_bo);
	if (r) {
		radeon_bo_unref(&rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
		goto error;
	}

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		atomic_set(&rdev->vce.handles[i], 0);
		rdev->vce.filp[i] = NULL;
	}

	return 0;

error:
	/* don't leak the firmware image on a failed init */
	release_firmware(rdev->vce_fw);
	return r;
}
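/*
 * Rough sketch of the VCPU buffer object created by radeon_vce_init()
 * above; the offsets simply follow the size calculation.  Only the
 * firmware region is ever touched by the driver (see radeon_vce_resume()),
 * the stack and heap areas are presumably managed by the VCE firmware
 * itself:
 *
 *   +-----------------------------------+  offset 0
 *   | firmware image (page aligned)     |
 *   +-----------------------------------+  RADEON_GPU_PAGE_ALIGN(fw size)
 *   | stack (RADEON_VCE_STACK_SIZE)     |
 *   +-----------------------------------+
 *   | heap (RADEON_VCE_HEAP_SIZE)       |
 *   +-----------------------------------+
 */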
/**
 * radeon_vce_fini - free memory
 *
 * @rdev: radeon_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
void radeon_vce_fini(struct radeon_device *rdev)
{
	if (rdev->vce.vcpu_bo == NULL)
		return;

	radeon_bo_unref(&rdev->vce.vcpu_bo);

	release_firmware(rdev->vce_fw);
}

/**
 * radeon_vce_suspend - unpin VCE fw memory
 *
 * @rdev: radeon_device pointer
 *
 * Suspend is only possible while no encode sessions are open.
 */
int radeon_vce_suspend(struct radeon_device *rdev)
{
	int i;

	if (rdev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&rdev->vce.handles[i]))
			break;

	if (i == RADEON_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * radeon_vce_resume - pin VCE fw memory
 *
 * @rdev: radeon_device pointer
 *
 * Re-upload the firmware image into the already pinned VCPU BO.
 */
int radeon_vce_resume(struct radeon_device *rdev)
{
	void *cpu_addr;
	int r;

	if (rdev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);

	radeon_bo_kunmap(rdev->vce.vcpu_bo);

	radeon_bo_unreserve(rdev->vce.vcpu_bo);

	return 0;
}

/**
 * radeon_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it is not used anymore
 */
static void radeon_vce_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, vce.idle_work.work);

	if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) &&
	    (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_vce(rdev, false);
		} else {
			radeon_set_vce_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}
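/*
 * How the two halves of the power management scheme fit together: every
 * submission path calls radeon_vce_note_usage() below, which powers the
 * block up if necessary and (re)arms vce.idle_work.  Once no new fences
 * have been emitted on either VCE ring for VCE_IDLE_TIMEOUT_MS, the
 * handler above powers VCE back down, either through DPM or by simply
 * turning the VCE clocks off.
 */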
/**
 * radeon_vce_note_usage - power up VCE
 *
 * @rdev: radeon_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
void radeon_vce_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_vce(rdev, true);
		} else {
			radeon_set_vce_clocks(rdev, 53300, 40000);
		}
	}
}

/**
 * radeon_vce_free_handles - free still open VCE handles
 *
 * @rdev: radeon_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->vce.handles[i]);
		if (!handle || rdev->vce.filp[i] != filp)
			continue;

		radeon_vce_note_usage(rdev);

		r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
					       handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		rdev->vce.filp[i] = NULL;
		atomic_set(&rdev->vce.handles[i], 0);
	}
}

/**
 * radeon_vce_get_create_msg - generate a VCE create msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	/* point the feedback buffer at unused space inside the IB */
	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
	ib.ptr[ib.length_dw++] = 0x00000000;
	ib.ptr[ib.length_dw++] = 0x00000042;
	ib.ptr[ib.length_dw++] = 0x0000000a;
	ib.ptr[ib.length_dw++] = 0x00000001;
	ib.ptr[ib.length_dw++] = 0x00000080;
	ib.ptr[ib.length_dw++] = 0x00000060;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x0000000c;
	ib.ptr[ib.length_dw++] = 0x00000000;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	/* zero out the remainder of the IB */
	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
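/*
 * A note on the message framing used above: judging by the layout and by
 * how radeon_vce_cs_parse() walks an IB, every VCE command is a
 * (length, command, payload...) tuple with the length given in bytes and
 * including the length dword itself.  0x0000000c/0x00000001 is thus a
 * twelve byte session command whose only payload is the handle.  The
 * feedback buffer of these test messages points at unused space inside
 * the IB because nobody ever looks at the result.
 */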
/**
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */

	/* zero out the remainder of the IB */
	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	}

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}

/**
 * radeon_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: dword index of the lower address half
 * @hi: dword index of the higher address half
 *
 * Patch relocation inside command stream with real buffer address
 */
int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
{
	struct radeon_cs_chunk *relocs_chunk;
	uint64_t offset;
	unsigned idx;

	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	offset = radeon_get_ib_value(p, lo);
	idx = radeon_get_ib_value(p, hi);

	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	offset += p->relocs_ptr[(idx / 4)]->gpu_offset;

	p->ib.ptr[lo] = offset & 0xFFFFFFFF;
	p->ib.ptr[hi] = offset >> 32;

	return 0;
}
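/*
 * A note on radeon_vce_cs_reloc() above: @lo and @hi are dword indices
 * into the IB, not addresses.  Before patching, userspace puts the offset
 * into the buffer in the @lo slot and the index into the relocation chunk
 * in the @hi slot; afterwards the two slots together hold the real 64 bit
 * GPU address.  The VCE messages store the upper address half in front of
 * the lower one, which is why the callers below pass e.g. p->idx + 3 as
 * @lo but p->idx + 2 as @hi.
 */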
/**
 * radeon_vce_cs_parse - parse and validate the command stream
 *
 * @p: parser context
 *
 * Validate each command and patch buffer relocations before the IB is
 * handed to the hardware.
 */
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
	uint32_t handle = 0;
	bool destroy = false;
	int i, r;

	while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
		uint32_t len = radeon_get_ib_value(p, p->idx);
		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			return -EINVAL;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = radeon_get_ib_value(p, p->idx + 2);
			break;

		case 0x00000002: // task info
		case 0x01000001: // create
		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
			break;

		case 0x03000001: // encode
			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
			if (r)
				return r;

			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
			if (r)
				return r;
			break;

		case 0x02000001: // destroy
			destroy = true;
			break;

		case 0x05000001: // context buffer
		case 0x05000004: // video bitstream buffer
		case 0x05000005: // feedback buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
			if (r)
				return r;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			return -EINVAL;
		}

		/* len is in bytes, the IB index in dwords */
		p->idx += len / 4;
	}

	if (destroy) {
		/* IB contains a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);

		return 0;
	}

	/* create or encode, validate the handle */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->rdev->vce.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
			p->rdev->vce.filp[i] = p->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
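/*
 * Session handle lifecycle, pieced together from the functions above: a
 * handle is claimed in radeon_vce_cs_parse() via atomic_cmpxchg() on the
 * first create/encode message, remembered together with the submitting
 * file, and released again either by a destroy message or by
 * radeon_vce_free_handles() when the file is closed.
 */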
/**
 * radeon_vce_semaphore_emit - emit a semaphore command
 *
 * @rdev: radeon_device pointer
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 * Returns true, VCE semaphore emission always succeeds.
 */
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
			       struct radeon_ring *ring,
			       struct radeon_semaphore *semaphore,
			       bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	radeon_ring_write(ring, VCE_CMD_SEMAPHORE);
	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
	if (!emit_wait)
		radeon_ring_write(ring, VCE_CMD_END);

	return true;
}

/**
 * radeon_vce_ib_execute - execute indirect buffer
 *
 * @rdev: radeon_device pointer
 * @ib: the IB to execute
 *
 * Write the IB execution command to the ring.
 */
void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	radeon_ring_write(ring, VCE_CMD_IB);
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
	radeon_ring_write(ring, ib->length_dw);
}

/**
 * radeon_vce_fence_emit - add a fence command to the ring
 *
 * @rdev: radeon_device pointer
 * @fence: the fence
 *
 */
void radeon_vce_fence_emit(struct radeon_device *rdev,
			   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	/* keep the full 64 bit address, otherwise upper_32_bits() is always 0 */
	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;

	radeon_ring_write(ring, VCE_CMD_FENCE);
	radeon_ring_write(ring, addr);
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, VCE_CMD_TRAP);
	radeon_ring_write(ring, VCE_CMD_END);
}

/**
 * radeon_vce_ring_test - test if VCE ring is working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 *
 * Submit a single VCE_CMD_END and wait for the read pointer to move.
 */
int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
	unsigned i;
	int r;

	r = radeon_ring_lock(rdev, ring, 16);
	if (r) {
		DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, VCE_CMD_END);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * radeon_vce_ib_test - test if VCE IBs are working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 *
 * Run a create/destroy message pair and wait for the resulting fence.
 */
int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
	if (r) {
		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = radeon_fence_wait(fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	radeon_fence_unref(&fence);
	return r;
}