/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_TAHITI		"radeon/TAHITI_vce.bin"
#define FIRMWARE_BONAIRE	"radeon/BONAIRE_vce.bin"

MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_vce_idle_work_handler(struct work_struct *work);

/**
 * radeon_vce_init - allocate memory, load vce firmware
 *
 * @rdev: radeon_device pointer
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int radeon_vce_init(struct radeon_device *rdev)
{
	static const char *fw_version = "[ATI LIB=VCEFW,";
	static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
	unsigned long size;
	const char *fw_name, *c;
	uint8_t start, mid, end;
	int i, r;

	INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);

	switch (rdev->family) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_ARUBA:
		fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	/* search for firmware version */

	size = rdev->vce_fw->size - strlen(fw_version) - 9;
	c = rdev->vce_fw->data;
	for (; size > 0; --size, ++c)
		if (strncmp(c, fw_version, strlen(fw_version)) == 0)
			break;

	if (size == 0)
		return -EINVAL;

	c += strlen(fw_version);
	if (sscanf(c, "%2hhu.%2hhu.%2hhu]", &start, &mid, &end) != 3)
		return -EINVAL;

	/* search for feedback version */

	size = rdev->vce_fw->size - strlen(fb_version) - 3;
	c = rdev->vce_fw->data;
	for (; size > 0; --size, ++c)
		if (strncmp(c, fb_version, strlen(fb_version)) == 0)
			break;

	if (size == 0)
		return -EINVAL;

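	/*
	 * The feedback signature is followed by a decimal number and a
	 * closing bracket, e.g. "[ATI LIB=VCEFWSTATS,20]" (the value here
	 * is illustrative, not taken from a real firmware image).
	 */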
	c += strlen(fb_version);
	if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
		return -EINVAL;

	DRM_INFO("Found VCE firmware/feedback version %hhu.%hhu.%hhu / %d!\n",
		 start, mid, end, rdev->vce.fb_version);

	rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);

	/* we can only work with this fw version for now */
	if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) &&
	    (rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) &&
	    (rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8))))
		return -EINVAL;

	/* allocate firmware, stack and heap BO */

	if (rdev->family < CHIP_BONAIRE)
		size = vce_v1_0_bo_size(rdev);
	else
		size = vce_v2_0_bo_size(rdev);
	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
			     &rdev->vce.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->vce.gpu_addr);
	radeon_bo_unreserve(rdev->vce.vcpu_bo);
	if (r) {
		radeon_bo_unref(&rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		atomic_set(&rdev->vce.handles[i], 0);
		rdev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * radeon_vce_fini - free memory
 *
 * @rdev: radeon_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
void radeon_vce_fini(struct radeon_device *rdev)
{
	if (rdev->vce.vcpu_bo == NULL)
		return;

	radeon_bo_unref(&rdev->vce.vcpu_bo);

	release_firmware(rdev->vce_fw);
}

/**
 * radeon_vce_suspend - unpin VCE fw memory
 *
 * @rdev: radeon_device pointer
 *
 */
int radeon_vce_suspend(struct radeon_device *rdev)
{
	int i;

	if (rdev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&rdev->vce.handles[i]))
			break;

	if (i == RADEON_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * radeon_vce_resume - pin VCE fw memory
 *
 * @rdev: radeon_device pointer
 *
 */
int radeon_vce_resume(struct radeon_device *rdev)
{
	void *cpu_addr;
	int r;

	if (rdev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vce.vcpu_bo);
		dev_err(rdev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo));
	if (rdev->family < CHIP_BONAIRE)
		r = vce_v1_0_load_fw(rdev, cpu_addr);
	else
		memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);

	radeon_bo_kunmap(rdev->vce.vcpu_bo);

	radeon_bo_unreserve(rdev->vce.vcpu_bo);

	return r;
}

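/*
 * Power management works hand in hand with the idle work below:
 * radeon_vce_note_usage() powers the block up (via DPM or explicit VCE
 * clocks) and (re)arms idle_work, and the handler powers VCE back down
 * once no fences have been emitted on either VCE ring for
 * VCE_IDLE_TIMEOUT_MS.
 */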
/**
 * radeon_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used anymore
 */
static void radeon_vce_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, vce.idle_work.work);

	if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) &&
	    (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_vce(rdev, false);
		} else {
			radeon_set_vce_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * radeon_vce_note_usage - power up VCE
 *
 * @rdev: radeon_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
void radeon_vce_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);

	set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_vce(rdev, true);
		} else {
			radeon_set_vce_clocks(rdev, 53300, 40000);
		}
	}
}

/**
 * radeon_vce_free_handles - free still open VCE handles
 *
 * @rdev: radeon_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->vce.handles[i]);

		if (!handle || rdev->vce.filp[i] != filp)
			continue;

		radeon_vce_note_usage(rdev);

		r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
					       handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		rdev->vce.filp[i] = NULL;
		atomic_set(&rdev->vce.handles[i], 0);
	}
}

/**
 * radeon_vce_get_create_msg - generate a VCE create msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

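	/*
	 * VCE messages are a sequence of { length-in-bytes, command,
	 * payload... } dword blocks; the session command has to come first
	 * so the following commands are associated with the right session
	 * (the CS parser enforces the same ordering).
	 */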
	/* stitch together a VCE create msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
	ib.ptr[ib.length_dw++] = cpu_to_le32(handle);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
	ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = cpu_to_le32(0x0);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}

/**
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
	ib.ptr[ib.length_dw++] = cpu_to_le32(handle);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
	ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
	ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);

	ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
	ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = cpu_to_le32(0x0);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}

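/*
 * The create/destroy messages above are used by the IB test and by
 * radeon_vce_free_handles(); the feedback buffer they reference is just
 * dummy space placed 1024 bytes into the same IB (ib.gpu_addr + 1024).
 */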
/**
 * radeon_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum required size of the relocation buffer
 *
 * Patch relocation inside command stream with real buffer address
 */
int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
			unsigned size)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_bo_list *reloc;
	uint64_t start, end, offset;
	unsigned idx;

	relocs_chunk = p->chunk_relocs;
	offset = radeon_get_ib_value(p, lo);
	idx = radeon_get_ib_value(p, hi);

	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d!\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = &p->relocs[(idx / 4)];
	start = reloc->gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[lo] = start & 0xFFFFFFFF;
	p->ib.ptr[hi] = start >> 32;

	if (end <= start) {
		DRM_ERROR("invalid reloc offset %llX!\n", offset);
		return -EINVAL;
	}
	if ((end - start) < size) {
		DRM_ERROR("buffer too small (%d / %d)!\n",
			  (unsigned)(end - start), size);
		return -EINVAL;
	}

	return 0;
}

/**
 * radeon_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index, or -EINVAL
 * if we don't have a free session index left.
 */
static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
			if (p->rdev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
			p->rdev->vce.filp[i] = p->filp;
			p->rdev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * radeon_vce_cs_parse - parse and validate the command stream
 *
 * @p: parser context
 *
 */
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
	int session_idx = -1;
	bool destroyed = false, created = false, allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0;

	while (p->idx < p->chunk_ib->length_dw) {
		uint32_t len = radeon_get_ib_value(p, p->idx);
		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = radeon_get_ib_value(p, p->idx + 2);
			session_idx = radeon_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->rdev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			break;

		case 0x01000001: /* create */
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = radeon_get_ib_value(p, p->idx + 8) *
				radeon_get_ib_value(p, p->idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
			break;

		case 0x03000001: /* encode */
			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
						*size);
			if (r)
				goto out;

			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
						*size / 3);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed = true;
			break;

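		/*
		 * The buffer commands below carry a GPU address as a
		 * { hi, lo } dword pair directly after the command dword;
		 * radeon_vce_cs_reloc() patches in the real address and
		 * checks that the buffer is large enough.
		 */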
		case 0x05000001: /* context buffer */
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						*size * 2);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = radeon_get_ib_value(p, p->idx + 4);
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						tmp);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
						4096);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		p->idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, either way free the handle
		 */
		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
	}

	return r;
}

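/*
 * Ring packet helpers below write raw VCE commands. Note that the
 * semaphore address is 8-byte aligned and therefore transferred as two
 * 20-bit chunks of (addr >> 3).
 */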
/**
 * radeon_vce_semaphore_emit - emit a semaphore command
 *
 * @rdev: radeon_device pointer
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
			       struct radeon_ring *ring,
			       struct radeon_semaphore *semaphore,
			       bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
	radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
	radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
	radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
	if (!emit_wait)
		radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));

	return true;
}

/**
 * radeon_vce_ib_execute - execute indirect buffer
 *
 * @rdev: radeon_device pointer
 * @ib: the IB to execute
 *
 */
void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB));
	radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr));
	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr)));
	radeon_ring_write(ring, cpu_to_le32(ib->length_dw));
}

/**
 * radeon_vce_fence_emit - add a fence command to the ring
 *
 * @rdev: radeon_device pointer
 * @fence: the fence
 *
 */
void radeon_vce_fence_emit(struct radeon_device *rdev,
			   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;

	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
	radeon_ring_write(ring, cpu_to_le32(addr));
	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
	radeon_ring_write(ring, cpu_to_le32(fence->seq));
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
}

/**
 * radeon_vce_ring_test - test if VCE ring is working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 *
 */
int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
	unsigned i;
	int r;

	r = radeon_ring_lock(rdev, ring, 16);
	if (r) {
		DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
			break;
		udelay(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * radeon_vce_ib_test - test if VCE IBs are working
 *
 * @rdev: radeon_device pointer
 * @ring: the engine to test on
 *
 */
int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
	if (r) {
		DRM_ERROR("radeon: failed to get destroy msg (%d).\n", r);
		goto error;
	}

	r = radeon_fence_wait_timeout(fence, false, usecs_to_jiffies(
				      RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		r = -ETIMEDOUT;
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	radeon_fence_unref(&fence);
	return r;
}