/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* These are common relative offsets for all asics, from uvd_7_0_offset.h */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800
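
/*
 * The FW_x_y_z version macros above use the same packing that
 * amdgpu_uvd_sw_init() builds into adev->uvd.fw_version from the ucode
 * header: (major << 24) | (minor << 16) | (family_id << 8).  For example
 * FW_1_66_16 = (1 << 24) | (66 << 16) | (16 << 8) = 0x01421000, so plain
 * integer comparisons such as "fw_version >= FW_1_66_16" order firmware
 * versions correctly.
 */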

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		 version_major, version_minor, family_id);

	/*
	 * Limit the number of UVD handles depending on microcode major
	 * and minor versions. Firmware version 1.80 is the first one to
	 * support 40 UVD handles, so all subsequent versions should also
	 * have that support.
	 */
	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
				(family_id << 8));

	if ((adev->asic_type == CHIP_POLARIS10 ||
	     adev->asic_type == CHIP_POLARIS11) &&
	    (adev->uvd.fw_version < FW_1_66_16))
		DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
			  version_major, version_minor);

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}

		ring = &adev->uvd.inst[j].ring;
		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
					  rq, NULL);
		if (r != 0) {
			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
			return r;
		}

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			atomic_set(&adev->uvd.inst[j].handles[i], 0);
			adev->uvd.inst[j].filp[i] = NULL;
		}
	}
	/* from UVD v5.0 on, HW addressing capacity increased to 64 bits */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		kfree(adev->uvd.inst[j].saved_bo);

		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	release_firmware(adev->uvd.fw);

	return 0;
}
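
/*
 * Suspend/resume note: amdgpu_uvd_suspend() copies the whole VCPU BO
 * (the firmware image when CPU loaded, plus stack, heap and per-session
 * data) out of VRAM with memcpy_fromio(), since VRAM contents are not
 * expected to survive suspend.  amdgpu_uvd_resume() either restores that
 * snapshot or, when nothing was saved, re-uploads the firmware image and
 * clears the rest of the BO.
 */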

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);

		/* only valid for physical mode */
		if (adev->asic_type < CHIP_POLARIS10) {
			for (i = 0; i < adev->uvd.max_handles; ++i)
				if (atomic_read(&adev->uvd.inst[j].handles[i]))
					break;

			if (i == adev->uvd.max_handles)
				continue;
		}

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
	}
	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
			kfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore uvd fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		ring = &adev->uvd.inst[j].ring;

		for (i = 0; i < adev->uvd.max_handles; ++i) {
			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);

			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
				struct dma_fence *fence;

				r = amdgpu_uvd_get_destroy_msg(ring, handle,
							       false, &fence);
				if (r) {
					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
					continue;
				}

				dma_fence_wait(fence, false);
				dma_fence_put(fence);

				adev->uvd.inst[j].filp[i] = NULL;
				atomic_set(&adev->uvd.inst[j].handles[i], 0);
			}
		}
	}
}

static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;

	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}
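
/*
 * ctx->data0/data1 hold the IB indices of the UVD_GPCOM_VCPU_DATA0/DATA1
 * register writes recorded by amdgpu_uvd_cs_reg().  The address assembled
 * above is the 64-bit value the command stream asked for; the second
 * parsing pass (amdgpu_uvd_cs_pass2) rewrites those two dwords with the
 * real GPU offset of the backing buffer object.
 */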

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
					   * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}
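
/*
 * Worked example of the sizing above (H.264, stream_type 0): a 1920x1088
 * level 4.1 stream gives width_in_mb = 120, height_in_mb = 68, so
 * fs_in_mb = 8160 and num_dpb_buffer = 32768 / 8160 + 1 = 5.  With
 * image_size = ALIGN(1920 * 1088 * 3 / 2, 1024) = 3133440 bytes, the
 * minimum DPB comes out to 5 * 3133440 + 120 * 68 * 5 * 192 +
 * 120 * 68 * 32 = 23761920 bytes (~22.7 MB), which the dpb_size field of
 * the decode message has to cover.
 */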

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;
	uint32_t ip_instance = ctx->parser->job->ring->me;

	if (offset & 0x3F) {
		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
		amdgpu_bo_kunmap(bo);
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
		amdgpu_bo_kunmap(bo);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}
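
/*
 * Session handle lifecycle: a create message claims the first free slot in
 * adev->uvd.inst[].handles[] via atomic_cmpxchg() and records the owning
 * file in filp[]; decode messages are only accepted for handles owned by
 * the submitting file; a destroy message (or amdgpu_uvd_free_handles() on
 * file close) clears the slot again.
 */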

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}
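
/*
 * A well formed UVD command in the IB is therefore a PACKET0 write to
 * UVD_GPCOM_VCPU_DATA0 (lower address bits), one to UVD_GPCOM_VCPU_DATA1
 * (upper address bits) and finally one to UVD_GPCOM_VCPU_CMD, which is
 * what triggers the pass1/pass2 callback above; amdgpu_uvd_send_msg()
 * below emits exactly this sequence for the driver's own messages.
 */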

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);

		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the IB to parse within the job
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,		/* message buffer */
		[0x00000001]	=	0xFFFFFFFF,	/* DPB, size taken from the decode msg */
		[0x00000002]	=	0xFFFFFFFF,	/* decoding target, size taken from the decode msg */
		[0x00000003]	=	2048,		/* feedback buffer */
		[0x00000004]	=	0xFFFFFFFF,	/* context buffer, size taken from the decode msg */
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}
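
/*
 * amdgpu_uvd_send_msg() submits a driver generated message buffer to the
 * ring, either directly (as in the IB ring test, without going through the
 * scheduler) or as a regular scheduler job (as when handles are cleaned up
 * on file close).
 */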

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint32_t data[4];
	uint64_t addr;
	long r;
	int i;
	unsigned offset_idx = 0;
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);

	if (!ring->adev->uvd.address_64_bit) {
		struct ttm_operation_ctx ctx = { true, false };

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			goto err;
	}

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		offset_idx = 1 + ring->me;
		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
	}

	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
		if (r < 0)
			goto err_free;

		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r)
			goto err_free;

		r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}
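
/*
 * Power management: the delayed work below gates the UVD block (through
 * DPM when available, otherwise via explicit clock/power gating calls)
 * once no fences are outstanding on any UVD ring.
 * amdgpu_uvd_ring_begin_use() cancels the work and powers the block back
 * up before new submissions, and amdgpu_uvd_ring_end_use() re-arms the
 * one second idle timeout.
 */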

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
	unsigned fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
		}
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
	}
}

void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: timeout in jiffies to wait for the IB test fence
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;
	uint32_t ip_instance = ring->me;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
	} else {
		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.inst->handles[i]))
			used_handles++;
	}

	return used_handles;
}