/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

#include "amdgpu_ras.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"

/* These are common relative offsets for all asics, from uvd_7_0_offset.h */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

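/**
 * amdgpu_uvd_sw_init - initialize UVD software state
 *
 * @adev: amdgpu_device pointer
 *
 * Request and validate the UVD microcode for this ASIC, derive the firmware
 * version and the maximum number of handles from its header, and allocate
 * the VCPU buffer object for every non-harvested UVD instance.
 */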
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned family_id;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default number of UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;

	if (adev->asic_type < CHIP_VEGA20) {
		unsigned version_major, version_minor;

		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
			 version_major, version_minor, family_id);

		/*
		 * Limit the number of UVD handles depending on microcode major
		 * and minor versions. Firmware version 1.80 is the first with
		 * support for 40 UVD instances, so all subsequent versions
		 * should also have that support.
		 */
		if ((version_major > 0x01) ||
		    ((version_major == 0x01) && (version_minor >= 0x50)))
			adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
					(family_id << 8));

		if ((adev->asic_type == CHIP_POLARIS10 ||
		     adev->asic_type == CHIP_POLARIS11) &&
		    (adev->uvd.fw_version < FW_1_66_16))
			DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
				  version_major, version_minor);
	} else {
		unsigned int enc_major, enc_minor, dec_minor;

		dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
		enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
		DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
			 enc_major, enc_minor, dec_minor, family_id);

		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

		adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
	}

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
			return r;
		}
	}

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* From UVD v5.0 onward the HW addressing capacity increased to 64 bit */
	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

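/**
 * amdgpu_uvd_sw_fini - tear down UVD software state
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy the scheduler entity, free any saved VCPU image, the VCPU buffer
 * objects and the decode/encode rings, and release the firmware.
 */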
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->uvd.idle_work);
	drm_sched_entity_destroy(&adev->uvd.entity);

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		kvfree(adev->uvd.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
				      &adev->uvd.inst[j].gpu_addr,
				      (void **)&adev->uvd.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->uvd.inst[j].ring);

		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
	}
	release_firmware(adev->uvd.fw);

	return 0;
}

/**
 * amdgpu_uvd_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the kernel scheduler entity used for UVD submissions.
 */
int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	ring = &adev->uvd.inst[0].ring;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
	if (r) {
		DRM_ERROR("Failed setting up UVD kernel entity.\n");
		return r;
	}

	return 0;
}

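/**
 * amdgpu_uvd_suspend - save UVD VCPU state before suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Copy the VCPU buffer of every active UVD instance to system memory so it
 * can be restored on resume. On pre-Polaris parts this is skipped when no
 * handle is in use.
 */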
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, j;
	bool in_ras_intr = amdgpu_ras_intr_triggered();

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	/* only valid for physical mode */
	if (adev->asic_type < CHIP_POLARIS10) {
		for (i = 0; i < adev->uvd.max_handles; ++i)
			if (atomic_read(&adev->uvd.handles[i]))
				break;

		if (i == adev->uvd.max_handles)
			return 0;
	}

	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		if (adev->uvd.inst[j].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
		ptr = adev->uvd.inst[j].cpu_addr;

		adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->uvd.inst[j].saved_bo)
			return -ENOMEM;

		/* re-write 0 since err_event_athub will corrupt VCPU buffer */
		if (in_ras_intr)
			memset(adev->uvd.inst[j].saved_bo, 0, size);
		else
			memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
	}

	if (in_ras_intr)
		DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");

	return 0;
}

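/**
 * amdgpu_uvd_resume - restore UVD VCPU state on resume
 *
 * @adev: amdgpu_device pointer
 *
 * Write the saved VCPU image back into the VCPU buffer object, or reload the
 * firmware image and clear the remainder of the buffer when nothing was saved.
 */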
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		if (adev->uvd.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
		ptr = adev->uvd.inst[i].cpu_addr;

		if (adev->uvd.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
			kvfree(adev->uvd.inst[i].saved_bo);
			adev->uvd.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
			/* to restore uvd fence seq */
			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
		}
	}
	return 0;
}

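/**
 * amdgpu_uvd_free_handles - clean up sessions of a closing client
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file of the client that is going away
 *
 * Submit a destroy message for every handle still owned by @filp and release
 * the corresponding handle slot.
 */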
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
	int i, r;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);

		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct dma_fence *fence;

			r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
						       &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD %d!\n", r);
				continue;
			}

			dma_fence_wait(fence, false);
			dma_fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

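/*
 * Restrict the BO placement to the first 256MB of its domain; used on parts
 * without 64 bit UVD addressing to keep buffers inside the UVD segment.
 */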
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;

	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct ttm_operation_ctx tctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

			amdgpu_bo_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 8: /* MJPEG */
		min_dpb_size = 0;
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	/* store image width to adjust nb memory pstate */
	adev->uvd.decode_image_width = width;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n",
					  handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);

		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the IB to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

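/**
 * amdgpu_uvd_send_msg - submit a UVD message buffer
 *
 * @ring: UVD ring to submit on
 * @bo: buffer object containing the message
 * @direct: submit directly to the ring instead of the scheduler entity
 * @fence: optional fence that signals once the message has been processed
 *
 * Build a small IB that hands the GPU address of the message in @bo to the
 * VCPU and submit it, either directly or through the UVD scheduler entity.
 */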
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint32_t data[4];
	uint64_t addr;
	long r;
	int i;
	unsigned offset_idx = 0;
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);

	if (!ring->adev->uvd.address_64_bit) {
		struct ttm_operation_ctx ctx = { true, false };

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			goto err;
	}

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		offset_idx = 1 + ring->me;
		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
	}

	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
					      true, false,
					      msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
		if (r < 0)
			goto err_free;

		r = amdgpu_job_submit_direct(job, ring, &f);
		if (r)
			goto err_free;
	} else {
		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (r)
			goto err_free;

		r = amdgpu_job_submit(job, &adev->uvd.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu, so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together an UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	/* stitch together an UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

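/**
 * amdgpu_uvd_idle_work_handler - power down UVD when it is idle
 *
 * @work: pointer to the delayed work item
 *
 * If no fences are pending on any UVD ring, gate the UVD clocks and power;
 * otherwise re-arm the delayed work.
 */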
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
		}
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}

void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							       AMD_PG_STATE_UNGATE);
		}
	}
}

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.handles[i]))
			used_handles++;
	}

	return used_handles;
}