/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware versions for VI */
#define FW_1_65_10	((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11	((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12	((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15	((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16	((1 << 24) | (66 << 16) | (16 << 8))

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI		"radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_uvd.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"

#define mmUVD_GPCOM_VCPU_DATA0_VEGA10	(0x03c4 + 0x7e00)
#define mmUVD_GPCOM_VCPU_DATA1_VEGA10	(0x03c5 + 0x7e00)
#define mmUVD_GPCOM_VCPU_CMD_VEGA10	(0x03c3 + 0x7e00)
#define mmUVD_NO_OP_VEGA10		(0x03ff + 0x7e00)
#define mmUVD_ENGINE_CNTL_VEGA10	(0x03c6 + 0x7e00)

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);

MODULE_FIRMWARE(FIRMWARE_VEGA10);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

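/**
 * amdgpu_uvd_sw_init - UVD software initialization
 *
 * @adev: amdgpu_device pointer
 *
 * Request and validate the UVD firmware, pick the supported number of
 * session handles, allocate the VCPU buffer object, set up the
 * scheduler entity used for indirect submissions and clear the
 * session handle bookkeeping.
 *
 * Returns 0 on success, a negative error code on failure.
 */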
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	/* Set the default UVD handles that the firmware can handle */
	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
		 version_major, version_minor, family_id);

	/*
	 * Limit the number of UVD handles depending on microcode major
	 * and minor versions. Firmware version 1.80 was the first to
	 * support 40 UVD instances, so all later versions support them
	 * as well.
	 */
	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
				(family_id << 8));

	if ((adev->asic_type == CHIP_POLARIS10 ||
	     adev->asic_type == CHIP_POLARIS11) &&
	    (adev->uvd.fw_version < FW_1_66_16))
		DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
			  version_major, version_minor);

	bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
				    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	ring = &adev->uvd.ring;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up UVD run queue.\n");
		return r;
	}

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0 HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	switch (adev->asic_type) {
	case CHIP_TONGA:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
		break;
	case CHIP_CARRIZO:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
		break;
	case CHIP_FIJI:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
		break;
	case CHIP_STONEY:
		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
		break;
	default:
		adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
	}

	return 0;
}

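/**
 * amdgpu_uvd_sw_fini - UVD software teardown
 *
 * @adev: amdgpu_device pointer
 *
 * Free the suspend save buffer, destroy the scheduler entity, free the
 * VCPU buffer object, tear down the UVD ring and release the firmware.
 */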
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	kfree(adev->uvd.saved_bo);

	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);

	amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
			      &adev->uvd.gpu_addr,
			      (void **)&adev->uvd.cpu_addr);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < adev->uvd.max_handles; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			break;

	if (i == adev->uvd.max_handles)
		return 0;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	ptr = adev->uvd.cpu_addr;

	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->uvd.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->uvd.saved_bo, ptr, size);

	return 0;
}

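/**
 * amdgpu_uvd_resume - restore UVD VCPU state
 *
 * @adev: amdgpu_device pointer
 *
 * Restore the VCPU buffer object contents saved by amdgpu_uvd_suspend().
 * If there is no saved copy, reload the firmware image into the buffer
 * (unless the firmware is loaded through PSP) and clear the remainder.
 */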
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	ptr = adev->uvd.cpu_addr;

	if (adev->uvd.saved_bo != NULL) {
		memcpy_toio(ptr, adev->uvd.saved_bo, size);
		kfree(adev->uvd.saved_bo);
		adev->uvd.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct dma_fence *fence;

			r = amdgpu_uvd_get_destroy_msg(ring, handle,
						       false, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			dma_fence_wait(fence, false);
			dma_fence_put(fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

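/**
 * amdgpu_uvd_force_into_uvd_segment - restrict BO placement
 *
 * @abo: buffer object to restrict
 *
 * Limit every placement of the buffer object to the first 256MB, so
 * that buffers stay inside the segment UVD can address on hardware
 * without 64 bit addressing support.
 */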
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
	int i;
	for (i = 0; i < abo->placement.num_placement; ++i) {
		abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
	uint32_t lo, hi;
	uint64_t addr;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
				    unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = ~0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		if (!adev->uvd.use_ctx_buf) {
			/* macroblock context buffer */
			min_dpb_size +=
				width_in_mb * height_in_mb * num_dpb_buffer * 192;

			/* IT surface buffer */
			min_dpb_size += width_in_mb * height_in_mb * 32;
		} else {
			/* macroblock context buffer */
			min_ctx_size =
				width_in_mb * height_in_mb * num_dpb_buffer * 192;
		}
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	void *ptr;
	long r;
	int i;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	switch (msg_type) {
	case 0:
		/* it's a create msg, calc image size (width * height) */
		amdgpu_bo_kunmap(bo);

		/* try to alloc a new handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				DRM_ERROR("Handle 0x%x already in use!\n", handle);
				return -EINVAL;
			}

			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
				adev->uvd.filp[i] = ctx->parser->filp;
				return 0;
			}
		}

		DRM_ERROR("No more free UVD handles!\n");
		return -ENOSPC;

	case 1:
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

		/* validate the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i) {
			if (atomic_read(&adev->uvd.handles[i]) == handle) {
				if (adev->uvd.filp[i] != ctx->parser->filp) {
					DRM_ERROR("UVD handle collision detected!\n");
					return -EINVAL;
				}
				return 0;
			}
		}

		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
		return -ENOENT;

	case 2:
		/* it's a destroy msg, free the handle */
		for (i = 0; i < adev->uvd.max_handles; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;

	default:
		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
		return -EINVAL;
	}
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd;
	uint64_t start, end;
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->last + 1 - mapping->start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
			    lower_32_bits(start));
	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
			    upper_32_bits(start));

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
		case mmUVD_NO_OP:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	0xFFFFFFFF,
		[0x00000002]	=	0xFFFFFFFF,
		[0x00000003]	=	2048,
		[0x00000004]	=	0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
	int r;

	parser->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	r = amdgpu_cs_sysvm_access_required(parser);
	if (r)
		return r;

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round only required on chips without UVD 64 bit address support */
	if (!parser->adev->uvd.address_64_bit) {
		/* first round, make sure the buffers are actually in the UVD segment */
		r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
		if (r)
			return r;
	}

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}

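/**
 * amdgpu_uvd_send_msg - submit a UVD message buffer to the ring
 *
 * @ring: UVD ring to use
 * @bo: buffer object containing the UVD message
 * @direct: submit directly to the ring instead of through the scheduler
 * @fence: optional returned fence for the submission
 *
 * Reserve and validate the message buffer, build a small IB that points
 * the VCPU at it and submit the IB either directly or through the UVD
 * scheduler entity. The buffer object reference is dropped once the
 * message has been submitted.
 */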
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t addr;
	uint32_t data[4];
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!ring->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	if (adev->asic_type >= CHIP_VEGA10) {
		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
		data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
	} else {
		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
		data[3] = PACKET0(mmUVD_NO_OP, 0);
	}

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = data[0];
	ib->ptr[1] = addr;
	ib->ptr[2] = data[1];
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = data[2];
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = data[3];
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	ttm_eu_fence_buffer_objects(&ticket, &head, f);

	if (fence)
		*fence = dma_fence_get(f);
	amdgpu_bo_unref(&bo);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu, so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

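/**
 * amdgpu_uvd_get_destroy_msg - construct and send a session destroy message
 *
 * @ring: UVD ring to use
 * @handle: session handle to destroy
 * @direct: submit directly or through the scheduler
 * @fence: optional returned fence
 *
 * Allocate a small VRAM buffer, fill it with a UVD destroy message for
 * @handle and submit it with amdgpu_uvd_send_msg().
 */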
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	if (amdgpu_sriov_vf(adev))
		return;

	if (fences == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
			/* shutdown the UVD block */
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						     AMD_PG_STATE_GATE);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						     AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	}
}

void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (amdgpu_sriov_vf(adev))
		return;

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						     AMD_CG_STATE_UNGATE);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						     AMD_PG_STATE_UNGATE);
		}
	}
}

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: timeout in jiffies to wait for the fence
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

/**
 * amdgpu_uvd_used_handles - returns used UVD handles
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the number of UVD handles in use
 */
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
	unsigned i;
	uint32_t used_handles = 0;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		/*
		 * Handles can be freed in any order, and not
		 * necessarily linear. So we need to count
		 * all non-zero handles.
		 */
		if (atomic_read(&adev->uvd.handles[i]))
			used_handles++;
	}

	return used_handles;
}