/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI		"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

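	/*
	 * Note on the layout (derived from the shifts below): the firmware
	 * header stores the version as [31:20] major, [19:8] minor and
	 * [7:0] binary ID, e.g. 0x03400100 decodes as 52.1, binary ID 0.
	 * adev->vce.fw_version repacks it as major << 24 | minor << 16 |
	 * binary_id << 8 so it can be compared as a single integer; the
	 * create message code checks (fw_version >> 24) >= 52 this way.
	 */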
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

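	/*
	 * A single normal-priority scheduler entity on ring 0 is used for
	 * all VCE jobs the kernel submits itself, e.g. the destroy messages
	 * sent from amdgpu_vce_free_handles() below.
	 */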
	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
			      (void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

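/*
 * On resume the firmware image is copied back into the VCPU BO: the BO
 * lives in VRAM, whose contents may not survive a suspend cycle, so the
 * ucode body (everything past the common header) is rewritten each time.
 */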
/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

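/*
 * Runtime power management works without per-session reference counting:
 * amdgpu_vce_ring_begin_use() powers the block up and cancels the idle
 * work, amdgpu_vce_ring_end_use() (re)arms it, and the handler below only
 * gates clocks/power once no fences are pending on any VCE ring, otherwise
 * it re-schedules itself for another VCE_IDLE_TIMEOUT.
 */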
/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
	/* if the idle work was still pending, the block is already powered */
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

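/*
 * Sessions are a limited firmware resource (AMDGPU_MAX_VCE_HANDLES slots),
 * so handles that userspace leaked, e.g. by crashing, are torn down with a
 * kernel-generated destroy message when the DRM file is closed.
 */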
/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

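/*
 * VCE messages are a sequence of {length, command, payload...} dword
 * blocks, with the length given in bytes and covering the whole block
 * including the length dword itself; the create/destroy messages built
 * below follow the same layout the command stream parser checks further
 * down (it advances by len / 4 dwords per block).
 */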
/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* scratch address past the message, used as dummy feedback buffer */
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

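/*
 * Unlike the create message, the destroy message can be submitted either
 * directly to the ring (as the IB test does) or through the VCE scheduler
 * entity (as amdgpu_vce_free_handles() does); the @direct parameter
 * selects between the two paths.
 */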
/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit directly to the ring or through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* 0xffffffff: feedback not needed, firmware won't write any */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

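/*
 * No BO referenced by a VCE command stream may cross a 4GB boundary,
 * presumably because the firmware works with 32-bit offsets internally.
 * amdgpu_vce_validate_bo() enforces this by clamping the first/last
 * allowed page frame (fpfn/lpfn) of each BO's placements and then
 * revalidating it, before amdgpu_vce_cs_reloc() patches in the real
 * addresses.
 */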
/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: index of the IB to check
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
					 min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: index of the IB to patch
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	/* turn the VM address into the BO's physical MC address */
	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

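/*
 * Session handles are picked by userspace; the handle array maps each one
 * to the drm_file that first used it, so one process cannot reuse or
 * destroy another process's encode session.
 */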
/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: index of the IB to parse
 *
 * Walks the IB twice: the first pass validates all referenced BOs and
 * places them below the 4GB boundary, the second pass validates the
 * commands and patches the real buffer addresses into the stream.
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
						   idx + 7, 0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: index of the IB to parse
 *
 * Only tracks session creation and destruction; buffer addresses are left
 * untouched since the VM already provides valid GPU addresses.
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 * @vmid: VM ID (unused by VCE)
 * @ctx_switch: context switch flag (unused by VCE)
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: GPU address to write the sequence number to
 * @seq: sequence number to signal
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}