/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct amdgpu_bo *bo,
				     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %u.%u Binary ID: %u\n",
		 version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));
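
	/*
	 * Worked example of the decode above, using a made-up ucode_version
	 * of 0x03401011:
	 *
	 *	version_major = (0x03401011 >> 20) & 0xfff = 0x034 = 52
	 *	version_minor = (0x03401011 >>  8) & 0xfff = 0x010 = 16
	 *	binary_id     =  0x03401011        & 0xff  = 0x011 = 17
	 *	fw_version    = (52 << 24) | (16 << 16) | (17 << 8) = 0x34101100
	 *
	 * The "(fw_version >> 24) >= 52" checks further down recover the
	 * major version from this packed value.
	 */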

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the entity used for handle management in the kernel driver.
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	/* copy the firmware image, minus its header, into the vcpu BO */
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
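
/*
 * A minimal usage sketch for the two helpers above, assuming they are wired
 * into the ring as ring->funcs->begin_use / ring->funcs->end_use so the core
 * ring code calls them around every submission:
 *
 *	amdgpu_vce_ring_begin_use(ring);  // cancel idle work, ungate VCE
 *	// ... write commands, submit, emit fence ...
 *	amdgpu_vce_ring_end_use(ring);    // re-arm the VCE_IDLE_TIMEOUT timer
 *
 * Once no new fences have been emitted for VCE_IDLE_TIMEOUT (one second),
 * amdgpu_vce_idle_work_handler() gates the block again.
 */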

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @bo: amdgpu object to use as the feedback buffer
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct amdgpu_bo *bo,
				     struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	addr = amdgpu_bo_gpu_offset(bo);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52) /* fw major version */
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
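
/*
 * Note on the framing above, which the parser below relies on: every VCE
 * command block is { length in bytes, command id, payload... }. The session
 * block, for example, is 0x0000000c = 12 bytes = 3 dwords:
 *
 *	ib->ptr[0] = 0x0000000c;	// len
 *	ib->ptr[1] = 0x00000001;	// session cmd
 *	ib->ptr[2] = handle;
 *
 * This is why amdgpu_vce_ring_parse_cs() rejects blocks with len < 8 or a
 * len that is not a multiple of four, and steps with idx += len / 4.
 */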

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool submission
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
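
/*
 * Worked example for the index >= 0 branch above, all numbers made up and
 * assuming a 4KiB PAGE_SIZE: size = 2MiB and index = 1 give offset = 2MiB,
 * so fpfn = 512 and lpfn = 0x100000 (the 4GB pfn). Clamping every placement
 * to [512, 0x100000) keeps all pages of the BO below 4GB, and forcing the
 * BO to sit at or above 2MiB means the base address patched in by
 * amdgpu_vce_cs_reloc() below (slot address minus size * index) cannot
 * underflow.
 */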

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}
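
/*
 * Worked example of the patching above, all numbers made up: say the IB
 * carries VA 0x403000 in lo/hi with size = 0x1000 and index = 1, the VA
 * mapping starts at GPU page 0x400 (VA 0x400000) and the BO sits at MC
 * address 0x80000000:
 *
 *	addr  = 0x403000 + 0x1000 * 1		-> 0x404000
 *	addr -= mapping->start * 4096		-> 0x004000
 *	addr += amdgpu_bo_gpu_offset(bo)	-> 0x80004000
 *	addr -= size * index			-> 0x80003000
 *
 * The IB now carries the MC address of the buffer base, and the engine can
 * apply size * index again to reach the indexed slot.
 */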

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: bitmask of newly allocated session slots
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		/* a cmpxchg result of 0 means the slot was free and is now claimed */
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	/* first pass: validate BO placement for all referenced buffers */
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
						   idx + 7, 0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

	/* second pass: validate the commands and patch relocations */
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from (unused by VCE)
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	/* wait for the read pointer to move past the END command */
	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}