/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

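	/*
	 * Select the VCN firmware image for this ASIC. On parts that load
	 * firmware through PSP and support VCN dynamic power gating (DPG),
	 * the DPG register programming is staged through an indirect SRAM
	 * buffer, so indirect_sram is enabled here as well.
	 */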
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_ARCTURUS:
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_RENOIR:
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			fw_name = FIRMWARE_RENOIR;
		else
			fw_name = FIRMWARE_GREEN_SARDINE;

		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI12:
		fw_name = FIRMWARE_NAVI12;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_SIENNA_CICHLID:
		fw_name = FIRMWARE_SIENNA_CICHLID;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVY_FLOUNDER:
		fw_name = FIRMWARE_NAVY_FLOUNDER;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_VANGOGH:
		fw_name = FIRMWARE_VANGOGH;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);
	}

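	/*
	 * Size the per-instance VCPU BO: firmware stack and context, the
	 * ucode image itself when it is not loaded by PSP, plus a page
	 * aligned amdgpu_fw_shared region that lives at the end of the BO
	 * (fw_shared_cpu_addr/fw_shared_gpu_addr point at that tail below).
	 */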
	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
				bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
				bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
						    &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

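/*
 * Suspend saves the contents of each instance's VCPU BO to system memory;
 * resume writes it back, or, when no saved copy exists, re-copies the ucode
 * image for non-PSP loading and clears the rest of the BO.
 */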
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return 0;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
	}
	return 0;
}

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;
	int r = 0;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
			    unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_gfx_off_ctrl(adev, true);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

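/*
 * Ring begin_use/end_use bracket every submission: begin_use bumps the
 * submission count, cancels the idle worker, switches to the video power
 * profile if needed and ungates VCN (adjusting the DPG pause state under
 * vcn_pg_lock); end_use drops the counters and re-arms the delayed idle
 * worker, which gates VCN again once all fences have signaled.
 */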
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r = 0;

	atomic_inc(&adev->vcn.total_submission_cnt);

	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
		amdgpu_gfx_off_ctrl(adev, false);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
					       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

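/*
 * Helper for the decoder IB tests below: writes the GPU address of a
 * message buffer into the DATA0/DATA1 registers via register-write
 * packets, issues the command, pads the IB with NOPs and submits it
 * directly, fencing and releasing the message BO.
 */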
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

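/*
 * The encoder IB test builds minimal session-info/task-info IBs: one with
 * an "op initialize" to create a session for the given handle and one with
 * an "op close session" to destroy it again.
 */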
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}