/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v3_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#include <drm/drm_drv.h>

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN_INSTANCES_SIENNA_CICHLID				2
#define DEC_SW_RING_ENABLED					FALSE

#define RDECODE_MSG_CREATE					0x00000000
#define RDECODE_MESSAGE_CREATE					0x00000001

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int amdgpu_ucode_id_vcns[] = {
	AMDGPU_UCODE_ID_VCN,
	AMDGPU_UCODE_ID_VCN1
};

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_powergating_state(void *handle,
			enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
			int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v3_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		adev->vcn.harvest_config = 0;
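		/* SR-IOV path: both instances are exposed, nothing is treated
		 * as harvested, and a single encode ring is used per instance
		 */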
		adev->vcn.num_enc_rings = 1;

	} else {
		if (adev->asic_type == CHIP_SIENNA_CICHLID) {
			u32 harvest;

			adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
			for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
				harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
				if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
					adev->vcn.harvest_config |= 1 << i;
			}

			if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						AMDGPU_VCN_HARVEST_VCN1))
				/* both instances are harvested, disable the block */
				return -ENOENT;
		} else
			adev->vcn.num_vcn_inst = 1;

		if (adev->asic_type == CHIP_BEIGE_GOBY)
			adev->vcn.num_enc_rings = 0;
		else
			adev->vcn.num_enc_rings = 2;
	}

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	int vcn_doorbell_index = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/*
	 * Note: doorbell assignment is fixed for SRIOV multiple VCN engines
	 * Formula:
	 *  vcn_db_base  = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	 *  dec_ring_i   = vcn_db_base + i * (adev->vcn.num_enc_rings + 1)
	 *  enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j
	 */
	if (amdgpu_sriov_vf(adev)) {
		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
		/* get DWORD offset */
		vcn_doorbell_index = vcn_doorbell_index << 1;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
		if (amdgpu_sriov_vf(adev)) {
			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1);
		} else {
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		}
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			if (amdgpu_sriov_vf(adev)) {
				ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1 + j;
			} else {
				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			}
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT,
					     &adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v3_0_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sw_ring.is_enabled = false;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v3_0_start_sriov(adev);
		if (r)
			goto done;

		/* initialize VCN dec and enc ring buffers */
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
				ring->sched.ready = false;
				dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
			} else {
				ring->wptr = 0;
				ring->wptr_old = 0;
				vcn_v3_0_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
					ring->sched.ready = false;
					dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
				} else {
					ring->wptr = 0;
					ring->wptr_old = 0;
					vcn_v3_0_enc_ring_set_wptr(ring);
					ring->sched.ready = true;
				}
			}
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
					(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
					 RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
				vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v3_0_hw_init(adev);

	return r;
}

/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}

static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}

static void vcn_v3_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v3_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__IME_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
		| UVD_SUVD_CGC_GATE__EFC_MASK
		| UVD_SUVD_CGC_GATE__SAOE_MASK
		| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
		| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
		| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
		| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
		| UVD_SUVD_CGC_GATE__SMPA_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
	data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
		| UVD_SUVD_CGC_GATE2__MPBE1_MASK
		| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
		| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
		| UVD_SUVD_CGC_GATE2__MPC1_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v3_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	/* add nop to workaround PSP size check */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	/* Reset FW shared memory RBC WPTR/RPTR */
	fw_shared->rb.rptr = 0;
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);

	/* resetting done, fw can check RB ring */
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v3_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v3_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v3_0_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v3_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
		fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		if (adev->asic_type != CHIP_BEIGE_GOBY) {
			fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[0];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
			fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

			fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
			ring = &adev->vcn.inst[i].ring_enc[1];
			WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
			WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
			fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
		}
	}

	return 0;
}

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
{
	int i, j;
	struct amdgpu_ring *ring;
	uint64_t cache_addr;
	uint64_t rb_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;
	uint32_t id;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;

	struct mmsch_v3_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v3_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v3_0_cmd_end end = { {0} };
	struct mmsch_v3_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
	for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		table_size = 0;

		MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			id = amdgpu_ucode_id_vcns[i];
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[id].tmr_mc_addr_lo);
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[id].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->wptr = 0;
			rb_addr = ring->gpu_addr;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_LO),
				lower_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_HI),
				upper_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		rb_addr = ring->gpu_addr;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(rb_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(rb_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_RBC_RB_CNTL),
			tmp);

		/* add end packet */
		MMSCH_V3_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v3_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x10000001;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = param + 1;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		if (resp == expected)
			break;

		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
				" waiting for mmMMSCH_VF_MAILBOX_RESP "\
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}

	return 0;
}

static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v3_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v3_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v3_0_enable_clock_gating(adev, i);

		/* enable VCN power gating */
		vcn_v3_0_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

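/*
 * Pause/unpause the DPG firmware: on pause, wait for the power status,
 * request NJ pause and wait for the ack, stall DPG while the encode rings
 * and the decode ring pointers are restored from fw_shared, then unstall;
 * on unpause, only the pause request bit is cleared.
 */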
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev, 1613 int inst_idx, struct dpg_pause_state *new_state) 1614 { 1615 volatile struct amdgpu_fw_shared *fw_shared; 1616 struct amdgpu_ring *ring; 1617 uint32_t reg_data = 0; 1618 int ret_code; 1619 1620 /* pause/unpause if state is changed */ 1621 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { 1622 DRM_DEBUG("dpg pause state changed %d -> %d", 1623 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); 1624 reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) & 1625 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); 1626 1627 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { 1628 ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1, 1629 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1630 1631 if (!ret_code) { 1632 /* pause DPG */ 1633 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; 1634 WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); 1635 1636 /* wait for ACK */ 1637 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE, 1638 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, 1639 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); 1640 1641 /* Stall DPG before WPTR/RPTR reset */ 1642 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1643 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, 1644 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); 1645 1646 if (adev->asic_type != CHIP_BEIGE_GOBY) { 1647 /* Restore */ 1648 fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; 1649 fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); 1650 ring = &adev->vcn.inst[inst_idx].ring_enc[0]; 1651 ring->wptr = 0; 1652 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); 1653 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 1654 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); 1655 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); 1656 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); 1657 fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); 1658 1659 fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); 1660 ring = &adev->vcn.inst[inst_idx].ring_enc[1]; 1661 ring->wptr = 0; 1662 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); 1663 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 1664 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); 1665 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); 1666 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); 1667 fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); 1668 1669 /* restore wptr/rptr with pointers saved in FW shared memory*/ 1670 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr); 1671 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr); 1672 } 1673 1674 /* Unstall DPG */ 1675 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1676 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); 1677 1678 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1679 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1680 } 1681 } else { 1682 /* unpause dpg, no need to wait */ 1683 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; 1684 WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); 1685 } 1686 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; 1687 
} 1688 1689 return 0; 1690 } 1691 1692 /** 1693 * vcn_v3_0_dec_ring_get_rptr - get read pointer 1694 * 1695 * @ring: amdgpu_ring pointer 1696 * 1697 * Returns the current hardware read pointer 1698 */ 1699 static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring) 1700 { 1701 struct amdgpu_device *adev = ring->adev; 1702 1703 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR); 1704 } 1705 1706 /** 1707 * vcn_v3_0_dec_ring_get_wptr - get write pointer 1708 * 1709 * @ring: amdgpu_ring pointer 1710 * 1711 * Returns the current hardware write pointer 1712 */ 1713 static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring) 1714 { 1715 struct amdgpu_device *adev = ring->adev; 1716 1717 if (ring->use_doorbell) 1718 return adev->wb.wb[ring->wptr_offs]; 1719 else 1720 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR); 1721 } 1722 1723 /** 1724 * vcn_v3_0_dec_ring_set_wptr - set write pointer 1725 * 1726 * @ring: amdgpu_ring pointer 1727 * 1728 * Commits the write pointer to the hardware 1729 */ 1730 static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring) 1731 { 1732 struct amdgpu_device *adev = ring->adev; 1733 volatile struct amdgpu_fw_shared *fw_shared; 1734 1735 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 1736 /*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */ 1737 fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr; 1738 fw_shared->rb.wptr = lower_32_bits(ring->wptr); 1739 WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2, 1740 lower_32_bits(ring->wptr)); 1741 } 1742 1743 if (ring->use_doorbell) { 1744 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); 1745 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 1746 } else { 1747 WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); 1748 } 1749 } 1750 1751 static void vcn_v3_0_dec_sw_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 1752 u64 seq, uint32_t flags) 1753 { 1754 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 1755 1756 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_FENCE); 1757 amdgpu_ring_write(ring, addr); 1758 amdgpu_ring_write(ring, upper_32_bits(addr)); 1759 amdgpu_ring_write(ring, seq); 1760 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_TRAP); 1761 } 1762 1763 static void vcn_v3_0_dec_sw_ring_insert_end(struct amdgpu_ring *ring) 1764 { 1765 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END); 1766 } 1767 1768 static void vcn_v3_0_dec_sw_ring_emit_ib(struct amdgpu_ring *ring, 1769 struct amdgpu_job *job, 1770 struct amdgpu_ib *ib, 1771 uint32_t flags) 1772 { 1773 uint32_t vmid = AMDGPU_JOB_GET_VMID(job); 1774 1775 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_IB); 1776 amdgpu_ring_write(ring, vmid); 1777 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); 1778 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 1779 amdgpu_ring_write(ring, ib->length_dw); 1780 } 1781 1782 static void vcn_v3_0_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 1783 uint32_t val, uint32_t mask) 1784 { 1785 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WAIT); 1786 amdgpu_ring_write(ring, reg << 2); 1787 amdgpu_ring_write(ring, mask); 1788 amdgpu_ring_write(ring, val); 1789 } 1790 1791 static void vcn_v3_0_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring, 1792 uint32_t vmid, uint64_t pd_addr) 1793 { 1794 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 1795 uint32_t data0, data1, mask; 1796 1797 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 1798 1799 /* wait for register write */ 1800 data0 = hub->ctx0_ptb_addr_lo32 + vmid * 
hub->ctx_addr_distance; 1801 data1 = lower_32_bits(pd_addr); 1802 mask = 0xffffffff; 1803 vcn_v3_0_dec_sw_ring_emit_reg_wait(ring, data0, data1, mask); 1804 } 1805 1806 static void vcn_v3_0_dec_sw_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) 1807 { 1808 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WRITE); 1809 amdgpu_ring_write(ring, reg << 2); 1810 amdgpu_ring_write(ring, val); 1811 } 1812 1813 static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = { 1814 .type = AMDGPU_RING_TYPE_VCN_DEC, 1815 .align_mask = 0x3f, 1816 .nop = VCN_DEC_SW_CMD_NO_OP, 1817 .vmhub = AMDGPU_MMHUB_0, 1818 .get_rptr = vcn_v3_0_dec_ring_get_rptr, 1819 .get_wptr = vcn_v3_0_dec_ring_get_wptr, 1820 .set_wptr = vcn_v3_0_dec_ring_set_wptr, 1821 .emit_frame_size = 1822 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 1823 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + 1824 4 + /* vcn_v3_0_dec_sw_ring_emit_vm_flush */ 1825 5 + 5 + /* vcn_v3_0_dec_sw_ring_emit_fence x2 vm fence */ 1826 1, /* vcn_v3_0_dec_sw_ring_insert_end */ 1827 .emit_ib_size = 5, /* vcn_v3_0_dec_sw_ring_emit_ib */ 1828 .emit_ib = vcn_v3_0_dec_sw_ring_emit_ib, 1829 .emit_fence = vcn_v3_0_dec_sw_ring_emit_fence, 1830 .emit_vm_flush = vcn_v3_0_dec_sw_ring_emit_vm_flush, 1831 .test_ring = amdgpu_vcn_dec_sw_ring_test_ring, 1832 .test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */ 1833 .insert_nop = amdgpu_ring_insert_nop, 1834 .insert_end = vcn_v3_0_dec_sw_ring_insert_end, 1835 .pad_ib = amdgpu_ring_generic_pad_ib, 1836 .begin_use = amdgpu_vcn_ring_begin_use, 1837 .end_use = amdgpu_vcn_ring_end_use, 1838 .emit_wreg = vcn_v3_0_dec_sw_ring_emit_wreg, 1839 .emit_reg_wait = vcn_v3_0_dec_sw_ring_emit_reg_wait, 1840 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 1841 }; 1842 1843 static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p) 1844 { 1845 struct drm_gpu_scheduler **scheds; 1846 1847 /* The create msg must be in the first IB submitted */ 1848 if (atomic_read(&p->entity->fence_seq)) 1849 return -EINVAL; 1850 1851 scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] 1852 [AMDGPU_RING_PRIO_DEFAULT].sched; 1853 drm_sched_entity_modify_sched(p->entity, scheds, 1); 1854 return 0; 1855 } 1856 1857 static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr) 1858 { 1859 struct ttm_operation_ctx ctx = { false, false }; 1860 struct amdgpu_bo_va_mapping *map; 1861 uint32_t *msg, num_buffers; 1862 struct amdgpu_bo *bo; 1863 uint64_t start, end; 1864 unsigned int i; 1865 void *ptr; 1866 int r; 1867 1868 addr &= AMDGPU_GMC_HOLE_MASK; 1869 r = amdgpu_cs_find_mapping(p, addr, &bo, &map); 1870 if (r) { 1871 DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr); 1872 return r; 1873 } 1874 1875 start = map->start * AMDGPU_GPU_PAGE_SIZE; 1876 end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE; 1877 if (addr & 0x7) { 1878 DRM_ERROR("VCN messages must be 8 byte aligned!\n"); 1879 return -EINVAL; 1880 } 1881 1882 bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 1883 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); 1884 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 1885 if (r) { 1886 DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r); 1887 return r; 1888 } 1889 1890 r = amdgpu_bo_kmap(bo, &ptr); 1891 if (r) { 1892 DRM_ERROR("Failed mapping the VCN message (%d)!\n", r); 1893 return r; 1894 } 1895 1896 msg = ptr + addr - start; 1897 1898 /* Check length */ 1899 if (msg[1] > end - addr) { 1900 r = -EINVAL; 1901 goto out; 1902 } 1903 1904 if (msg[3] != RDECODE_MSG_CREATE) 1905 goto out; 1906 1907 num_buffers =
msg[2]; 1908 for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { 1909 uint32_t offset, size, *create; 1910 1911 if (msg[0] != RDECODE_MESSAGE_CREATE) 1912 continue; 1913 1914 offset = msg[1]; 1915 size = msg[2]; 1916 1917 if (offset + size > end) { 1918 r = -EINVAL; 1919 goto out; 1920 } 1921 1922 create = ptr + addr + offset - start; 1923 1924 /* H264, HEVC and VP9 can run on any instance */ 1925 if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) 1926 continue; 1927 1928 r = vcn_v3_0_limit_sched(p); 1929 if (r) 1930 goto out; 1931 } 1932 1933 out: 1934 amdgpu_bo_kunmap(bo); 1935 return r; 1936 } 1937 1938 static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, 1939 uint32_t ib_idx) 1940 { 1941 struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); 1942 struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; 1943 uint32_t msg_lo = 0, msg_hi = 0; 1944 unsigned int i; 1945 int r; 1946 1947 /* The first instance can decode anything */ 1948 if (!ring->me) 1949 return 0; 1950 1951 for (i = 0; i < ib->length_dw; i += 2) { 1952 uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i); 1953 uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1); 1954 1955 if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) { 1956 msg_lo = val; 1957 } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) { 1958 msg_hi = val; 1959 } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) && 1960 val == 0) { 1961 r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo); 1962 if (r) 1963 return r; 1964 } 1965 } 1966 return 0; 1967 } 1968 1969 static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = { 1970 .type = AMDGPU_RING_TYPE_VCN_DEC, 1971 .align_mask = 0xf, 1972 .vmhub = AMDGPU_MMHUB_0, 1973 .get_rptr = vcn_v3_0_dec_ring_get_rptr, 1974 .get_wptr = vcn_v3_0_dec_ring_get_wptr, 1975 .set_wptr = vcn_v3_0_dec_ring_set_wptr, 1976 .patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place, 1977 .emit_frame_size = 1978 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 1979 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 1980 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */ 1981 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */ 1982 6, 1983 .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */ 1984 .emit_ib = vcn_v2_0_dec_ring_emit_ib, 1985 .emit_fence = vcn_v2_0_dec_ring_emit_fence, 1986 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, 1987 .test_ring = vcn_v2_0_dec_ring_test_ring, 1988 .test_ib = amdgpu_vcn_dec_ring_test_ib, 1989 .insert_nop = vcn_v2_0_dec_ring_insert_nop, 1990 .insert_start = vcn_v2_0_dec_ring_insert_start, 1991 .insert_end = vcn_v2_0_dec_ring_insert_end, 1992 .pad_ib = amdgpu_ring_generic_pad_ib, 1993 .begin_use = amdgpu_vcn_ring_begin_use, 1994 .end_use = amdgpu_vcn_ring_end_use, 1995 .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, 1996 .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, 1997 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 1998 }; 1999 2000 /** 2001 * vcn_v3_0_enc_ring_get_rptr - get enc read pointer 2002 * 2003 * @ring: amdgpu_ring pointer 2004 * 2005 * Returns the current hardware enc read pointer 2006 */ 2007 static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring) 2008 { 2009 struct amdgpu_device *adev = ring->adev; 2010 2011 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) 2012 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR); 2013 else 2014 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2); 2015 } 2016 2017 /** 2018 * vcn_v3_0_enc_ring_get_wptr - get enc write pointer 2019 * 2020 * @ring: amdgpu_ring pointer 2021 * 2022 * Returns the current
hardware enc write pointer 2023 */ 2024 static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring) 2025 { 2026 struct amdgpu_device *adev = ring->adev; 2027 2028 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { 2029 if (ring->use_doorbell) 2030 return adev->wb.wb[ring->wptr_offs]; 2031 else 2032 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR); 2033 } else { 2034 if (ring->use_doorbell) 2035 return adev->wb.wb[ring->wptr_offs]; 2036 else 2037 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2); 2038 } 2039 } 2040 2041 /** 2042 * vcn_v3_0_enc_ring_set_wptr - set enc write pointer 2043 * 2044 * @ring: amdgpu_ring pointer 2045 * 2046 * Commits the enc write pointer to the hardware 2047 */ 2048 static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring) 2049 { 2050 struct amdgpu_device *adev = ring->adev; 2051 2052 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { 2053 if (ring->use_doorbell) { 2054 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); 2055 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 2056 } else { 2057 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); 2058 } 2059 } else { 2060 if (ring->use_doorbell) { 2061 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); 2062 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 2063 } else { 2064 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); 2065 } 2066 } 2067 } 2068 2069 static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = { 2070 .type = AMDGPU_RING_TYPE_VCN_ENC, 2071 .align_mask = 0x3f, 2072 .nop = VCN_ENC_CMD_NO_OP, 2073 .vmhub = AMDGPU_MMHUB_0, 2074 .get_rptr = vcn_v3_0_enc_ring_get_rptr, 2075 .get_wptr = vcn_v3_0_enc_ring_get_wptr, 2076 .set_wptr = vcn_v3_0_enc_ring_set_wptr, 2077 .emit_frame_size = 2078 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 2079 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + 2080 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ 2081 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ 2082 1, /* vcn_v2_0_enc_ring_insert_end */ 2083 .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ 2084 .emit_ib = vcn_v2_0_enc_ring_emit_ib, 2085 .emit_fence = vcn_v2_0_enc_ring_emit_fence, 2086 .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush, 2087 .test_ring = amdgpu_vcn_enc_ring_test_ring, 2088 .test_ib = amdgpu_vcn_enc_ring_test_ib, 2089 .insert_nop = amdgpu_ring_insert_nop, 2090 .insert_end = vcn_v2_0_enc_ring_insert_end, 2091 .pad_ib = amdgpu_ring_generic_pad_ib, 2092 .begin_use = amdgpu_vcn_ring_begin_use, 2093 .end_use = amdgpu_vcn_ring_end_use, 2094 .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, 2095 .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, 2096 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 2097 }; 2098 2099 static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev) 2100 { 2101 int i; 2102 2103 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 2104 if (adev->vcn.harvest_config & (1 << i)) 2105 continue; 2106 2107 if (!DEC_SW_RING_ENABLED) 2108 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs; 2109 else 2110 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs; 2111 adev->vcn.inst[i].ring_dec.me = i; 2112 DRM_INFO("VCN(%d) decode%s is enabled in VM mode\n", i, 2113 DEC_SW_RING_ENABLED?"(Software Ring)":""); 2114 } 2115 } 2116 2117 static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev) 2118 { 2119 int i, j; 2120 2121 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 2122 if (adev->vcn.harvest_config & (1 << i)) 2123 continue; 2124 2125 for (j = 0; j < 
adev->vcn.num_enc_rings; ++j) { 2126 adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs; 2127 adev->vcn.inst[i].ring_enc[j].me = i; 2128 } 2129 if (adev->vcn.num_enc_rings > 0) 2130 DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i); 2131 } 2132 } 2133 2134 static bool vcn_v3_0_is_idle(void *handle) 2135 { 2136 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2137 int i, ret = 1; 2138 2139 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 2140 if (adev->vcn.harvest_config & (1 << i)) 2141 continue; 2142 2143 ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE); 2144 } 2145 2146 return ret; 2147 } 2148 2149 static int vcn_v3_0_wait_for_idle(void *handle) 2150 { 2151 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2152 int i, ret = 0; 2153 2154 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 2155 if (adev->vcn.harvest_config & (1 << i)) 2156 continue; 2157 2158 ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 2159 UVD_STATUS__IDLE); 2160 if (ret) 2161 return ret; 2162 } 2163 2164 return ret; 2165 } 2166 2167 static int vcn_v3_0_set_clockgating_state(void *handle, 2168 enum amd_clockgating_state state) 2169 { 2170 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2171 bool enable = (state == AMD_CG_STATE_GATE); 2172 int i; 2173 2174 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 2175 if (adev->vcn.harvest_config & (1 << i)) 2176 continue; 2177 2178 if (enable) { 2179 if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE) 2180 return -EBUSY; 2181 vcn_v3_0_enable_clock_gating(adev, i); 2182 } else { 2183 vcn_v3_0_disable_clock_gating(adev, i); 2184 } 2185 } 2186 2187 return 0; 2188 } 2189 2190 static int vcn_v3_0_set_powergating_state(void *handle, 2191 enum amd_powergating_state state) 2192 { 2193 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2194 int ret; 2195 2196 /* for SRIOV, guest should not control VCN Power-gating 2197 * MMSCH FW should control Power-gating and clock-gating 2198 * guest should avoid touching CGC and PG 2199 */ 2200 if (amdgpu_sriov_vf(adev)) { 2201 adev->vcn.cur_state = AMD_PG_STATE_UNGATE; 2202 return 0; 2203 } 2204 2205 if (state == adev->vcn.cur_state) 2206 return 0; 2207 2208 if (state == AMD_PG_STATE_GATE) 2209 ret = vcn_v3_0_stop(adev); 2210 else 2211 ret = vcn_v3_0_start(adev); 2212 2213 if (!ret) 2214 adev->vcn.cur_state = state; 2215 2216 return ret; 2217 } 2218 2219 static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev, 2220 struct amdgpu_irq_src *source, 2221 unsigned int type, 2222 enum amdgpu_interrupt_state state) 2223 { 2224 return 0; 2225 } 2226 2227 static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev, 2228 struct amdgpu_irq_src *source, 2229 struct amdgpu_iv_entry *entry) 2230 { 2231 uint32_t ip_instance; 2232 2233 switch (entry->client_id) { 2234 case SOC15_IH_CLIENTID_VCN: 2235 ip_instance = 0; 2236 break; 2237 case SOC15_IH_CLIENTID_VCN1: 2238 ip_instance = 1; 2239 break; 2240 default: 2241 DRM_ERROR("Unhandled client id: %d\n", entry->client_id); 2242 return 0; 2243 } 2244 2245 DRM_DEBUG("IH: VCN TRAP\n"); 2246 2247 switch (entry->src_id) { 2248 case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: 2249 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec); 2250 break; 2251 case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE: 2252 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); 2253 break; 2254 case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: 2255 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); 2256
break; 2257 default: 2258 DRM_ERROR("Unhandled interrupt: %d %d\n", 2259 entry->src_id, entry->src_data[0]); 2260 break; 2261 } 2262 2263 return 0; 2264 } 2265 2266 static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = { 2267 .set = vcn_v3_0_set_interrupt_state, 2268 .process = vcn_v3_0_process_interrupt, 2269 }; 2270 2271 static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev) 2272 { 2273 int i; 2274 2275 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 2276 if (adev->vcn.harvest_config & (1 << i)) 2277 continue; 2278 2279 adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; 2280 adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs; 2281 } 2282 } 2283 2284 static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { 2285 .name = "vcn_v3_0", 2286 .early_init = vcn_v3_0_early_init, 2287 .late_init = NULL, 2288 .sw_init = vcn_v3_0_sw_init, 2289 .sw_fini = vcn_v3_0_sw_fini, 2290 .hw_init = vcn_v3_0_hw_init, 2291 .hw_fini = vcn_v3_0_hw_fini, 2292 .suspend = vcn_v3_0_suspend, 2293 .resume = vcn_v3_0_resume, 2294 .is_idle = vcn_v3_0_is_idle, 2295 .wait_for_idle = vcn_v3_0_wait_for_idle, 2296 .check_soft_reset = NULL, 2297 .pre_soft_reset = NULL, 2298 .soft_reset = NULL, 2299 .post_soft_reset = NULL, 2300 .set_clockgating_state = vcn_v3_0_set_clockgating_state, 2301 .set_powergating_state = vcn_v3_0_set_powergating_state, 2302 }; 2303 2304 const struct amdgpu_ip_block_version vcn_v3_0_ip_block = 2305 { 2306 .type = AMD_IP_BLOCK_TYPE_VCN, 2307 .major = 3, 2308 .minor = 0, 2309 .rev = 0, 2310 .funcs = &vcn_v3_0_ip_funcs, 2311 }; 2312
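/*
 * Note: vcn_v3_0_ip_block is not used directly in this file; the SoC
 * initialization code (for these parts that would be expected to be nv.c)
 * registers it via amdgpu_device_ip_block_add(), which is how the
 * amd_ip_funcs above get invoked during device init/fini and power state
 * changes.
 */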