/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN25_MAX_HW_INSTANCES_ARCTURUS				2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS) {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}

		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->vcn.num_vcn_inst = 1;
	}

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;

		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ?
				 2*j : 8*j);
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
			if (r)
				return r;
		}
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {
			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
							     ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_dec;

		if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		ring->sched.ready = false;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = false;
		}
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
	}
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
	}

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
	 * memory descriptor location
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* wait for vcn idle */
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE, ret);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCN,
		.major = 2,
		.minor = 5,
		.rev = 0,
		.funcs = &vcn_v2_5_ip_funcs,
};