/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f

#define VCN25_MAX_HW_INSTANCES_ARCTURUS				2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS) {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}

		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else
		adev->vcn.num_vcn_inst = 1;

	adev->vcn.num_enc_rings = 2;

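	/*
	 * Hook up the VCN 2.5 specific ring, JPEG and interrupt callbacks
	 * now so that sw_init/hw_init can use them; each helper below only
	 * touches instances that survived the harvest check above.
	 */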
	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_jpeg_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
					i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN JPEG TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);

		adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH);

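		/*
		 * Doorbell layout: each instance owns an 8-slot window
		 * starting at (vcn_ring0_1 << 1) + 8 * j; slot 0 is the
		 * decode ring, slot 1 the JPEG ring and slots 2+ the
		 * encode rings.
		 */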
		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
			if (r)
				return r;
		}

		ring = &adev->vcn.inst[j].ring_jpeg;
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
		sprintf(ring->name, "vcn_jpeg_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		ring = &adev->vcn.inst[j].ring_dec;

		adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, j);

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->sched.ready = false;
			goto done;
		}

		/* encode rings are not tested yet; leave them marked not ready */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->sched.ready = false;
			continue;
			r = amdgpu_ring_test_ring(ring);
			if (r) {
				ring->sched.ready = false;
				goto done;
			}
		}

		ring = &adev->vcn.inst[j].ring_jpeg;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->sched.ready = false;
			goto done;
		}
	}
done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_dec;

		if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		ring->sched.ready = false;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = false;
		}

		ring = &adev->vcn.inst[i].ring_jpeg;
		ring->sched.ready = false;
	}

	return 0;
}

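/*
 * Suspend/resume pairing: vcn_v2_5_suspend() shuts the hardware down via
 * vcn_v2_5_hw_fini() and then calls amdgpu_vcn_suspend(), which (in
 * amdgpu_vcn.c) saves the VCPU BO contents; vcn_v2_5_resume() restores
 * them through amdgpu_vcn_resume() before re-running hw_init.
 */
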
/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			/* No signed header for now from firmware
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
			*/
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
		}
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
	}
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
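		/*
		 * Program CGC_CTRL's dynamic clock mode according to the
		 * MGCG flag, then clear every CGC_GATE and per-engine mode
		 * bit so the sub-block clocks stay ungated while the block
		 * is active.
		 */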
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

/**
 * jpeg_v2_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_jpeg;
		/* disable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* JPEG disable CGC */
		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
		tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
		tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
			| JPEG_CGC_GATE__JPEG2_DEC_MASK
			| JPEG_CGC_GATE__JMCIF_MASK
			| JPEG_CGC_GATE__JRBBM_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
		tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
			| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
			| JPEG_CGC_CTRL__JMCIF_MODE_MASK
			| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

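		/*
		 * The rest of the per-instance bring-up: tiling config,
		 * taking the JMI out of soft reset, enabling the JRBC
		 * interrupt, and programming the JPEG ring (VMID 0, ring
		 * base and zeroed read/write pointers) directly via MMIO.
		 */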
		/* MJPEG global tiling registers */
		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable System Interrupt for JRBC */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN),
			JPEG_SYS_INT_EN__DJRBC_MASK,
			~JPEG_SYS_INT_EN__DJRBC_MASK);

		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
	}

	return 0;
}

/**
 * jpeg_v2_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
		tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
			| JPEG_CGC_GATE__JPEG2_DEC_MASK
			| JPEG_CGC_GATE__JMCIF_MASK
			| JPEG_CGC_GATE__JRBBM_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

		/* enable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS),
			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
	}

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
	}

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

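		/*
		 * MPC configuration: select replacement mode 2 and program
		 * the MUXA/MUXB/SET routing with the same fixed values the
		 * other VCN generations use.
		 */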
		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

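		/*
		 * With RB_NO_FETCH still set, reset the read pointer and
		 * seed the cached wptr from it so the decode ring starts
		 * out empty; the two encode rings below are programmed the
		 * same way through their RB_* registers.
		 */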
		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	r = jpeg_v2_5_start(adev);

	return r;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r;

	r = jpeg_v2_5_stop(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* wait for vcn idle */
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	return 0;
}

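/*
 * Ring pointer accessors shared by the amdgpu_ring_funcs tables below.
 * The read pointer is always taken from the hardware register, while the
 * write pointer comes from the GPU writeback slot whenever the ring uses
 * a doorbell, so the submission path avoids an MMIO read.
 */
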
/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v2_5_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v2_5_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

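/*
 * Like the decode and encode tables above, the JPEG ring reuses the
 * vcn_v2_0 packet emitters; .emit_frame_size budgets the worst-case
 * dword count of one submission (TLB flush register writes and waits,
 * the VM flush, two fences and trailing padding) so ring space can be
 * reserved up front.
 */
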
static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_jpeg_ring_get_rptr,
	.get_wptr = vcn_v2_5_jpeg_ring_get_wptr,
	.set_wptr = vcn_v2_5_jpeg_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
		18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v2_0_jpeg_ring_nop,
	.insert_start = vcn_v2_0_jpeg_ring_insert_start,
	.insert_end = vcn_v2_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
		adev->vcn.inst[i].ring_jpeg.me = i;
		DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE, ret);
		if (ret)
			return ret;
	}

	return ret;
}

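/*
 * Clock- and power-gating entry points used by the amd_ip_funcs table at
 * the end of the file. Clock gating is only engaged once every active
 * instance reports UVD_STATUS idle; power gating is implemented as a full
 * stop/start of the block, with cur_state filtering out redundant
 * transitions.
 */
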
static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};