1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 26 #include "amdgpu.h" 27 #include "amdgpu_vcn.h" 28 #include "amdgpu_pm.h" 29 #include "soc15.h" 30 #include "soc15d.h" 31 #include "soc15_common.h" 32 33 #include "vcn/vcn_1_0_offset.h" 34 #include "vcn/vcn_1_0_sh_mask.h" 35 #include "hdp/hdp_4_0_offset.h" 36 #include "mmhub/mmhub_9_1_offset.h" 37 #include "mmhub/mmhub_9_1_sh_mask.h" 38 39 #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h" 40 #include "jpeg_v1_0.h" 41 #include "vcn_v1_0.h" 42 43 #define mmUVD_RBC_XX_IB_REG_CHECK_1_0 0x05ab 44 #define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX 1 45 #define mmUVD_REG_XX_MASK_1_0 0x05ac 46 #define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1 47 48 static int vcn_v1_0_stop(struct amdgpu_device *adev); 49 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); 50 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); 51 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev); 52 static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); 53 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, 54 int inst_idx, struct dpg_pause_state *new_state); 55 56 static void vcn_v1_0_idle_work_handler(struct work_struct *work); 57 58 /** 59 * vcn_v1_0_early_init - set function pointers 60 * 61 * @handle: amdgpu_device pointer 62 * 63 * Set ring and irq function pointers 64 */ 65 static int vcn_v1_0_early_init(void *handle) 66 { 67 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 68 69 adev->vcn.num_vcn_inst = 1; 70 adev->vcn.num_enc_rings = 2; 71 72 vcn_v1_0_set_dec_ring_funcs(adev); 73 vcn_v1_0_set_enc_ring_funcs(adev); 74 vcn_v1_0_set_irq_funcs(adev); 75 76 jpeg_v1_0_early_init(handle); 77 78 return 0; 79 } 80 81 /** 82 * vcn_v1_0_sw_init - sw init for VCN block 83 * 84 * @handle: amdgpu_device pointer 85 * 86 * Load firmware and sw initialization 87 */ 88 static int vcn_v1_0_sw_init(void *handle) 89 { 90 struct amdgpu_ring *ring; 91 int i, r; 92 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 93 94 /* VCN DEC TRAP */ 95 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 96 VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq); 97 if (r) 98 return r; 99 100 /* VCN ENC TRAP */ 101 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 102 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE, 103 &adev->vcn.inst->irq); 104 if (r) 105 return r; 106 } 107 108 r = amdgpu_vcn_sw_init(adev); 109 if (r) 110 return r; 111 112 /* Override the work func */ 113 adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler; 114 115 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 116 const struct common_firmware_header *hdr; 117 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; 118 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; 119 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; 120 adev->firmware.fw_size += 121 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); 122 DRM_INFO("PSP loading VCN firmware\n"); 123 } 124 125 r = amdgpu_vcn_resume(adev); 126 if (r) 127 return r; 128 129 ring = &adev->vcn.inst->ring_dec; 130 sprintf(ring->name, "vcn_dec"); 131 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, 132 AMDGPU_RING_PRIO_DEFAULT); 133 if (r) 134 return r; 135 136 adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 = 137 SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); 138 adev->vcn.internal.data0 = adev->vcn.inst->external.data0 = 139 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0); 140 adev->vcn.internal.data1 = adev->vcn.inst->external.data1 = 141 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1); 142 adev->vcn.internal.cmd = adev->vcn.inst->external.cmd = 143 SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD); 144 adev->vcn.internal.nop = adev->vcn.inst->external.nop = 145 SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); 146 147 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 148 ring = &adev->vcn.inst->ring_enc[i]; 149 sprintf(ring->name, "vcn_enc%d", i); 150 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, 151 AMDGPU_RING_PRIO_DEFAULT); 152 if (r) 153 return r; 154 } 155 156 adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode; 157 158 r = jpeg_v1_0_sw_init(handle); 159 160 return r; 161 } 162 163 /** 164 * vcn_v1_0_sw_fini - sw fini for VCN block 165 * 166 * @handle: amdgpu_device pointer 167 * 168 * VCN suspend and free up sw allocation 169 */ 170 static int vcn_v1_0_sw_fini(void *handle) 171 { 172 int r; 173 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 174 175 r = amdgpu_vcn_suspend(adev); 176 if (r) 177 return r; 178 179 jpeg_v1_0_sw_fini(handle); 180 181 r = amdgpu_vcn_sw_fini(adev); 182 183 return r; 184 } 185 186 /** 187 * vcn_v1_0_hw_init - start and test VCN block 188 * 189 * @handle: amdgpu_device pointer 190 * 191 * Initialize the hardware, boot up the VCPU and do some testing 192 */ 193 static int vcn_v1_0_hw_init(void *handle) 194 { 195 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 196 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; 197 int i, r; 198 199 r = amdgpu_ring_test_helper(ring); 200 if (r) 201 goto done; 202 203 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 204 ring = &adev->vcn.inst->ring_enc[i]; 205 r = amdgpu_ring_test_helper(ring); 206 if (r) 207 goto done; 208 } 209 210 ring = &adev->jpeg.inst->ring_dec; 211 r = amdgpu_ring_test_helper(ring); 212 if (r) 213 goto done; 214 215 done: 216 if (!r) 217 DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", 218 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode"); 219 220 return r; 221 } 222 223 /** 224 * vcn_v1_0_hw_fini - stop the hardware block 225 * 226 * @handle: amdgpu_device pointer 227 * 228 * Stop the VCN block, mark ring as not ready any more 229 */ 230 static int vcn_v1_0_hw_fini(void *handle) 231 { 232 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 233 234 if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || 235 RREG32_SOC15(VCN, 0, mmUVD_STATUS)) 236 vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); 237 238 return 0; 239 } 240 241 /** 242 * vcn_v1_0_suspend - suspend VCN block 243 * 244 * @handle: amdgpu_device pointer 245 * 246 * HW fini and suspend VCN block 247 */ 248 static int vcn_v1_0_suspend(void *handle) 249 { 250 int r; 251 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 252 253 r = vcn_v1_0_hw_fini(adev); 254 if (r) 255 return r; 256 257 r = amdgpu_vcn_suspend(adev); 258 259 return r; 260 } 261 262 /** 263 * vcn_v1_0_resume - resume VCN block 264 * 265 * @handle: amdgpu_device pointer 266 * 267 * Resume firmware and hw init VCN block 268 */ 269 static int vcn_v1_0_resume(void *handle) 270 { 271 int r; 272 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 273 274 r = amdgpu_vcn_resume(adev); 275 if (r) 276 return r; 277 278 r = vcn_v1_0_hw_init(adev); 279 280 return r; 281 } 282 283 /** 284 * vcn_v1_0_mc_resume_spg_mode - memory controller programming 285 * 286 * @adev: amdgpu_device pointer 287 * 288 * Let the VCN memory controller know it's offsets 289 */ 290 static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev) 291 { 292 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); 293 uint32_t offset; 294 295 /* cache window 0: fw */ 296 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 297 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, 298 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); 299 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, 300 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); 301 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); 302 offset = 0; 303 } else { 304 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, 305 lower_32_bits(adev->vcn.inst->gpu_addr)); 306 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, 307 upper_32_bits(adev->vcn.inst->gpu_addr)); 308 offset = size; 309 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 310 AMDGPU_UVD_FIRMWARE_OFFSET >> 3); 311 } 312 313 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); 314 315 /* cache window 1: stack */ 316 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, 317 lower_32_bits(adev->vcn.inst->gpu_addr + offset)); 318 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, 319 upper_32_bits(adev->vcn.inst->gpu_addr + offset)); 320 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); 321 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); 322 323 /* cache window 2: context */ 324 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, 325 lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); 326 WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, 327 upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); 328 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); 329 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); 330 331 WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, 332 adev->gfx.config.gb_addr_config); 333 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, 334 adev->gfx.config.gb_addr_config); 335 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, 336 adev->gfx.config.gb_addr_config); 337 WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG, 338 adev->gfx.config.gb_addr_config); 339 WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG, 340 adev->gfx.config.gb_addr_config); 341 WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG, 342 adev->gfx.config.gb_addr_config); 343 WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG, 344 adev->gfx.config.gb_addr_config); 345 WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG, 346 adev->gfx.config.gb_addr_config); 347 WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG, 348 adev->gfx.config.gb_addr_config); 349 WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG, 350 adev->gfx.config.gb_addr_config); 351 WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, 352 adev->gfx.config.gb_addr_config); 353 WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, 354 adev->gfx.config.gb_addr_config); 355 } 356 357 static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev) 358 { 359 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); 360 uint32_t offset; 361 362 /* cache window 0: fw */ 363 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 364 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, 365 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 366 0xFFFFFFFF, 0); 367 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, 368 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 369 0xFFFFFFFF, 0); 370 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0, 371 0xFFFFFFFF, 0); 372 offset = 0; 373 } else { 374 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, 375 lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0); 376 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, 377 upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0); 378 offset = size; 379 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 380 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0); 381 } 382 383 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0); 384 385 /* cache window 1: stack */ 386 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, 387 lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0); 388 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, 389 upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0); 390 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0, 391 0xFFFFFFFF, 0); 392 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE, 393 0xFFFFFFFF, 0); 394 395 /* cache window 2: context */ 396 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, 397 lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 398 0xFFFFFFFF, 0); 399 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, 400 upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 401 0xFFFFFFFF, 0); 402 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0); 403 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE, 404 0xFFFFFFFF, 0); 405 406 /* VCN global tiling registers */ 407 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, 408 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 409 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, 410 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 411 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, 412 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 413 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG, 414 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 415 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG, 416 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 417 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG, 418 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 419 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG, 420 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 421 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG, 422 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 423 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG, 424 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 425 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG, 426 adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); 427 } 428 429 /** 430 * vcn_v1_0_disable_clock_gating - disable VCN clock gating 431 * 432 * @adev: amdgpu_device pointer 433 * @sw: enable SW clock gating 434 * 435 * Disable clock gating for VCN block 436 */ 437 static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev) 438 { 439 uint32_t data; 440 441 /* JPEG disable CGC */ 442 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); 443 444 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) 445 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 446 else 447 data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK; 448 449 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; 450 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; 451 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data); 452 453 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); 454 data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK); 455 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data); 456 457 /* UVD disable CGC */ 458 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); 459 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) 460 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 461 else 462 data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; 463 464 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; 465 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; 466 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); 467 468 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE); 469 data &= ~(UVD_CGC_GATE__SYS_MASK 470 | UVD_CGC_GATE__UDEC_MASK 471 | UVD_CGC_GATE__MPEG2_MASK 472 | UVD_CGC_GATE__REGS_MASK 473 | UVD_CGC_GATE__RBC_MASK 474 | UVD_CGC_GATE__LMI_MC_MASK 475 | UVD_CGC_GATE__LMI_UMC_MASK 476 | UVD_CGC_GATE__IDCT_MASK 477 | UVD_CGC_GATE__MPRD_MASK 478 | UVD_CGC_GATE__MPC_MASK 479 | UVD_CGC_GATE__LBSI_MASK 480 | UVD_CGC_GATE__LRBBM_MASK 481 | UVD_CGC_GATE__UDEC_RE_MASK 482 | UVD_CGC_GATE__UDEC_CM_MASK 483 | UVD_CGC_GATE__UDEC_IT_MASK 484 | UVD_CGC_GATE__UDEC_DB_MASK 485 | UVD_CGC_GATE__UDEC_MP_MASK 486 | UVD_CGC_GATE__WCB_MASK 487 | UVD_CGC_GATE__VCPU_MASK 488 | UVD_CGC_GATE__SCPU_MASK); 489 WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data); 490 491 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); 492 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK 493 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK 494 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK 495 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK 496 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK 497 | UVD_CGC_CTRL__SYS_MODE_MASK 498 | UVD_CGC_CTRL__UDEC_MODE_MASK 499 | UVD_CGC_CTRL__MPEG2_MODE_MASK 500 | UVD_CGC_CTRL__REGS_MODE_MASK 501 | UVD_CGC_CTRL__RBC_MODE_MASK 502 | UVD_CGC_CTRL__LMI_MC_MODE_MASK 503 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK 504 | UVD_CGC_CTRL__IDCT_MODE_MASK 505 | UVD_CGC_CTRL__MPRD_MODE_MASK 506 | UVD_CGC_CTRL__MPC_MODE_MASK 507 | UVD_CGC_CTRL__LBSI_MODE_MASK 508 | UVD_CGC_CTRL__LRBBM_MODE_MASK 509 | UVD_CGC_CTRL__WCB_MODE_MASK 510 | UVD_CGC_CTRL__VCPU_MODE_MASK 511 | UVD_CGC_CTRL__SCPU_MODE_MASK); 512 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); 513 514 /* turn on */ 515 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE); 516 data |= (UVD_SUVD_CGC_GATE__SRE_MASK 517 | UVD_SUVD_CGC_GATE__SIT_MASK 518 | UVD_SUVD_CGC_GATE__SMP_MASK 519 | UVD_SUVD_CGC_GATE__SCM_MASK 520 | UVD_SUVD_CGC_GATE__SDB_MASK 521 | UVD_SUVD_CGC_GATE__SRE_H264_MASK 522 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK 523 | UVD_SUVD_CGC_GATE__SIT_H264_MASK 524 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK 525 | UVD_SUVD_CGC_GATE__SCM_H264_MASK 526 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK 527 | UVD_SUVD_CGC_GATE__SDB_H264_MASK 528 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK 529 | UVD_SUVD_CGC_GATE__SCLR_MASK 530 | UVD_SUVD_CGC_GATE__UVD_SC_MASK 531 | UVD_SUVD_CGC_GATE__ENT_MASK 532 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 533 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 534 | UVD_SUVD_CGC_GATE__SITE_MASK 535 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK 536 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK 537 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 538 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK 539 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); 540 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data); 541 542 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL); 543 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK 544 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK 545 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK 546 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK 547 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK 548 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK 549 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 550 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK 551 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK 552 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); 553 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); 554 } 555 556 /** 557 * vcn_v1_0_enable_clock_gating - enable VCN clock gating 558 * 559 * @adev: amdgpu_device pointer 560 * @sw: enable SW clock gating 561 * 562 * Enable clock gating for VCN block 563 */ 564 static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev) 565 { 566 uint32_t data = 0; 567 568 /* enable JPEG CGC */ 569 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); 570 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) 571 data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 572 else 573 data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 574 data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; 575 data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; 576 WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data); 577 578 data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); 579 data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK); 580 WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data); 581 582 /* enable UVD CGC */ 583 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); 584 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) 585 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 586 else 587 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 588 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; 589 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; 590 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); 591 592 data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); 593 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK 594 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK 595 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK 596 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK 597 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK 598 | UVD_CGC_CTRL__SYS_MODE_MASK 599 | UVD_CGC_CTRL__UDEC_MODE_MASK 600 | UVD_CGC_CTRL__MPEG2_MODE_MASK 601 | UVD_CGC_CTRL__REGS_MODE_MASK 602 | UVD_CGC_CTRL__RBC_MODE_MASK 603 | UVD_CGC_CTRL__LMI_MC_MODE_MASK 604 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK 605 | UVD_CGC_CTRL__IDCT_MODE_MASK 606 | UVD_CGC_CTRL__MPRD_MODE_MASK 607 | UVD_CGC_CTRL__MPC_MODE_MASK 608 | UVD_CGC_CTRL__LBSI_MODE_MASK 609 | UVD_CGC_CTRL__LRBBM_MODE_MASK 610 | UVD_CGC_CTRL__WCB_MODE_MASK 611 | UVD_CGC_CTRL__VCPU_MODE_MASK 612 | UVD_CGC_CTRL__SCPU_MODE_MASK); 613 WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); 614 615 data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL); 616 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK 617 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK 618 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK 619 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK 620 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK 621 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK 622 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 623 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK 624 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK 625 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); 626 WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); 627 } 628 629 static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel) 630 { 631 uint32_t reg_data = 0; 632 633 /* disable JPEG CGC */ 634 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) 635 reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 636 else 637 reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 638 reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; 639 reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; 640 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); 641 642 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); 643 644 /* enable sw clock gating control */ 645 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) 646 reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 647 else 648 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; 649 reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; 650 reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; 651 reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | 652 UVD_CGC_CTRL__UDEC_CM_MODE_MASK | 653 UVD_CGC_CTRL__UDEC_IT_MODE_MASK | 654 UVD_CGC_CTRL__UDEC_DB_MODE_MASK | 655 UVD_CGC_CTRL__UDEC_MP_MODE_MASK | 656 UVD_CGC_CTRL__SYS_MODE_MASK | 657 UVD_CGC_CTRL__UDEC_MODE_MASK | 658 UVD_CGC_CTRL__MPEG2_MODE_MASK | 659 UVD_CGC_CTRL__REGS_MODE_MASK | 660 UVD_CGC_CTRL__RBC_MODE_MASK | 661 UVD_CGC_CTRL__LMI_MC_MODE_MASK | 662 UVD_CGC_CTRL__LMI_UMC_MODE_MASK | 663 UVD_CGC_CTRL__IDCT_MODE_MASK | 664 UVD_CGC_CTRL__MPRD_MODE_MASK | 665 UVD_CGC_CTRL__MPC_MODE_MASK | 666 UVD_CGC_CTRL__LBSI_MODE_MASK | 667 UVD_CGC_CTRL__LRBBM_MODE_MASK | 668 UVD_CGC_CTRL__WCB_MODE_MASK | 669 UVD_CGC_CTRL__VCPU_MODE_MASK | 670 UVD_CGC_CTRL__SCPU_MODE_MASK); 671 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); 672 673 /* turn off clock gating */ 674 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); 675 676 /* turn on SUVD clock gating */ 677 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel); 678 679 /* turn on sw mode in UVD_SUVD_CGC_CTRL */ 680 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel); 681 } 682 683 static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev) 684 { 685 uint32_t data = 0; 686 687 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { 688 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT 689 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT 690 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT 691 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT 692 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT 693 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT 694 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT 695 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT 696 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT 697 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT 698 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); 699 700 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); 701 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF); 702 } else { 703 data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT 704 | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT 705 | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT 706 | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT 707 | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT 708 | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT 709 | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT 710 | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT 711 | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT 712 | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT 713 | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); 714 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); 715 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF); 716 } 717 718 /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS , UVDU_PWR_STATUS are 0 (power on) */ 719 720 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS); 721 data &= ~0x103; 722 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) 723 data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK; 724 725 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data); 726 } 727 728 static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev) 729 { 730 uint32_t data = 0; 731 732 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { 733 /* Before power off, this indicator has to be turned on */ 734 data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS); 735 data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK; 736 data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; 737 WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data); 738 739 740 data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT 741 | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT 742 | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT 743 | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT 744 | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT 745 | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT 746 | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT 747 | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT 748 | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT 749 | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT 750 | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); 751 752 WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); 753 754 data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT 755 | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT 756 | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT 757 | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT 758 | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT 759 | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT 760 | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT 761 | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT 762 | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT 763 | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT 764 | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT); 765 SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF); 766 } 767 } 768 769 /** 770 * vcn_v1_0_start - start VCN block 771 * 772 * @adev: amdgpu_device pointer 773 * 774 * Setup and start the VCN block 775 */ 776 static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) 777 { 778 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; 779 uint32_t rb_bufsz, tmp; 780 uint32_t lmi_swap_cntl; 781 int i, j, r; 782 783 /* disable byte swapping */ 784 lmi_swap_cntl = 0; 785 786 vcn_1_0_disable_static_power_gating(adev); 787 788 tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; 789 WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp); 790 791 /* disable clock gating */ 792 vcn_v1_0_disable_clock_gating(adev); 793 794 /* disable interupt */ 795 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, 796 ~UVD_MASTINT_EN__VCPU_EN_MASK); 797 798 /* initialize VCN memory controller */ 799 tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL); 800 WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp | 801 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | 802 UVD_LMI_CTRL__MASK_MC_URGENT_MASK | 803 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | 804 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); 805 806 #ifdef __BIG_ENDIAN 807 /* swap (8 in 32) RB and IB */ 808 lmi_swap_cntl = 0xa; 809 #endif 810 WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); 811 812 tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL); 813 tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; 814 tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; 815 WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp); 816 817 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 818 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | 819 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | 820 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | 821 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); 822 823 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 824 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | 825 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | 826 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | 827 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); 828 829 WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 830 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | 831 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | 832 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); 833 834 vcn_v1_0_mc_resume_spg_mode(adev); 835 836 WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10); 837 WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0, 838 RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3); 839 840 /* enable VCPU clock */ 841 WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); 842 843 /* boot up the VCPU */ 844 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0, 845 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); 846 847 /* enable UMC */ 848 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, 849 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); 850 851 tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET); 852 tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; 853 tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; 854 WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp); 855 856 for (i = 0; i < 10; ++i) { 857 uint32_t status; 858 859 for (j = 0; j < 100; ++j) { 860 status = RREG32_SOC15(UVD, 0, mmUVD_STATUS); 861 if (status & UVD_STATUS__IDLE) 862 break; 863 mdelay(10); 864 } 865 r = 0; 866 if (status & UVD_STATUS__IDLE) 867 break; 868 869 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n"); 870 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 871 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, 872 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); 873 mdelay(10); 874 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0, 875 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); 876 mdelay(10); 877 r = -1; 878 } 879 880 if (r) { 881 DRM_ERROR("VCN decode not responding, giving up!!!\n"); 882 return r; 883 } 884 /* enable master interrupt */ 885 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 886 UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK); 887 888 /* enable system interrupt for JRBC, TODO: move to set interrupt*/ 889 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN), 890 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 891 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK); 892 893 /* clear the busy bit of UVD_STATUS */ 894 tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY; 895 WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp); 896 897 /* force RBC into idle state */ 898 rb_bufsz = order_base_2(ring->ring_size); 899 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); 900 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); 901 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); 902 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); 903 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); 904 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); 905 906 /* set the write pointer delay */ 907 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); 908 909 /* set the wb address */ 910 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, 911 (upper_32_bits(ring->gpu_addr) >> 2)); 912 913 /* programm the RB_BASE for ring buffer */ 914 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, 915 lower_32_bits(ring->gpu_addr)); 916 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, 917 upper_32_bits(ring->gpu_addr)); 918 919 /* Initialize the ring buffer's read and write pointers */ 920 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); 921 922 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0); 923 924 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); 925 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, 926 lower_32_bits(ring->wptr)); 927 928 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, 929 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); 930 931 ring = &adev->vcn.inst->ring_enc[0]; 932 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); 933 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); 934 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); 935 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 936 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); 937 938 ring = &adev->vcn.inst->ring_enc[1]; 939 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); 940 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); 941 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); 942 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 943 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); 944 945 jpeg_v1_0_start(adev, 0); 946 947 return 0; 948 } 949 950 static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) 951 { 952 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; 953 uint32_t rb_bufsz, tmp; 954 uint32_t lmi_swap_cntl; 955 956 /* disable byte swapping */ 957 lmi_swap_cntl = 0; 958 959 vcn_1_0_enable_static_power_gating(adev); 960 961 /* enable dynamic power gating mode */ 962 tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS); 963 tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; 964 tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; 965 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp); 966 967 /* enable clock gating */ 968 vcn_v1_0_clock_gating_dpg_mode(adev, 0); 969 970 /* enable VCPU clock */ 971 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); 972 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; 973 tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK; 974 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0); 975 976 /* disable interupt */ 977 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN, 978 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0); 979 980 /* initialize VCN memory controller */ 981 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL, 982 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 983 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | 984 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | 985 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | 986 UVD_LMI_CTRL__REQ_MODE_MASK | 987 UVD_LMI_CTRL__CRC_RESET_MASK | 988 UVD_LMI_CTRL__MASK_MC_URGENT_MASK | 989 0x00100000L, 0xFFFFFFFF, 0); 990 991 #ifdef __BIG_ENDIAN 992 /* swap (8 in 32) RB and IB */ 993 lmi_swap_cntl = 0xa; 994 #endif 995 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0); 996 997 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL, 998 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0); 999 1000 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0, 1001 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | 1002 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | 1003 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | 1004 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0); 1005 1006 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0, 1007 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | 1008 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | 1009 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | 1010 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0); 1011 1012 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX, 1013 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | 1014 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | 1015 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0); 1016 1017 vcn_v1_0_mc_resume_dpg_mode(adev); 1018 1019 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0); 1020 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0); 1021 1022 /* boot up the VCPU */ 1023 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0); 1024 1025 /* enable UMC */ 1026 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2, 1027 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 1028 0xFFFFFFFF, 0); 1029 1030 /* enable master interrupt */ 1031 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN, 1032 UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0); 1033 1034 vcn_v1_0_clock_gating_dpg_mode(adev, 1); 1035 /* setup mmUVD_LMI_CTRL */ 1036 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL, 1037 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 1038 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | 1039 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | 1040 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | 1041 UVD_LMI_CTRL__REQ_MODE_MASK | 1042 UVD_LMI_CTRL__CRC_RESET_MASK | 1043 UVD_LMI_CTRL__MASK_MC_URGENT_MASK | 1044 0x00100000L, 0xFFFFFFFF, 1); 1045 1046 tmp = adev->gfx.config.gb_addr_config; 1047 /* setup VCN global tiling registers */ 1048 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); 1049 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); 1050 1051 /* enable System Interrupt for JRBC */ 1052 WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN, 1053 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1); 1054 1055 /* force RBC into idle state */ 1056 rb_bufsz = order_base_2(ring->ring_size); 1057 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); 1058 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); 1059 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); 1060 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); 1061 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); 1062 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); 1063 1064 /* set the write pointer delay */ 1065 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); 1066 1067 /* set the wb address */ 1068 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, 1069 (upper_32_bits(ring->gpu_addr) >> 2)); 1070 1071 /* programm the RB_BASE for ring buffer */ 1072 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, 1073 lower_32_bits(ring->gpu_addr)); 1074 WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, 1075 upper_32_bits(ring->gpu_addr)); 1076 1077 /* Initialize the ring buffer's read and write pointers */ 1078 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); 1079 1080 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0); 1081 1082 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); 1083 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, 1084 lower_32_bits(ring->wptr)); 1085 1086 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, 1087 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); 1088 1089 jpeg_v1_0_start(adev, 1); 1090 1091 return 0; 1092 } 1093 1094 static int vcn_v1_0_start(struct amdgpu_device *adev) 1095 { 1096 int r; 1097 1098 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) 1099 r = vcn_v1_0_start_dpg_mode(adev); 1100 else 1101 r = vcn_v1_0_start_spg_mode(adev); 1102 return r; 1103 } 1104 1105 /** 1106 * vcn_v1_0_stop - stop VCN block 1107 * 1108 * @adev: amdgpu_device pointer 1109 * 1110 * stop the VCN block 1111 */ 1112 static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev) 1113 { 1114 int tmp; 1115 1116 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7); 1117 1118 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | 1119 UVD_LMI_STATUS__READ_CLEAN_MASK | 1120 UVD_LMI_STATUS__WRITE_CLEAN_MASK | 1121 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; 1122 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp); 1123 1124 /* put VCPU into reset */ 1125 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 1126 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, 1127 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); 1128 1129 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | 1130 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; 1131 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp); 1132 1133 /* disable VCPU clock */ 1134 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, 1135 ~UVD_VCPU_CNTL__CLK_EN_MASK); 1136 1137 /* reset LMI UMC/LMI */ 1138 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 1139 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK, 1140 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); 1141 1142 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 1143 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK, 1144 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK); 1145 1146 WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0); 1147 1148 vcn_v1_0_enable_clock_gating(adev); 1149 vcn_1_0_enable_static_power_gating(adev); 1150 return 0; 1151 } 1152 1153 static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev) 1154 { 1155 uint32_t tmp; 1156 1157 /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */ 1158 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1159 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, 1160 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1161 1162 /* wait for read ptr to be equal to write ptr */ 1163 tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); 1164 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF); 1165 1166 tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); 1167 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF); 1168 1169 tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); 1170 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF); 1171 1172 tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; 1173 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF); 1174 1175 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1176 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, 1177 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1178 1179 /* disable dynamic power gating mode */ 1180 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, 1181 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); 1182 1183 return 0; 1184 } 1185 1186 static int vcn_v1_0_stop(struct amdgpu_device *adev) 1187 { 1188 int r; 1189 1190 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) 1191 r = vcn_v1_0_stop_dpg_mode(adev); 1192 else 1193 r = vcn_v1_0_stop_spg_mode(adev); 1194 1195 return r; 1196 } 1197 1198 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, 1199 int inst_idx, struct dpg_pause_state *new_state) 1200 { 1201 int ret_code; 1202 uint32_t reg_data = 0; 1203 uint32_t reg_data2 = 0; 1204 struct amdgpu_ring *ring; 1205 1206 /* pause/unpause if state is changed */ 1207 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { 1208 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d", 1209 adev->vcn.inst[inst_idx].pause_state.fw_based, 1210 adev->vcn.inst[inst_idx].pause_state.jpeg, 1211 new_state->fw_based, new_state->jpeg); 1212 1213 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) & 1214 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); 1215 1216 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { 1217 ret_code = 0; 1218 1219 if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK)) 1220 ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1221 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, 1222 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1223 1224 if (!ret_code) { 1225 /* pause DPG non-jpeg */ 1226 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; 1227 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); 1228 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE, 1229 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, 1230 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); 1231 1232 /* Restore */ 1233 ring = &adev->vcn.inst->ring_enc[0]; 1234 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); 1235 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 1236 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); 1237 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); 1238 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); 1239 1240 ring = &adev->vcn.inst->ring_enc[1]; 1241 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); 1242 WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 1243 WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); 1244 WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); 1245 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); 1246 1247 ring = &adev->vcn.inst->ring_dec; 1248 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, 1249 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); 1250 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1251 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, 1252 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1253 } 1254 } else { 1255 /* unpause dpg non-jpeg, no need to wait */ 1256 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; 1257 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); 1258 } 1259 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; 1260 } 1261 1262 /* pause/unpause if state is changed */ 1263 if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) { 1264 DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d", 1265 adev->vcn.inst[inst_idx].pause_state.fw_based, 1266 adev->vcn.inst[inst_idx].pause_state.jpeg, 1267 new_state->fw_based, new_state->jpeg); 1268 1269 reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) & 1270 (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK); 1271 1272 if (new_state->jpeg == VCN_DPG_STATE__PAUSE) { 1273 ret_code = 0; 1274 1275 if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK)) 1276 ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1277 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, 1278 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1279 1280 if (!ret_code) { 1281 /* Make sure JPRG Snoop is disabled before sending the pause */ 1282 reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS); 1283 reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK; 1284 WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2); 1285 1286 /* pause DPG jpeg */ 1287 reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK; 1288 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); 1289 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE, 1290 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, 1291 UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK); 1292 1293 /* Restore */ 1294 ring = &adev->jpeg.inst->ring_dec; 1295 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); 1296 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 1297 UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | 1298 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); 1299 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, 1300 lower_32_bits(ring->gpu_addr)); 1301 WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, 1302 upper_32_bits(ring->gpu_addr)); 1303 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr); 1304 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr); 1305 WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 1306 UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); 1307 1308 ring = &adev->vcn.inst->ring_dec; 1309 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, 1310 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); 1311 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1312 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, 1313 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1314 } 1315 } else { 1316 /* unpause dpg jpeg, no need to wait */ 1317 reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK; 1318 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); 1319 } 1320 adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg; 1321 } 1322 1323 return 0; 1324 } 1325 1326 static bool vcn_v1_0_is_idle(void *handle) 1327 { 1328 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1329 1330 return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); 1331 } 1332 1333 static int vcn_v1_0_wait_for_idle(void *handle) 1334 { 1335 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1336 int ret; 1337 1338 ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 1339 UVD_STATUS__IDLE); 1340 1341 return ret; 1342 } 1343 1344 static int vcn_v1_0_set_clockgating_state(void *handle, 1345 enum amd_clockgating_state state) 1346 { 1347 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1348 bool enable = (state == AMD_CG_STATE_GATE); 1349 1350 if (enable) { 1351 /* wait for STATUS to clear */ 1352 if (!vcn_v1_0_is_idle(handle)) 1353 return -EBUSY; 1354 vcn_v1_0_enable_clock_gating(adev); 1355 } else { 1356 /* disable HW gating and enable Sw gating */ 1357 vcn_v1_0_disable_clock_gating(adev); 1358 } 1359 return 0; 1360 } 1361 1362 /** 1363 * vcn_v1_0_dec_ring_get_rptr - get read pointer 1364 * 1365 * @ring: amdgpu_ring pointer 1366 * 1367 * Returns the current hardware read pointer 1368 */ 1369 static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring) 1370 { 1371 struct amdgpu_device *adev = ring->adev; 1372 1373 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); 1374 } 1375 1376 /** 1377 * vcn_v1_0_dec_ring_get_wptr - get write pointer 1378 * 1379 * @ring: amdgpu_ring pointer 1380 * 1381 * Returns the current hardware write pointer 1382 */ 1383 static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring) 1384 { 1385 struct amdgpu_device *adev = ring->adev; 1386 1387 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR); 1388 } 1389 1390 /** 1391 * vcn_v1_0_dec_ring_set_wptr - set write pointer 1392 * 1393 * @ring: amdgpu_ring pointer 1394 * 1395 * Commits the write pointer to the hardware 1396 */ 1397 static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring) 1398 { 1399 struct amdgpu_device *adev = ring->adev; 1400 1401 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) 1402 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 1403 lower_32_bits(ring->wptr) | 0x80000000); 1404 1405 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); 1406 } 1407 1408 /** 1409 * vcn_v1_0_dec_ring_insert_start - insert a start command 1410 * 1411 * @ring: amdgpu_ring pointer 1412 * 1413 * Write a start command to the ring. 1414 */ 1415 static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring) 1416 { 1417 struct amdgpu_device *adev = ring->adev; 1418 1419 amdgpu_ring_write(ring, 1420 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); 1421 amdgpu_ring_write(ring, 0); 1422 amdgpu_ring_write(ring, 1423 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); 1424 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1); 1425 } 1426 1427 /** 1428 * vcn_v1_0_dec_ring_insert_end - insert a end command 1429 * 1430 * @ring: amdgpu_ring pointer 1431 * 1432 * Write a end command to the ring. 1433 */ 1434 static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring) 1435 { 1436 struct amdgpu_device *adev = ring->adev; 1437 1438 amdgpu_ring_write(ring, 1439 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); 1440 amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1); 1441 } 1442 1443 /** 1444 * vcn_v1_0_dec_ring_emit_fence - emit an fence & trap command 1445 * 1446 * @ring: amdgpu_ring pointer 1447 * @fence: fence to emit 1448 * 1449 * Write a fence and a trap command to the ring. 1450 */ 1451 static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, 1452 unsigned flags) 1453 { 1454 struct amdgpu_device *adev = ring->adev; 1455 1456 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 1457 1458 amdgpu_ring_write(ring, 1459 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0)); 1460 amdgpu_ring_write(ring, seq); 1461 amdgpu_ring_write(ring, 1462 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); 1463 amdgpu_ring_write(ring, addr & 0xffffffff); 1464 amdgpu_ring_write(ring, 1465 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); 1466 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); 1467 amdgpu_ring_write(ring, 1468 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); 1469 amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1); 1470 1471 amdgpu_ring_write(ring, 1472 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); 1473 amdgpu_ring_write(ring, 0); 1474 amdgpu_ring_write(ring, 1475 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); 1476 amdgpu_ring_write(ring, 0); 1477 amdgpu_ring_write(ring, 1478 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); 1479 amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1); 1480 } 1481 1482 /** 1483 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer 1484 * 1485 * @ring: amdgpu_ring pointer 1486 * @ib: indirect buffer to execute 1487 * 1488 * Write ring commands to execute the indirect buffer 1489 */ 1490 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring, 1491 struct amdgpu_job *job, 1492 struct amdgpu_ib *ib, 1493 uint32_t flags) 1494 { 1495 struct amdgpu_device *adev = ring->adev; 1496 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 1497 1498 amdgpu_ring_write(ring, 1499 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); 1500 amdgpu_ring_write(ring, vmid); 1501 1502 amdgpu_ring_write(ring, 1503 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); 1504 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); 1505 amdgpu_ring_write(ring, 1506 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0)); 1507 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 1508 amdgpu_ring_write(ring, 1509 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0)); 1510 amdgpu_ring_write(ring, ib->length_dw); 1511 } 1512 1513 static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, 1514 uint32_t reg, uint32_t val, 1515 uint32_t mask) 1516 { 1517 struct amdgpu_device *adev = ring->adev; 1518 1519 amdgpu_ring_write(ring, 1520 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); 1521 amdgpu_ring_write(ring, reg << 2); 1522 amdgpu_ring_write(ring, 1523 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); 1524 amdgpu_ring_write(ring, val); 1525 amdgpu_ring_write(ring, 1526 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0)); 1527 amdgpu_ring_write(ring, mask); 1528 amdgpu_ring_write(ring, 1529 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); 1530 amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1); 1531 } 1532 1533 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, 1534 unsigned vmid, uint64_t pd_addr) 1535 { 1536 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 1537 uint32_t data0, data1, mask; 1538 1539 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 1540 1541 /* wait for register write */ 1542 data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance; 1543 data1 = lower_32_bits(pd_addr); 1544 mask = 0xffffffff; 1545 vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask); 1546 } 1547 1548 static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, 1549 uint32_t reg, uint32_t val) 1550 { 1551 struct amdgpu_device *adev = ring->adev; 1552 1553 amdgpu_ring_write(ring, 1554 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); 1555 amdgpu_ring_write(ring, reg << 2); 1556 amdgpu_ring_write(ring, 1557 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); 1558 amdgpu_ring_write(ring, val); 1559 amdgpu_ring_write(ring, 1560 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); 1561 amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1); 1562 } 1563 1564 /** 1565 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer 1566 * 1567 * @ring: amdgpu_ring pointer 1568 * 1569 * Returns the current hardware enc read pointer 1570 */ 1571 static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring) 1572 { 1573 struct amdgpu_device *adev = ring->adev; 1574 1575 if (ring == &adev->vcn.inst->ring_enc[0]) 1576 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); 1577 else 1578 return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); 1579 } 1580 1581 /** 1582 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer 1583 * 1584 * @ring: amdgpu_ring pointer 1585 * 1586 * Returns the current hardware enc write pointer 1587 */ 1588 static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring) 1589 { 1590 struct amdgpu_device *adev = ring->adev; 1591 1592 if (ring == &adev->vcn.inst->ring_enc[0]) 1593 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); 1594 else 1595 return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); 1596 } 1597 1598 /** 1599 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer 1600 * 1601 * @ring: amdgpu_ring pointer 1602 * 1603 * Commits the enc write pointer to the hardware 1604 */ 1605 static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring) 1606 { 1607 struct amdgpu_device *adev = ring->adev; 1608 1609 if (ring == &adev->vcn.inst->ring_enc[0]) 1610 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, 1611 lower_32_bits(ring->wptr)); 1612 else 1613 WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, 1614 lower_32_bits(ring->wptr)); 1615 } 1616 1617 /** 1618 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command 1619 * 1620 * @ring: amdgpu_ring pointer 1621 * @fence: fence to emit 1622 * 1623 * Write enc a fence and a trap command to the ring. 1624 */ 1625 static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 1626 u64 seq, unsigned flags) 1627 { 1628 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 1629 1630 amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE); 1631 amdgpu_ring_write(ring, addr); 1632 amdgpu_ring_write(ring, upper_32_bits(addr)); 1633 amdgpu_ring_write(ring, seq); 1634 amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP); 1635 } 1636 1637 static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring) 1638 { 1639 amdgpu_ring_write(ring, VCN_ENC_CMD_END); 1640 } 1641 1642 /** 1643 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer 1644 * 1645 * @ring: amdgpu_ring pointer 1646 * @ib: indirect buffer to execute 1647 * 1648 * Write enc ring commands to execute the indirect buffer 1649 */ 1650 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring, 1651 struct amdgpu_job *job, 1652 struct amdgpu_ib *ib, 1653 uint32_t flags) 1654 { 1655 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 1656 1657 amdgpu_ring_write(ring, VCN_ENC_CMD_IB); 1658 amdgpu_ring_write(ring, vmid); 1659 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); 1660 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 1661 amdgpu_ring_write(ring, ib->length_dw); 1662 } 1663 1664 static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, 1665 uint32_t reg, uint32_t val, 1666 uint32_t mask) 1667 { 1668 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); 1669 amdgpu_ring_write(ring, reg << 2); 1670 amdgpu_ring_write(ring, mask); 1671 amdgpu_ring_write(ring, val); 1672 } 1673 1674 static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, 1675 unsigned int vmid, uint64_t pd_addr) 1676 { 1677 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; 1678 1679 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 1680 1681 /* wait for reg writes */ 1682 vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + 1683 vmid * hub->ctx_addr_distance, 1684 lower_32_bits(pd_addr), 0xffffffff); 1685 } 1686 1687 static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, 1688 uint32_t reg, uint32_t val) 1689 { 1690 amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); 1691 amdgpu_ring_write(ring, reg << 2); 1692 amdgpu_ring_write(ring, val); 1693 } 1694 1695 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev, 1696 struct amdgpu_irq_src *source, 1697 unsigned type, 1698 enum amdgpu_interrupt_state state) 1699 { 1700 return 0; 1701 } 1702 1703 static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev, 1704 struct amdgpu_irq_src *source, 1705 struct amdgpu_iv_entry *entry) 1706 { 1707 DRM_DEBUG("IH: VCN TRAP\n"); 1708 1709 switch (entry->src_id) { 1710 case 124: 1711 amdgpu_fence_process(&adev->vcn.inst->ring_dec); 1712 break; 1713 case 119: 1714 amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]); 1715 break; 1716 case 120: 1717 amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]); 1718 break; 1719 default: 1720 DRM_ERROR("Unhandled interrupt: %d %d\n", 1721 entry->src_id, entry->src_data[0]); 1722 break; 1723 } 1724 1725 return 0; 1726 } 1727 1728 static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) 1729 { 1730 struct amdgpu_device *adev = ring->adev; 1731 int i; 1732 1733 WARN_ON(ring->wptr % 2 || count % 2); 1734 1735 for (i = 0; i < count / 2; i++) { 1736 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0)); 1737 amdgpu_ring_write(ring, 0); 1738 } 1739 } 1740 1741 static int vcn_v1_0_set_powergating_state(void *handle, 1742 enum amd_powergating_state state) 1743 { 1744 /* This doesn't actually powergate the VCN block. 1745 * That's done in the dpm code via the SMC. This 1746 * just re-inits the block as necessary. The actual 1747 * gating still happens in the dpm code. We should 1748 * revisit this when there is a cleaner line between 1749 * the smc and the hw blocks 1750 */ 1751 int ret; 1752 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1753 1754 if(state == adev->vcn.cur_state) 1755 return 0; 1756 1757 if (state == AMD_PG_STATE_GATE) 1758 ret = vcn_v1_0_stop(adev); 1759 else 1760 ret = vcn_v1_0_start(adev); 1761 1762 if(!ret) 1763 adev->vcn.cur_state = state; 1764 return ret; 1765 } 1766 1767 static void vcn_v1_0_idle_work_handler(struct work_struct *work) 1768 { 1769 struct amdgpu_device *adev = 1770 container_of(work, struct amdgpu_device, vcn.idle_work.work); 1771 unsigned int fences = 0, i; 1772 1773 for (i = 0; i < adev->vcn.num_enc_rings; ++i) 1774 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); 1775 1776 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 1777 struct dpg_pause_state new_state; 1778 1779 if (fences) 1780 new_state.fw_based = VCN_DPG_STATE__PAUSE; 1781 else 1782 new_state.fw_based = VCN_DPG_STATE__UNPAUSE; 1783 1784 if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) 1785 new_state.jpeg = VCN_DPG_STATE__PAUSE; 1786 else 1787 new_state.jpeg = VCN_DPG_STATE__UNPAUSE; 1788 1789 adev->vcn.pause_dpg_mode(adev, 0, &new_state); 1790 } 1791 1792 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec); 1793 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec); 1794 1795 if (fences == 0) { 1796 amdgpu_gfx_off_ctrl(adev, true); 1797 if (adev->pm.dpm_enabled) 1798 amdgpu_dpm_enable_uvd(adev, false); 1799 else 1800 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, 1801 AMD_PG_STATE_GATE); 1802 } else { 1803 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); 1804 } 1805 } 1806 1807 void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring) 1808 { 1809 struct amdgpu_device *adev = ring->adev; 1810 bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); 1811 1812 if (set_clocks) { 1813 amdgpu_gfx_off_ctrl(adev, false); 1814 if (adev->pm.dpm_enabled) 1815 amdgpu_dpm_enable_uvd(adev, true); 1816 else 1817 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, 1818 AMD_PG_STATE_UNGATE); 1819 } 1820 1821 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 1822 struct dpg_pause_state new_state; 1823 unsigned int fences = 0, i; 1824 1825 for (i = 0; i < adev->vcn.num_enc_rings; ++i) 1826 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); 1827 1828 if (fences) 1829 new_state.fw_based = VCN_DPG_STATE__PAUSE; 1830 else 1831 new_state.fw_based = VCN_DPG_STATE__UNPAUSE; 1832 1833 if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) 1834 new_state.jpeg = VCN_DPG_STATE__PAUSE; 1835 else 1836 new_state.jpeg = VCN_DPG_STATE__UNPAUSE; 1837 1838 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) 1839 new_state.fw_based = VCN_DPG_STATE__PAUSE; 1840 else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) 1841 new_state.jpeg = VCN_DPG_STATE__PAUSE; 1842 1843 adev->vcn.pause_dpg_mode(adev, 0, &new_state); 1844 } 1845 } 1846 1847 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { 1848 .name = "vcn_v1_0", 1849 .early_init = vcn_v1_0_early_init, 1850 .late_init = NULL, 1851 .sw_init = vcn_v1_0_sw_init, 1852 .sw_fini = vcn_v1_0_sw_fini, 1853 .hw_init = vcn_v1_0_hw_init, 1854 .hw_fini = vcn_v1_0_hw_fini, 1855 .suspend = vcn_v1_0_suspend, 1856 .resume = vcn_v1_0_resume, 1857 .is_idle = vcn_v1_0_is_idle, 1858 .wait_for_idle = vcn_v1_0_wait_for_idle, 1859 .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */, 1860 .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */, 1861 .soft_reset = NULL /* vcn_v1_0_soft_reset */, 1862 .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */, 1863 .set_clockgating_state = vcn_v1_0_set_clockgating_state, 1864 .set_powergating_state = vcn_v1_0_set_powergating_state, 1865 }; 1866 1867 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { 1868 .type = AMDGPU_RING_TYPE_VCN_DEC, 1869 .align_mask = 0xf, 1870 .support_64bit_ptrs = false, 1871 .no_user_fence = true, 1872 .vmhub = AMDGPU_MMHUB_0, 1873 .get_rptr = vcn_v1_0_dec_ring_get_rptr, 1874 .get_wptr = vcn_v1_0_dec_ring_get_wptr, 1875 .set_wptr = vcn_v1_0_dec_ring_set_wptr, 1876 .emit_frame_size = 1877 6 + 6 + /* hdp invalidate / flush */ 1878 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 1879 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 1880 8 + /* vcn_v1_0_dec_ring_emit_vm_flush */ 1881 14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */ 1882 6, 1883 .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */ 1884 .emit_ib = vcn_v1_0_dec_ring_emit_ib, 1885 .emit_fence = vcn_v1_0_dec_ring_emit_fence, 1886 .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush, 1887 .test_ring = amdgpu_vcn_dec_ring_test_ring, 1888 .test_ib = amdgpu_vcn_dec_ring_test_ib, 1889 .insert_nop = vcn_v1_0_dec_ring_insert_nop, 1890 .insert_start = vcn_v1_0_dec_ring_insert_start, 1891 .insert_end = vcn_v1_0_dec_ring_insert_end, 1892 .pad_ib = amdgpu_ring_generic_pad_ib, 1893 .begin_use = vcn_v1_0_ring_begin_use, 1894 .end_use = amdgpu_vcn_ring_end_use, 1895 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg, 1896 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait, 1897 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 1898 }; 1899 1900 static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { 1901 .type = AMDGPU_RING_TYPE_VCN_ENC, 1902 .align_mask = 0x3f, 1903 .nop = VCN_ENC_CMD_NO_OP, 1904 .support_64bit_ptrs = false, 1905 .no_user_fence = true, 1906 .vmhub = AMDGPU_MMHUB_0, 1907 .get_rptr = vcn_v1_0_enc_ring_get_rptr, 1908 .get_wptr = vcn_v1_0_enc_ring_get_wptr, 1909 .set_wptr = vcn_v1_0_enc_ring_set_wptr, 1910 .emit_frame_size = 1911 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 1912 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + 1913 4 + /* vcn_v1_0_enc_ring_emit_vm_flush */ 1914 5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */ 1915 1, /* vcn_v1_0_enc_ring_insert_end */ 1916 .emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */ 1917 .emit_ib = vcn_v1_0_enc_ring_emit_ib, 1918 .emit_fence = vcn_v1_0_enc_ring_emit_fence, 1919 .emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush, 1920 .test_ring = amdgpu_vcn_enc_ring_test_ring, 1921 .test_ib = amdgpu_vcn_enc_ring_test_ib, 1922 .insert_nop = amdgpu_ring_insert_nop, 1923 .insert_end = vcn_v1_0_enc_ring_insert_end, 1924 .pad_ib = amdgpu_ring_generic_pad_ib, 1925 .begin_use = vcn_v1_0_ring_begin_use, 1926 .end_use = amdgpu_vcn_ring_end_use, 1927 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg, 1928 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait, 1929 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 1930 }; 1931 1932 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) 1933 { 1934 adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs; 1935 DRM_INFO("VCN decode is enabled in VM mode\n"); 1936 } 1937 1938 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev) 1939 { 1940 int i; 1941 1942 for (i = 0; i < adev->vcn.num_enc_rings; ++i) 1943 adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs; 1944 1945 DRM_INFO("VCN encode is enabled in VM mode\n"); 1946 } 1947 1948 static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = { 1949 .set = vcn_v1_0_set_interrupt_state, 1950 .process = vcn_v1_0_process_interrupt, 1951 }; 1952 1953 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev) 1954 { 1955 adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2; 1956 adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs; 1957 } 1958 1959 const struct amdgpu_ip_block_version vcn_v1_0_ip_block = 1960 { 1961 .type = AMD_IP_BLOCK_TYPE_VCN, 1962 .major = 1, 1963 .minor = 0, 1964 .rev = 0, 1965 .funcs = &vcn_v1_0_ip_funcs, 1966 }; 1967