/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "vcn_v3_0.h"

#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#include <drm/drm_drv.h>

#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300

bool unifiedQ_enabled = false;

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static void vcn_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);

/**
 * vcn_v4_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (unifiedQ_enabled) {
		adev->vcn.num_vcn_inst = 1;
		adev->vcn.num_enc_rings = 1;
	} else {
		adev->vcn.num_enc_rings = 2;
	}

	if (!unifiedQ_enabled)
		vcn_v4_0_set_dec_ring_funcs(adev);

	vcn_v4_0_set_enc_ring_funcs(adev);
	vcn_v4_0_set_irq_funcs(adev);

	return 0;
}
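/*
 * With PSP-based loading the driver only registers the VCN ucode entry
 * below; the PSP copies the firmware into its TMR region, and
 * vcn_v4_0_mc_resume() then programs the TMR address instead of the
 * firmware BO address.
 */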
static void amdgpu_vcn_setup_unified_queue_ucode(struct amdgpu_device *adev)
{
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}
}

/**
 * vcn_v4_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (unifiedQ_enabled)
		amdgpu_vcn_setup_unified_queue_ucode(adev);
	else
		amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn4_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID__UVD_TRAP, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);
		if (!unifiedQ_enabled) {
			ring = &adev->vcn.inst[i].ring_dec;
			ring->use_doorbell = true;

			/* VCN4 doorbell layout
			 * 1: VCN_JPEG_DB_CTRL UVD_JRBC_RB_WPTR; (jpeg)
			 * 2: VCN_RB1_DB_CTRL UVD_RB_WPTR; (decode/encode for unified queue)
			 * 3: VCN_RB2_DB_CTRL UVD_RB_WPTR2; (encode only for swqueue)
			 * 4: VCN_RB3_DB_CTRL UVD_RB_WPTR3; (Reserved)
			 * 5: VCN_RB4_DB_CTRL UVD_RB_WPTR4; (decode only for swqueue)
			 */
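			/*
			 * Worked example (illustrative value only; the base
			 * comes from the SoC doorbell assignment): with
			 * vcn_ring0_1 == 4, instance 1's decode ring gets
			 * doorbell (4 << 1) + 5 + 8 * 1 = 21.
			 */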
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1)
					+ 5 + 8 * i;

			sprintf(ring->name, "vcn_dec_%d", i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					AMDGPU_RING_PRIO_DEFAULT,
					&adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}
		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
					j + VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;

			if (unifiedQ_enabled) {
				sprintf(ring->name, "vcn_unified%d", i);
				r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
						AMDGPU_RING_PRIO_DEFAULT, NULL);
			} else {
				enum amdgpu_ring_priority_level hw_prio;

				hw_prio = amdgpu_vcn_get_enc_ring_prio(j);
				sprintf(ring->name, "vcn_enc_%d.%d", i, j);
				r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
						hw_prio, &adev->vcn.inst[i].sched_score);
			}
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = 0;

		if (unifiedQ_enabled) {
			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
			fw_shared->sq.is_enabled = 1;
		}

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (!unifiedQ_enabled) {
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
	}
	return 0;
}

/**
 * vcn_v4_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v4_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (unifiedQ_enabled)
			ring = &adev->vcn.inst[i].ring_enc[0];
		else
			ring = &adev->vcn.inst[i].ring_dec;

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v4_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
			vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
		}
	}

	return 0;
}

/**
 * vcn_v4_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v4_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v4_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v4_0_hw_init(adev);

	return r;
}

/**
 * vcn_v4_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
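	/*
	 * The cached windows are laid out back to back in the instance BO:
	 * firmware first, then the stack (AMDGPU_VCN_STACK_SIZE), then the
	 * context (AMDGPU_VCN_CONTEXT_SIZE), as programmed below.
	 */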
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

/**
 * vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}
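	/*
	 * Note: with @indirect set, the writes above are only queued into the
	 * DPG scratch SRAM and committed later through psp_update_vcn_sram()
	 * in vcn_v4_0_start_dpg_mode(); the firmware BAR addresses are left
	 * zeroed here (they are expected to be resolved on the PSP side).
	 */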
	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
 * vcn_v4_0_disable_static_power_gating - disable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable static power gating for VCN block
 */
static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		uint32_t value;

		value = (inst) ? 0x2200800 : 0;
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, value, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

	return;
}

/**
 * vcn_v4_0_enable_static_power_gating - enable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable static power gating for VCN block
 */
static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}

	return;
}

/**
 * vcn_v4_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;
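	/*
	 * Keep the CGC in software-controlled mode: DYN_CLOCK_MODE is cleared
	 * below so gating follows the explicitly programmed *_MODE bits rather
	 * than the hardware dynamic mode.
	 */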
	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v4_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
		int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);

	return;
}
/**
 * vcn_v4_0_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* disable clock gating */
	vcn_v4_0_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
	vcn_v4_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	if (unifiedQ_enabled) {
		ring = &adev->vcn.inst[inst_idx].ring_enc[0];
		fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	} else
		ring = &adev->vcn.inst[inst_idx].ring_dec;

	WREG32_SOC15(VCN, inst_idx, regVCN_RB4_DB_CTRL,
		ring->doorbell_index << VCN_RB4_DB_CTRL__OFFSET__SHIFT |
		VCN_RB4_DB_CTRL__EN_MASK);

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO4,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI4,
		upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t));

	/* resetting ring, fw should not check RB ring */
	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB4_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);

	/* Initialize the ring buffer's read and write pointers */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR4);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR4, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR4);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB4_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
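	/*
	 * UVD_SCRATCH2 mirrors the decode ring write pointer while in DPG mode
	 * (see vcn_v4_0_dec_ring_set_wptr()), so start it out cleared.
	 */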
	WREG32_SOC15(VCN, inst_idx, regUVD_SCRATCH2, 0);

	if (unifiedQ_enabled)
		fw_shared->sq.queue_mode &= ~FW_QUEUE_RING_RESET;

	for (i = 0; i < adev->vcn.num_enc_rings; i++) {
		ring = &adev->vcn.inst[inst_idx].ring_enc[i];

		if (i) {
			ring = &adev->vcn.inst[inst_idx].ring_enc[1];

			WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO2, ring->gpu_addr);
			WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE2, ring->ring_size / 4);
			tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR2);
			WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR2, tmp);
			ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR2);

			WREG32_SOC15(VCN, inst_idx, regVCN_RB2_DB_CTRL,
				ring->doorbell_index << VCN_RB2_DB_CTRL__OFFSET__SHIFT |
				VCN_RB2_DB_CTRL__EN_MASK);
		} else {
			ring = &adev->vcn.inst[inst_idx].ring_enc[0];

			WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
			WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);
			tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
			WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
			ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

			WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
				ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
				VCN_RB1_DB_CTRL__EN_MASK);
		}
	}
	return 0;
}

/**
 * vcn_v4_0_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v4_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);
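		/*
		 * Poll for the VCPU to report readiness in UVD_STATUS
		 * (status & 2); if it never does, retry with a block reset,
		 * up to ten attempts in total.
		 */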
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
				if (amdgpu_emu_mode == 1)
					msleep(1);
			}

			if (amdgpu_emu_mode == 1) {
				if (status & 2) {
					r = 0;
					break;
				}
			} else {
				r = 0;
				if (status & 2)
					break;

				dev_err(adev->dev, "VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
					UVD_VCPU_CNTL__BLK_RST_MASK,
					~UVD_VCPU_CNTL__BLK_RST_MASK);
				mdelay(10);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
					~UVD_VCPU_CNTL__BLK_RST_MASK);

				mdelay(10);
				r = -1;
			}
		}

		if (r) {
			dev_err(adev->dev, "VCN[%d] decode not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		if (unifiedQ_enabled) {
			ring = &adev->vcn.inst[i].ring_enc[0];
			fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
		} else {
			ring = &adev->vcn.inst[i].ring_dec;

			WREG32_SOC15(VCN, i, regVCN_RB4_DB_CTRL,
				ring->doorbell_index << VCN_RB4_DB_CTRL__OFFSET__SHIFT |
				VCN_RB4_DB_CTRL__EN_MASK);

			/* program the RB_BASE for ring buffer */
			WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO4,
				lower_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI4,
				upper_32_bits(ring->gpu_addr));

			WREG32_SOC15(VCN, i, regUVD_RB_SIZE4, ring->ring_size / sizeof(uint32_t));

			/* resetting ring, fw should not check RB ring */
			tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
			tmp &= ~(VCN_RB_ENABLE__RB4_EN_MASK);
			WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);

			/* Initialize the ring buffer's read and write pointers */
			tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR4);
			WREG32_SOC15(VCN, i, regUVD_RB_WPTR4, tmp);
			ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR4);

			tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
			tmp |= VCN_RB_ENABLE__RB4_EN_MASK;
			WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);

			ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_RPTR4);
		}
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);
		tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
		if (unifiedQ_enabled)
			fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
		else {
			ring = &adev->vcn.inst[i].ring_enc[1];
			WREG32_SOC15(VCN, i, regVCN_RB2_DB_CTRL,
				ring->doorbell_index << VCN_RB2_DB_CTRL__OFFSET__SHIFT |
				VCN_RB2_DB_CTRL__EN_MASK);
			tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR2);
			WREG32_SOC15(VCN, i, regUVD_RB_WPTR2, tmp);
			ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR2);
			WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO2, ring->gpu_addr);
			WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32_SOC15(VCN, i, regUVD_RB_SIZE2, ring->ring_size / 4);
		}
	}

	return 0;
}

/**
 * vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR4);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR4, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

/**
 * vcn_v4_0_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;
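		/*
		 * At this point both the VCPU LMI and the UMC channel have
		 * drained, so the VCPU can be safely isolated and reset.
		 */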
		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_enable_clock_gating(adev, i);

		/* enable VCN power gating */
		vcn_v4_0_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

/**
 * vcn_v4_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
		struct dpg_pause_state *new_state)
{
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v4_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v4_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR4);
}

/**
 * vcn_v4_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v4_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR4);
}
/**
 * vcn_v4_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		WREG32_SOC15(VCN, ring->me, regUVD_SCRATCH2,
			lower_32_bits(ring->wptr));
	}

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR4, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v4_0_dec_sw_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0x3f,
	.nop = VCN_DEC_SW_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v4_0_dec_ring_get_rptr,
	.get_wptr = vcn_v4_0_dec_ring_get_wptr,
	.set_wptr = vcn_v4_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v3_0_dec_sw_ring_emit_vm_flush */
		5 + 5 + /* vcn_v3_0_dec_sw_ring_emit_fdec_swe x2 vm fdec_swe */
		1, /* vcn_v3_0_dec_sw_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v3_0_dec_sw_ring_emit_ib */
	.emit_ib = vcn_v3_0_dec_sw_ring_emit_ib,
	.emit_fence = vcn_v3_0_dec_sw_ring_emit_fence,
	.emit_vm_flush = vcn_v3_0_dec_sw_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_sw_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_sw_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v3_0_dec_sw_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v3_0_dec_sw_ring_emit_wreg,
	.emit_reg_wait = vcn_v3_0_dec_sw_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v4_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v4_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR2);
}

/**
 * vcn_v4_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v4_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR2);
	}
}
/**
 * vcn_v4_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v4_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v4_0_enc_ring_get_rptr,
	.get_wptr = vcn_v4_0_enc_ring_get_wptr,
	.set_wptr = vcn_v4_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v4_0_set_dec_ring_funcs - set dec ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set decode ring functions
 */
static void vcn_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_dec.funcs = &vcn_v4_0_dec_sw_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode software ring is enabled in VM mode\n", i);
	}
}

/**
 * vcn_v4_0_set_enc_ring_funcs - set enc ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set encode ring functions
 */
static void vcn_v4_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v4_0_enc_ring_vm_funcs;
			adev->vcn.inst[i].ring_enc[j].me = i;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
	}
}
/**
 * vcn_v4_0_is_idle - check whether VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v4_0_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v4_0_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			/* Clock gating may only be engaged on an idle instance. */
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v4_0_enable_clock_gating(adev, i);
		} else {
			vcn_v4_0_disable_clock_gating(adev, i);
		}
	}

	return 0;
}

/**
 * vcn_v4_0_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v4_0_stop(adev);
	else
		ret = vcn_v4_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

/**
 * vcn_v4_0_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	unsigned int type, enum amdgpu_interrupt_state state)
{
	return 0;
}
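/*
 * The empty .set callback above is deliberate: this driver does not mask
 * VCN traps per interrupt type, but amdgpu's IRQ core still expects every
 * amdgpu_irq_src to provide a .set hook, so a no-op keeps
 * amdgpu_irq_get()/amdgpu_irq_put() working for these sources.
 */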
/**
 * vcn_v4_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_TRAP:
		/* With the unified queue there is no separate decode ring
		 * to service, so the dec trap is simply acknowledged.
		 */
		if (!unifiedQ_enabled)
			amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_4_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
	.set = vcn_v4_0_set_interrupt_state,
	.process = vcn_v4_0_process_interrupt,
};

/**
 * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* One IRQ type per enc ring plus one for the dec trap. */
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
	.name = "vcn_v4_0",
	.early_init = vcn_v4_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v4_0_sw_init,
	.sw_fini = vcn_v4_0_sw_fini,
	.hw_init = vcn_v4_0_hw_init,
	.hw_fini = vcn_v4_0_hw_fini,
	.suspend = vcn_v4_0_suspend,
	.resume = vcn_v4_0_resume,
	.is_idle = vcn_v4_0_is_idle,
	.wait_for_idle = vcn_v4_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v4_0_set_clockgating_state,
	.set_powergating_state = vcn_v4_0_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v4_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v4_0_ip_funcs,
};