/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else if (ring == &adev->vce.ring[1])
		return RREG32(mmVCE_RB_RPTR2);
	else
		return RREG32(mmVCE_RB_RPTR3);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else if (ring == &adev->vce.ring[1])
		return RREG32(mmVCE_RB_WPTR2);
	else
		return RREG32(mmVCE_RB_WPTR3);
}
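/*
 * Note on the ring-buffer protocol (illustrative, not part of the
 * original driver): each of the three VCE rings has its own RPTR/WPTR
 * register pair (mmVCE_RB_RPTR/WPTR, ...2, ...3). The driver stages
 * command dwords at ring->wptr and publishes them by writing the WPTR
 * register; the VCE firmware consumes dwords and advances RPTR. A
 * minimal usage sketch with the generic amdgpu_ring helpers:
 *
 *	amdgpu_ring_alloc(ring, ndw);         // reserve ring space
 *	amdgpu_ring_write(ring, VCE_CMD_END); // stage dwords at wptr
 *	amdgpu_ring_commit(ring);             // invokes ->set_wptr()
 */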
/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else if (ring == &adev->vce.ring[1])
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR3, ring->wptr);
}

static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}

static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	 * With the clocks in the gated state the core is still
	 * accessible but the firmware will throttle the clocks on the
	 * fly as necessary.
	 */
	if (gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}

/* Poll VCE_STATUS until the VCPU reports that the firmware is loaded,
 * pulsing the ECPU soft reset between attempts if it does not come up.
 */
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}
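/*
 * Worst-case wait in vce_v3_0_firmware_loaded() (illustrative
 * arithmetic, not in the original source): each attempt polls 100
 * times with a 10 ms delay (~1 s), plus two 10 ms reset pulses
 * between attempts, so 10 attempts bound the wait at roughly
 * 10 * (1000 + 20) ms, about 10.2 s, before returning -ETIMEDOUT.
 */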
/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	ring = &adev->vce.ring[2];
	WREG32(mmVCE_RB_RPTR3, ring->wptr);
	WREG32(mmVCE_RB_WPTR3, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/* Stop both VCE instances: gate the VCPU clock, hold the ECPU in reset
 * and turn software clock gating back off where it is supported.
 */
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
			vce_v3_0_set_vce_sw_clock_gating(adev, false);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY) ||
	    (adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		return 0;
	}
}
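/*
 * Harvest fuse decoding at a glance (summary of the switch above,
 * added for clarity): the two fuse bits select which encoder
 * instances were fused off in production:
 *
 *	fuse value 0 -> both VCE0 and VCE1 usable
 *	fuse value 1 -> VCE0 harvested
 *	fuse value 2 -> VCE1 harvested
 *	fuse value 3 -> both harvested (early_init returns -ENOENT)
 */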
static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	adev->vce.num_rings = 3;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
				     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
		if (r)
			return r;
	}

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	return vce_v3_0_stop(adev);
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}
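/*
 * Layout of the shared VCE BO that vce_v3_0_mc_resume() programs
 * below (derived from the sizes and offsets used in this file; added
 * for clarity). Offsets are relative to AMDGPU_VCE_FIRMWARE_OFFSET:
 *
 *	+0          : firmware image   (VCE_V3_0_FW_SIZE, 384K)
 *	+384K       : instance 0 stack (VCE_V3_0_STACK_SIZE, 64K)
 *	+384K+64K   : instance 0 data  (VCE_V3_0_DATA_SIZE)
 *	then        : instance 1 stack, instance 1 data
 *
 * which matches the amdgpu_vce_sw_init() allocation above of
 * VCE_V3_0_FW_SIZE + 2 * (VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE).
 */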
/* Program the memory controller view of the VCE firmware, stack and
 * data regions for the selected instance and re-enable the system
 * interrupt.
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	}

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
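/*
 * Soft reset flow (overview added for clarity, derived from the
 * handlers below): ->check_soft_reset() samples VCE_STATUS on both
 * instances and caches the required SRBM reset bits in
 * adev->vce.srbm_soft_reset; ->pre_soft_reset() then suspends the
 * block, ->soft_reset() pulses the cached bits in mmSRBM_SOFT_RESET,
 * and ->post_soft_reset() resumes it.
 */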
static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to the VCE team, we should use VCE_STATUS instead of
	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 0x10 for 2nd instance).
	 *
	 * VCE_STATUS
	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 * |----+----+-----------+----+----+----+----------+---------+----|
	 * |bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3 to 6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}

static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}
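/*
 * Interrupt plumbing (note added for clarity): vce_v3_0_sw_init()
 * registers IRQ source id 167 for this block. The ->set() callback
 * below toggles the trap-interrupt enable bit, and ->process()
 * acknowledges the trap and signals the fence of the ring selected
 * by src_data (0-2).
 */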
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_TONGA) ||
	    (adev->asic_type == CHIP_FIJI))
		vce_v3_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);

			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}
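/*
 * Packet formats emitted below (summary added for clarity; the dword
 * counts match the get_*_size helpers further down):
 *
 *	IB:        VCE_CMD_IB_VM, vm_id, addr_lo, addr_hi, length_dw  (5 dwords)
 *	VM flush:  VCE_CMD_UPDATE_PTB, vm_id, pd_addr >> 12,
 *	           VCE_CMD_FLUSH_TLB, vm_id, VCE_CMD_END              (6 dwords)
 *	Pipe sync: VCE_CMD_WAIT_GE, addr_lo, addr_hi, seq             (4 dwords)
 */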
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned int vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vm_id);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
	return
		5; /* vce_v3_0_ring_emit_ib */
}

static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
	return
		4 + /* vce_v3_0_emit_pipeline_sync */
		6;  /* amdgpu_vce_ring_emit_fence x1 no user fence */
}

static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
{
	return
		6 +     /* vce_v3_0_emit_vm_flush */
		4 +     /* vce_v3_0_emit_pipeline_sync */
		6 + 6;  /* amdgpu_vce_ring_emit_fence x2 vm fence */
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
};
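/*
 * VM-mode variant (note added for clarity): unlike the physical-mode
 * table above, parse_cs is NULL because command streams run through
 * the GPU VM and do not need to be validated and patched by the
 * kernel; the VM-aware emit_ib and emit_vm_flush callbacks below are
 * used instead.
 */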
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
	.get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	if (adev->asic_type >= CHIP_STONEY) {
		for (i = 0; i < adev->vce.num_rings; i++)
			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
		DRM_INFO("VCE enabled in VM mode\n");
	} else {
		for (i = 0; i < adev->vce.num_rings; i++)
			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
		DRM_INFO("VCE enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}