/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07

#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0		0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1		0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2		0x8618
#define mmGRBM_GFX_INDEX_DEFAULT		0xE0000000

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

#define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))

#define GET_VCE_INSTANCE(i)	((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
					| GRBM_GFX_INDEX__VCE_ALL_PIPE)

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
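/*
 * VCE 3.0 parts can carry two VCE instances, either of which may be fused
 * off ("harvested").  Register access is steered to a particular instance
 * by programming GRBM_GFX_INDEX: GET_VCE_INSTANCE(1), for example, expands
 * to (1 << 4) | 0x7, selecting instance 1 on all pipes.  The ring accessors
 * below hold grbm_idx_mutex around the index write so a concurrent user
 * cannot redirect the register window mid-access.
 */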
/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		v = RREG32(mmVCE_RB_RPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_RPTR2);
	else
		v = RREG32(mmVCE_RB_RPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		v = RREG32(mmVCE_RB_WPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_WPTR2);
	else
		v = RREG32(mmVCE_RB_WPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else if (ring->me == 1)
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}
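/*
 * Note: the constants programmed below (0x1ff, 0x3ff000, 0xffc00000, ...)
 * are hardware-specific clock-gating bit patterns used as raw values rather
 * than named register fields.  Broadly, the ungated path forces the UENC
 * DMA and register clocks on, while the gated path releases those forces so
 * the firmware can throttle the clocks itself.
 */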
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set override to disable clock gating while programming */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/*
	 * This function enables MGCG, which is controlled by the firmware.
	 * With the clocks in the gated state the core is still accessible,
	 * but the firmware will throttle the clocks on the fly as necessary.
	 */
	if (!gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0x3ff;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}
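/*
 * Poll VCE_STATUS until the firmware reports itself loaded.  Worst case the
 * inner loop waits 100 * 10 ms = 1 s per attempt and up to 10 attempts are
 * made, soft-resetting the ECPU between attempts, so this can take roughly
 * 10 seconds before giving up with -ETIMEDOUT.
 */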
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		/*
		 * Program the ring registers in instance 0's register space
		 * whenever instance 0 is usable; fall back to instance 1's
		 * register space only when instance 1 is the sole instance
		 * available.
		 */
		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
			ring = &adev->vce.ring[0];
			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

			ring = &adev->vce.ring[1];
			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

			ring = &adev->vce.ring[2];
			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
		}

		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear VCE_STATUS */
		WREG32(mmVCE_STATUS, 0);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000
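/*
 * Decode which VCE instances are fused off.  Fiji and Stoney always have
 * instance 1 harvested.  Other parts expose the configuration through a
 * fuse register: the SMC-side VCE_HARVEST_FUSE_MACRO on APUs and
 * CC_HARVEST_FUSES on dGPUs.  A fuse value of 0 normally means both
 * instances are present, but the Polaris/VEGAM parts only have a single
 * usable VCE instance, so instance 1 is reported as harvested for them
 * regardless of the fuses.
 */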
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY))
		return AMDGPU_VCE_HARVEST_VCE1;

	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		if ((adev->asic_type == CHIP_POLARIS10) ||
		    (adev->asic_type == CHIP_POLARIS11) ||
		    (adev->asic_type == CHIP_POLARIS12) ||
		    (adev->asic_type == CHIP_VEGAM))
			return AMDGPU_VCE_HARVEST_VCE1;

		return 0;
	}
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	adev->vce.num_rings = 3;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
			       (VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	/* 52.8.3 required for 3 ring support */
	if (adev->vce.fw_version < FW_52_8_3)
		adev->vce.num_rings = 2;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v3_0_override_vce_clock_gating(adev, true);

	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
		if (r)
			return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	vce_v3_0_stop(adev);
	return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}
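/*
 * Lay out the VCE BO in the VCPU's 40-bit address space.  The BO holds one
 * shared firmware image followed by a private stack+data region for each
 * instance:
 *
 *   [ fw (384K) | stack0 (64K) | data0 | stack1 (64K) | data1 ]
 *
 * matching the VCE_V3_0_FW_SIZE + 2 * (STACK + DATA) size requested in
 * sw_init.  Instance 0 points its cache windows at the first region;
 * instance 1 skips over instance 0's stack and data.
 */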
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK	(VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
					 VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
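/*
 * Both instances are probed below.  If either one reports busy, both
 * SOFT_RESET_VCE0 and SOFT_RESET_VCE1 are requested, so the block is always
 * reset as a whole rather than per instance.
 */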
static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/*
	 * According to the VCE team, VCE_STATUS should be used for the busy
	 * check instead of the SRBM_STATUS.VCE_BUSY bit.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX selects which VCE instance's
	 * registers are accessed (GET_VCE_INSTANCE(0) for the first
	 * instance, GET_VCE_INSTANCE(1) for the second).
	 *
	 * VCE_STATUS
	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 * |----+----+-----------+----+----+----+----------+---------+----|
	 * |bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3-6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}

static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	/* src_data[0] carries the index of the ring that raised the trap */
	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
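/*
 * Clock gating is applied per instance: for each instance that is not
 * harvested, GRBM_GFX_INDEX is pointed at it, the clock ON/OFF delay fields
 * of VCE_CLOCK_GATING_A and VCE_UENC_CLOCK_GATING are initialized when
 * ungating, and the software gating masks are then programmed through
 * vce_v3_0_set_vce_sw_clock_gating().
 */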
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));

		if (!enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);

			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/*
	 * This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		ret = vce_v3_0_stop(adev);
		if (ret)
			goto out;
	} else {
		ret = vce_v3_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
		goto out;
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);

	/* AMD_CG_SUPPORT_VCE_MGCG */
	data = RREG32(mmVCE_CLOCK_GATING_A);
	if (data & (0x04 << 4))
		*flags |= AMD_CG_SUPPORT_VCE_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib, unsigned int vmid,
				  bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
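/*
 * VCE_CMD_WAIT_GE stalls the engine until the 32-bit value at the given
 * address is greater than or equal to seq, i.e. until every fence up to
 * sync_seq on this ring has signaled.
 */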
static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
	.get_clockgating_state = vce_v3_0_get_clockgating_state,
};
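/*
 * Dword accounting for the ring function tables below:
 *   vce_v3_0_emit_vm_flush      writes 6 dwords (UPDATE_PTB + FLUSH_TLB)
 *   vce_v3_0_emit_pipeline_sync writes 4 dwords (WAIT_GE)
 *   vce_v3_0_ring_emit_ib       writes 5 dwords
 *   amdgpu_vce_ring_emit_fence  writes 6 dwords per fence
 * which is where the emit_frame_size and emit_ib_size values come from.
 */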
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size =
		4 + /* vce_v3_0_emit_pipeline_sync */
		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	if (adev->asic_type >= CHIP_STONEY) {
		for (i = 0; i < adev->vce.num_rings; i++) {
			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
			adev->vce.ring[i].me = i;
		}
		DRM_INFO("VCE enabled in VM mode\n");
	} else {
		for (i = 0; i < adev->vce.num_rings; i++) {
			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
			adev->vce.ring[i].me = i;
		}
		DRM_INFO("VCE enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v3_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 4,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};