/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07

#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
#define mmGRBM_GFX_INDEX_DEFAULT	0xE0000000

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

#define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))

#define GET_VCE_INSTANCE(i)	((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
					| GRBM_GFX_INDEX__VCE_ALL_PIPE)

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
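/*
 * Note: VCE 3.0 can carry two hardware instances behind a single
 * register aperture.  The ring helpers below select the usable
 * (non-harvested) instance through GRBM_GFX_INDEX before touching any
 * VCE_RB_* register and restore the default index afterwards, all
 * under grbm_idx_mutex.
 */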
/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		v = RREG32(mmVCE_RB_RPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_RPTR2);
	else
		v = RREG32(mmVCE_RB_RPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		v = RREG32(mmVCE_RB_WPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_WPTR2);
	else
		v = RREG32(mmVCE_RB_WPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else if (ring->me == 1)
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);
}
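/*
 * Toggling VCE_CGTT_OVERRIDE in VCE_RB_ARB_CTRL forces coarse clock
 * gating off while the gating registers below are reprogrammed; the
 * override is dropped again once programming is done.
 */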
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}

static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set override to disable clock gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/*
	 * This function enables MGCG, which is controlled by firmware.
	 * With the clocks in the gated state the core is still
	 * accessible, but the firmware throttles the clocks on the
	 * fly as necessary.
	 */
	if (!gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0x3ff;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}
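/*
 * Poll VCE_STATUS for the firmware-loaded flag: up to 10 attempts of
 * 100 x 10ms polls each, kicking the ECPU through a soft reset between
 * attempts, so roughly 10 seconds worst case before giving up.
 */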
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		/* Program the instance 0 register space when both instances
		 * (or only instance 0) are available; program the instance 1
		 * register space only when instance 1 alone is available.
		 */
		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
			ring = &adev->vce.ring[0];
			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

			ring = &adev->vce.ring[1];
			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

			ring = &adev->vce.ring[2];
			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
		}

		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear VCE STATUS */
		WREG32(mmVCE_STATUS, 0);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
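/*
 * Harvesting: fuses record which VCE instance(s) were disabled at
 * production time.  APUs report this through VCE_HARVEST_FUSE_MACRO,
 * dGPUs through CC_HARVEST_FUSES; some ASICs below simply hard-code a
 * single usable instance.
 */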
#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY))
		return AMDGPU_VCE_HARVEST_VCE1;

	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		if ((adev->asic_type == CHIP_POLARIS10) ||
		    (adev->asic_type == CHIP_POLARIS11) ||
		    (adev->asic_type == CHIP_POLARIS12) ||
		    (adev->asic_type == CHIP_VEGAM))
			return AMDGPU_VCE_HARVEST_VCE1;

		return 0;
	}
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	adev->vce.num_rings = 3;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	/* firmware 52.8.3 or newer is required for 3-ring support */
	if (adev->vce.fw_version < FW_52_8_3)
		adev->vce.num_rings = 2;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
		if (r)
			return r;
	}

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v3_0_override_vce_clock_gating(adev, true);

	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	vce_v3_0_stop(adev);
	return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}
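/**
 * vce_v3_0_mc_resume - program the memory controller interface
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE hardware instance to program
 *
 * Tell the VCE memory controller where the firmware image and the
 * per-instance stack and data segments live inside the shared VCE BO.
 */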
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}
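/*
 * Soft-reset detection below relies on the VCE_STATUS busy bits.  Note
 * that whenever either instance reports busy, the reset bits for both
 * instances are requested; the two instances are not reset
 * independently here.
 */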
#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to the VCE team, we should use VCE_STATUS instead of
	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for the 1st instance, 10 for the 2nd instance).
	 *
	 * VCE_STATUS
	 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 * |----+----+-----------+----+----+----+----------+---------+----|
	 * |bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3--6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}

static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}

static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
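/*
 * Coarse MGCG control: walk both instances, program the clock ON/OFF
 * delays when ungating, then hand the fine-grained bits to
 * vce_v3_0_set_vce_sw_clock_gating().
 */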
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));

		if (!enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);

			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		ret = vce_v3_0_stop(adev);
		if (ret)
			goto out;
	} else {
		ret = vce_v3_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
		goto out;
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);

	/* AMD_CG_SUPPORT_VCE_MGCG */
	data = RREG32(mmVCE_CLOCK_GATING_A);
	if (data & (0x04 << 4))
		*flags |= AMD_CG_SUPPORT_VCE_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
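/*
 * Ring packet emitters for VM mode.  VCE commands are plain dword
 * streams (an opcode followed by its operands) written straight into
 * the ring buffer.
 */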
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
	.get_clockgating_state = vce_v3_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size =
		4 + /* vce_v3_0_emit_pipeline_sync */
		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};
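/*
 * Rings run in VM mode on Stoney and newer, where the command stream
 * carries GPU virtual addresses; older ASICs use physical mode, where
 * amdgpu_vce_ring_parse_cs() validates and patches the stream.
 */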
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	if (adev->asic_type >= CHIP_STONEY) {
		for (i = 0; i < adev->vce.num_rings; i++) {
			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
			adev->vce.ring[i].me = i;
		}
		DRM_INFO("VCE enabled in VM mode\n");
	} else {
		for (i = 0; i < adev->vce.num_rings; i++) {
			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
			adev->vce.ring[i].me = i;
		}
		DRM_INFO("VCE enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 4,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};