/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"

#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE	(256 * 1024)
#define VCE_V2_0_STACK_SIZE	(64 * 1024)
#define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)

static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	vce_v2_0_mc_resume(adev);

	/* set BUSY flag */
	WREG32_P(mmVCE_STATUS, 1, ~1);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);

	WREG32_P(mmVCE_SOFT_RESET,
		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	mdelay(100);

	WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmVCE_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	/* clear BUSY flag */
	WREG32_P(mmVCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}

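/**
 * vce_v2_0_early_init - set ring and irq function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Hook up the VCE ring and interrupt handling callbacks before
 * the other init phases run.
 */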
static int vce_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v2_0_set_ring_funcs(adev);
	vce_v2_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
		VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

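/**
 * vce_v2_0_hw_init - start and test the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Start the VCE block and run the ring test on both rings;
 * a ring is only left marked ready if its test passes.
 */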
static int vce_v2_0_hw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v2_0_start(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &adev->vce.ring[1];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
	return 0;
}

static int vce_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

static int vce_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v2_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

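/**
 * vce_v2_0_set_sw_cg - software-controlled clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: gate (true) or ungate (false) the clocks
 *
 * Toggle the VCE clock gating bitfields by hand rather than
 * relying on dynamic hardware gating.
 */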
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
	u32 tmp;

	if (gated) {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
	} else {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe7;
		tmp &= ~0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0x1fe000;
		tmp &= ~0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
	}
}

static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
	u32 orig, tmp;

	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp &= ~0x00060006;
	if (gated) {
		tmp |= 0xe10000;
	} else {
		tmp |= 0xe1;
		tmp &= ~0xe10000;
	}
	WREG32(mmVCE_CLOCK_GATING_B, tmp);

	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0x1fe000;
	tmp &= ~0xff000000;
	if (tmp != orig)
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
	tmp &= ~0x3fc;
	if (tmp != orig)
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

	if (gated)
		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
	bool sw_cg = false;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) {
		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, true);
		else
			vce_v2_0_set_dyn_cg(adev, true);
	} else {
		vce_v2_0_disable_cg(adev);

		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, false);
		else
			vce_v2_0_set_dyn_cg(adev, false);
	}
}

static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32(mmVCE_CLOCK_GATING_A);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	tmp |= 0x40000;
	WREG32(mmVCE_CLOCK_GATING_A, tmp);

	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp |= 0x10;
	tmp &= ~0x100000;
	WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

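/**
 * vce_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Program the VCPU cache offset/size registers for the firmware,
 * stack and data segments and set up clock gating.
 */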
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr = adev->vce.gpu_addr;
	uint32_t size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	addr += AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V2_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = VCE_V2_0_STACK_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = VCE_V2_0_DATA_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);

	vce_v2_0_init_cg(adev);
}

static bool vce_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int vce_v2_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
	mdelay(5);

	return vce_v2_0_start(adev);
}

static void vce_v2_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VCE 2.0 registers\n");
	dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
		 RREG32(mmVCE_STATUS));
	dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
		 RREG32(mmVCE_VCPU_CNTL));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
		 RREG32(mmVCE_SOFT_RESET));
	dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO2));
	dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI2));
	dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE2));
	dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR2));
	dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR2));
	dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO));
	dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI));
	dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE));
	dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR));
	dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_A));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_B));
	dev_info(adev->dev, "  VCE_CGTT_CLK_OVERRIDE=0x%08X\n",
		 RREG32(mmVCE_CGTT_CLK_OVERRIDE));
	dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
		 RREG32(mmVCE_SYS_INT_EN));
	dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL2));
	dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL));
	dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_VM_CTRL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL1));
	dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CACHE_CTRL));
}

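/**
 * vce_v2_0_set_interrupt_state - enable/disable the VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source to update
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Set or clear the system interrupt trap enable bit for VCE.
 */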
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");
	switch (entry->src_data) {
	case 0:
		amdgpu_fence_process(&adev->vce.ring[0]);
		break;
	case 1:
		amdgpu_fence_process(&adev->vce.ring[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int vce_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	vce_v2_0_enable_mgcg(adev, gate);

	return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v2_0_stop()? */
		return 0;
	else
		return vce_v2_0_start(adev);
}

const struct amd_ip_funcs vce_v2_0_ip_funcs = {
	.early_init = vce_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v2_0_sw_init,
	.sw_fini = vce_v2_0_sw_fini,
	.hw_init = vce_v2_0_hw_init,
	.hw_fini = vce_v2_0_hw_fini,
	.suspend = vce_v2_0_suspend,
	.resume = vce_v2_0_resume,
	.is_idle = vce_v2_0_is_idle,
	.wait_for_idle = vce_v2_0_wait_for_idle,
	.soft_reset = vce_v2_0_soft_reset,
	.print_status = vce_v2_0_print_status,
	.set_clockgating_state = vce_v2_0_set_clockgating_state,
	.set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.get_rptr = vce_v2_0_ring_get_rptr,
	.get_wptr = vce_v2_0_ring_get_wptr,
	.set_wptr = vce_v2_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
	.set = vce_v2_0_set_interrupt_state,
	.process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}