/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

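/**
 * uvd_v4_2_sw_init - driver side initialization
 *
 * @handle: amdgpu_device pointer
 *
 * Register the UVD trap interrupt, initialize the common UVD code
 * and firmware handling, and set up the UVD ring.
 */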
static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_init_cg(adev);
	uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	r = uvd_v4_2_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v4_2_stop(adev);
	ring->ready = false;

	return 0;
}

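/**
 * uvd_v4_2_suspend - suspend the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Tear down the hardware and let the common UVD code save its state.
 */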
static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = uvd_v4_2_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz;
	int i, j, r;

	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	uvd_v4_2_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

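	/*
	 * Wait for the VCPU to report it is running (UVD_STATUS bit 1);
	 * if it does not, pulse the VCPU soft reset and try again.
	 */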
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				     (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v4_2_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: vm id (unused)
 * @ctx_switch: context switch flag (unused)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

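/**
 * uvd_v4_2_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Toggle the UVD memory clock gates (UVD_CGC_MEM_CTRL) and the dynamic
 * clock mode bit in UVD_CGC_CTRL based on @enable and MGCG support.
 */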
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

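/**
 * uvd_v4_2_set_dcm - configure the dynamic clock mode
 *
 * @adev: amdgpu_device pointer
 * @sw_mode: select the software-controlled clock ramping path
 *
 * Program the clock gating delay/off timers in UVD_CGC_CTRL and,
 * depending on @sw_mode, the clock ramp enables in UVD_CGC_CTRL2.
 */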
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
{
	bool hw_mode = true;

	if (hw_mode) {
		uvd_v4_2_set_dcm(adev, false);
	} else {
		u32 tmp = RREG32(mmUVD_CGC_CTRL);
		tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		WREG32(mmUVD_CGC_CTRL, tmp);
	}
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

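/**
 * uvd_v4_2_set_clockgating_state - set UVD clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state to set
 *
 * Enable or disable UVD medium grain clock gating (MGCG),
 * if it is supported by this ASIC.
 */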
static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	uvd_v4_2_enable_mgcg(adev, gate);

	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		return 0;
	} else {
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v4_2_ring_emit_hdp_flush */
		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};