/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}
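
/*
 * Every command this file puts on the UVD ring is a plain register write
 * wrapped in a type-0 packet: PACKET0(reg, n) (defined in cikd.h,
 * included above) builds a header DWORD naming a start register and a
 * count, followed by n + 1 data DWORDs that land in consecutive
 * registers.  For example, the ring test further down emits
 *
 *	PACKET0(mmUVD_CONTEXT_ID, 0)
 *	0xDEADBEEF
 *
 * which simply writes 0xDEADBEEF into UVD_CONTEXT_ID from the ring.
 */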

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	/* raise clocks while booting up the VCPU */
	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	r = uvd_v4_2_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	/* lower clocks again */
	amdgpu_asic_set_uvd_clocks(adev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready anymore
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v4_2_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}
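
/*
 * Overview of the start sequence implemented below: program the memory
 * controller windows (uvd_v4_2_mc_resume), stall the UMC, put all UVD
 * sub-blocks into soft reset, release everything except the VCPU,
 * program the LMI and MPC registers, then release the VCPU and poll
 * UVD_STATUS until the firmware signals it is alive.  The stop sequence
 * reverses this.
 */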

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz;
	int i, j, r;

	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	uvd_v4_2_mc_resume(adev);

	/* disable clock gating */
	WREG32(mmUVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				     (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}
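
/*
 * Fences are driven through the VCPU mailbox registers: UVD_CONTEXT_ID
 * carries the sequence number, GPCOM_VCPU_DATA0/DATA1 the target
 * address, and a write to GPCOM_VCPU_CMD kicks the operation.  As used
 * below, command 0 writes the fence value and command 2 raises the
 * trap interrupt.
 */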

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_* flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v4_2_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: virtual memory ID (unused on this ring)
 * @ctx_switch: whether a context switch is needed (unused on this ring)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
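
/*
 * Layout of the UVD buffer object as programmed below: the firmware
 * image, the heap and the per-session stack space are mapped as three
 * consecutive VCPU cache windows.  Offsets and sizes are written in
 * 8-byte units (hence the ">> 3" shifts), with address bits 28-31 and
 * 32-39 supplied separately through the LMI extension registers.
 */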

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	uvd_v4_2_init_cg(adev);
}
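
/*
 * Medium-grain clock gating: enabling sets the memory gating bits in
 * UVD_CGC_MEM_CTRL and switches UVD_CGC_CTRL into dynamic clock mode;
 * disabling clears both again.
 */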

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
	       (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
	       (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
		       UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
		       (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
{
	bool hw_mode = true;

	if (hw_mode) {
		uvd_v4_2_set_dcm(adev, false);
	} else {
		u32 tmp = RREG32(mmUVD_CGC_CTRL);

		tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		WREG32(mmUVD_CGC_CTRL, tmp);
	}
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	uvd_v4_2_enable_mgcg(adev, gate);

	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		return 0;
	} else {
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};
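
/*
 * DWORD accounting for emit_frame_size below: an HDP flush and an HDP
 * invalidate are one PACKET0 header plus one data word each (2 + 2),
 * and uvd_v4_2_ring_emit_fence is 14 DWORDs (8 for the fence write,
 * 6 for the trap).  emit_ib_size likewise matches the 4 writes in
 * uvd_v4_2_ring_emit_ib.
 */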

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v4_2_ring_emit_hdp_flush */
		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};
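
/*
 * Consumption sketch (not part of this file): the CIK SoC setup code is
 * expected to register the version exported above during early device
 * init, roughly
 *
 *	amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
 *
 * after which the amdgpu core drives the amd_ip_funcs callbacks through
 * its normal init/suspend/resume state machine.  The exact helper name
 * and call site (cik.c) are assumptions based on how IP blocks of this
 * era are wired up, not something this file defines.
 */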