/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

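	/*
	 * Note (added for clarity): amdgpu_uvd_sw_init() fetches the UVD
	 * microcode and creates the VCPU buffer object backing it, and
	 * amdgpu_uvd_resume() then uploads the firmware image into that
	 * buffer, so the ring can only be initialized once both succeed.
	 */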
	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	r = uvd_v4_2_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v4_2_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz;
	int i, j, r;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

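	/*
	 * Bring-up sequence, mirrored by uvd_v4_2_stop(): ungate the UVD
	 * clocks, program the memory controller windows, pulse the soft
	 * resets, start the VCPU clock and then poll UVD_STATUS until the
	 * firmware reports it is running.
	 */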
	WREG32(mmUVD_CGC_GATE, 0);
	uvd_v4_2_set_dcm(adev, true);

	uvd_v4_2_mc_resume(adev);

	/* disable interrupts */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupts */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				     (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
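	/*
	 * Teardown is the bring-up sequence in reverse: idle the ring
	 * controller, stall the memory interface, hold the VCPU in reset
	 * and finally gate its clock again.
	 */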
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

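/*
 * Writing to HDP_MEM_COHERENCY_FLUSH_CNTL (above) flushes the HDP write
 * cache, while writing 1 to HDP_DEBUG0 (below) is the conventional way
 * on these pre-GFX9 parts to invalidate the HDP read cache, keeping CPU
 * accesses through the BAR coherent with what the engine reads.
 */
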
/**
 * uvd_v4_2_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: failed to lock UVD ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: VM id (unused by UVD)
 * @ctx_switch: context switch flag (unused by UVD)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

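	/*
	 * The VCPU cache windows are programmed in units of 8 bytes, hence
	 * the ">> 3" below: window 0 covers the firmware image, window 1
	 * the decoder heap, and window 2 the stack plus the per-session
	 * context areas.
	 */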
	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
	       (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
	       (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
		       UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
		       (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

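/*
 * Power gating note: when dpm is disabled, the UVD island is toggled
 * directly through UVD_PGFSM_CONFIG, and CURRENT_PG_STATUS (the 0x4 bit
 * checked below) appears to report whether UVD is currently gated, so
 * the handshake only runs when the state actually needs to change.
 */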
static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4)) {
				WREG32(mmUVD_PGFSM_CONFIG,
				       (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
					UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
					UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) & 0x4) {
				WREG32(mmUVD_PGFSM_CONFIG,
				       (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
					UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
					UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v4_2_ring_emit_hdp_flush */
		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};