/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v4_2_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = uvd_v4_2_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

	uvd_v4_2_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x203108);

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	uvd_v4_2_mc_resume(adev);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				(0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

	WREG32_P(0x3D49, 0, ~(1 << 2));

	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v4_2_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |=
			UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v4_2_ring_emit_hdp_flush */
		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};