/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

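/**
 * uvd_v4_2_early_init - set up IP callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * Set the number of UVD instances and install the ring and
 * IRQ callbacks for this UVD block.
 */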
static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return amdgpu_uvd_entity_init(adev);
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v4_2_stop(adev);

	return 0;
}

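/**
 * uvd_v4_2_suspend - suspend the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the hardware and save the UVD state for resume.
 */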
static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1 << 2, ~(1 << 2));

	uvd_v4_2_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x203108);

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	uvd_v4_2_mc_resume(adev);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the uvd busy bit */
	WREG32_P(mmUVD_STATUS, 0, ~(1 << 2));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
	       (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

	WREG32_P(0x3D49, 0, ~(1 << 2));

	/* disable VCPU clock */
	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the sequence number to
 * @seq: sequence number to emit
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

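/**
 * uvd_v4_2_ring_insert_nop - insert NOP commands
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to pad with
 *
 * Pad the ring with UVD no-op packets; the packets are written in
 * pairs, so both @count and the write pointer must be even.
 */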
static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

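/**
 * uvd_v4_2_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Toggle UVD medium grain clock gating; gating is only enabled
 * when the ASIC advertises AMD_CG_SUPPORT_UVD_MGCG.
 */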
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

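/**
 * uvd_v4_2_set_dcm - toggle dynamic clock mode
 *
 * @adev: amdgpu_device pointer
 * @sw_mode: enable software-controlled dynamic clocking
 *
 * Program the clock gating control registers for either dynamic
 * or static clocking of the UVD block.
 */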
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
	       (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
	       (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
		       UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
		       (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD &&
		    !adev->pm.dpm_enabled) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
			      CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
				WREG32(mmUVD_PGFSM_CONFIG,
				       (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
					UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
					UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD &&
		    !adev->pm.dpm_enabled) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
			    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
				WREG32(mmUVD_PGFSM_CONFIG,
				       (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
					UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
					UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v4_2_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

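/**
 * uvd_v4_2_set_ring_funcs - set ring callbacks
 *
 * @adev: amdgpu_device pointer
 *
 * Install the UVD 4.2 ring callbacks on the single UVD instance.
 */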
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};