/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}
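/**
 * uvd_v5_0_sw_init - software init for UVD
 *
 * @handle: amdgpu_device pointer
 *
 * Hook up the UVD trap interrupt, run the common UVD software setup
 * and firmware load, and initialize the single UVD ring.
 */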
static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block and mark the ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	ring->sched.ready = false;

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}
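/*
 * The UVD VCPU sees its firmware image, heap and per-session stack
 * through three cache windows carved out of one contiguous allocation.
 * The CACHE_OFFSETn registers appear to take offsets in 8-byte units,
 * which is why uvd_v5_0_mc_resume() shifts the byte offsets right by 3.
 */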
/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
	       lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
	       upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
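/*
 * VCPU bring-up sequence: program the memory controller, hold the whole
 * block in reset, configure the LMI and MPC, release everything except
 * the VCPU, enable the VCPU clock, then drop the VCPU reset and poll
 * UVD_STATUS (the code below treats bit 1 as the "alive" indication,
 * retrying with a VCPU reset up to ten times before giving up).
 */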
/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET,
			 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the 0x4 bit of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
	       lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
	       upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}
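/*
 * All ring commands below are PACKET0 register writes: PACKET0(reg, n)
 * emits a header addressing "reg" followed by n + 1 data words, e.g.
 *
 *   amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
 *   amdgpu_ring_write(ring, seq);   // single register write of "seq"
 *
 * The fence appears to be implemented as two GPCOM transactions:
 * command 0 with the fence address in DATA0/DATA1 triggers the fence
 * write, and command 2 raises the trap interrupt.
 */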
/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: fence address
 * @seq: sequence number
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vmid: VM ID (unused on this ring)
 * @ctx_switch: context switch flag (unused on this ring)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}
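/*
 * Full-block recovery: stop the VCPU, pulse the SRBM reset line for the
 * UVD client, then run the complete start sequence again.
 */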
static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK);
		/* the VCPU clock can only be gated when PG is enabled */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
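/*
 * Hardware-level clock gating is compiled out; only the dynamic/SW
 * gating configured above is used (see also the commented-out call in
 * uvd_v5_0_set_clockgating_state() below).
 */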
#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
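/*
 * AMD_CG_STATE_GATE requests gating, so the block must be idle before
 * the gates are enabled; ungating is applied unconditionally.  The SW
 * gating mode bits are reprogrammed in either case.
 */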
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
	} else {
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
	    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v5_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};