/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/delay.h>
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
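/*
 * Note on the accessors above: the ring read/write pointers are held in UVD
 * registers. amdgpu caches the write pointer in ring->wptr and only commits
 * it to mmUVD_RBC_RB_WPTR in set_wptr; the ring buffer controller then
 * fetches commands up to that offset.
 */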
static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to get amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}
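	/*
	 * PACKET0(reg, 0) below is a type-0 ring packet: the command
	 * processor writes the dword that follows into the named register.
	 * These packets program generous semaphore timeouts, clear stale
	 * timeout status and (apparently) enable the semaphore block via
	 * UVD_SEMA_CNTL.
	 */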
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to get amdgpu_device pointer
 *
 * Stop the UVD block, mark the ring as not ready anymore
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
	       lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
	       upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);
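	/*
	 * Wait for the VCPU to report "running" in UVD_STATUS (bit 1).
	 * Each attempt polls for up to ~1s (100 * 10ms); if the firmware
	 * does not come up, pulse the VCPU soft reset and retry, up to
	 * 10 attempts in total.
	 */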
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear bit 2 (value 0x4) of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
	       lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
	       upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}
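/*
 * Fence/trap protocol used below: the sequence number is written to
 * UVD_CONTEXT_ID and the target address to GPCOM_VCPU_DATA0/1, then
 * GPCOM_VCPU_CMD selects the operation (on this generation, 0 appears
 * to mean "write fence" and 2 "send trap interrupt").
 */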
/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
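/*
 * The ring test below writes a known value (0xCAFEDEAD) to UVD_CONTEXT_ID
 * via MMIO, asks the ring to overwrite it with 0xDEADBEEF, then polls the
 * register until the new value shows up, proving that the command
 * processor is alive and executing packets.
 */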
/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO: interrupt (un)masking is not implemented for UVD 5.0 */
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__JPEG_MODE_MASK |
		UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
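/*
 * Full static HW gating (below) is intentionally compiled out; only the
 * dynamic SW gating above is used. See the commented-out call in
 * uvd_v5_0_set_clockgating_state(). Kept for reference.
 */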
#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
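/*
 * ixUVD_CGC_MEM_CTRL is an indirect UVD context register (hence the
 * RREG32_UVD_CTX/WREG32_UVD_CTX accessors). The low 12 bits appear to
 * gate the clocks of the local memory cells; DYN_CLOCK_MODE in
 * UVD_CGC_CTRL selects dynamic, medium grain clock gating (MGCG).
 */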
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
	} else {
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
	    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v5_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};