/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/delay.h>
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
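
/**
 * uvd_v5_0_early_init - set up ring and IRQ callbacks
 *
 * @handle: handle used to identify the IP block
 *
 * Record that this chip has a single UVD instance and hook up the
 * ring and interrupt handler callbacks.
 */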
static int uvd_v5_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	uvd_v5_0_set_ring_funcs(adev);
	uvd_v5_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to identify the IP block
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v5_0_stop(adev);

	return 0;
}
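
/**
 * uvd_v5_0_suspend - suspend the UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Stop the hardware, gate the clocks and suspend the runtime state.
 */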
static int uvd_v5_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v5_0_hw_fini(adev);
	if (r)
		return r;
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v5_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v5_0_hw_init(adev);
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
	       lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
	       upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			       (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);
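
	/* Wait for the VCPU to report it is running; if it does not come
	 * up, retry the boot up to ten times, bouncing the VCPU soft
	 * reset between attempts.
	 */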
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
	       lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
	       upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence value to
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
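
/**
 * uvd_v5_0_ring_insert_nop - insert NOP commands
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to pad with
 *
 * Pad the ring with NOP packets. A UVD NOP is two dwords, so both
 * the write pointer and the count are expected to be even.
 */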
static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static bool uvd_v5_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v5_0_start(adev);
}
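
/**
 * uvd_v5_0_set_interrupt_state - set UVD interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source to configure
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Currently a stub; the UVD trap source stays as programmed by
 * uvd_v5_0_start().
 */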
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}

static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3, suvd_flags;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK);
		/* the VCPU clock can only be gated when powergating is enabled */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;
		data3 &= ~UVD_CGC_GATE__REGS_MASK;
		data1 |= suvd_flags;
	} else {
		data3 = 0;
		data1 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}
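
/**
 * uvd_v5_0_set_sw_clock_gating - configure dynamic (software) clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Program UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL for dynamic clock mode
 * with the gate delay timer and clock-off delay, clearing the
 * per-block mode overrides.
 */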
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}
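
/**
 * uvd_v5_0_set_clockgating_state - gate/ungate UVD clocks
 *
 * @handle: handle used to identify the IP block
 * @state: AMD_CG_STATE_GATE to enable gating, AMD_CG_STATE_UNGATE to disable
 *
 * Gating waits for the block to go idle before the coarse clock gates
 * are enabled; software (dynamic) clock gating is programmed either way.
 */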
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v5_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v5_0_enable_clock_gating(adev, true);

		/* enable HW gates because UVD is idle */
		/* uvd_v5_0_set_hw_clock_gating(adev); */
	} else {
		uvd_v5_0_enable_clock_gating(adev, false);
	}

	uvd_v5_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v5_0_stop(adev);
	} else {
		ret = uvd_v5_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
	    CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v5_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v5_0_ip_funcs,
};