1 /* 2 * Copyright 2014 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Christian König <christian.koenig@amd.com> 23 */ 24 25 #include <linux/firmware.h> 26 #include <drm/drmP.h> 27 #include "amdgpu.h" 28 #include "amdgpu_uvd.h" 29 #include "vid.h" 30 #include "uvd/uvd_5_0_d.h" 31 #include "uvd/uvd_5_0_sh_mask.h" 32 #include "oss/oss_2_0_d.h" 33 #include "oss/oss_2_0_sh_mask.h" 34 #include "bif/bif_5_0_d.h" 35 #include "vi.h" 36 #include "smu/smu_7_1_2_d.h" 37 #include "smu/smu_7_1_2_sh_mask.h" 38 39 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); 40 static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); 41 static int uvd_v5_0_start(struct amdgpu_device *adev); 42 static void uvd_v5_0_stop(struct amdgpu_device *adev); 43 44 /** 45 * uvd_v5_0_ring_get_rptr - get read pointer 46 * 47 * @ring: amdgpu_ring pointer 48 * 49 * Returns the current hardware read pointer 50 */ 51 static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring) 52 { 53 struct amdgpu_device *adev = ring->adev; 54 55 return RREG32(mmUVD_RBC_RB_RPTR); 56 } 57 58 /** 59 * uvd_v5_0_ring_get_wptr - get write pointer 60 * 61 * @ring: amdgpu_ring pointer 62 * 63 * Returns the current hardware write pointer 64 */ 65 static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring) 66 { 67 struct amdgpu_device *adev = ring->adev; 68 69 return RREG32(mmUVD_RBC_RB_WPTR); 70 } 71 72 /** 73 * uvd_v5_0_ring_set_wptr - set write pointer 74 * 75 * @ring: amdgpu_ring pointer 76 * 77 * Commits the write pointer to the hardware 78 */ 79 static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) 80 { 81 struct amdgpu_device *adev = ring->adev; 82 83 WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); 84 } 85 86 static int uvd_v5_0_early_init(void *handle) 87 { 88 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 89 90 uvd_v5_0_set_ring_funcs(adev); 91 uvd_v5_0_set_irq_funcs(adev); 92 93 return 0; 94 } 95 96 static int uvd_v5_0_sw_init(void *handle) 97 { 98 struct amdgpu_ring *ring; 99 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 100 int r; 101 102 /* UVD TRAP */ 103 r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq); 104 if (r) 105 return r; 106 107 r = amdgpu_uvd_sw_init(adev); 108 if (r) 109 return r; 110 111 r = amdgpu_uvd_resume(adev); 112 if (r) 113 return r; 114 115 ring = &adev->uvd.ring; 116 sprintf(ring->name, "uvd"); 117 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); 118 119 return r; 120 } 121 122 static int uvd_v5_0_sw_fini(void *handle) 123 { 124 int r; 125 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 126 127 r = amdgpu_uvd_suspend(adev); 128 if (r) 129 return r; 130 131 r = amdgpu_uvd_sw_fini(adev); 132 if (r) 133 return r; 134 135 return r; 136 } 137 138 /** 139 * uvd_v5_0_hw_init - start and test UVD block 140 * 141 * @adev: amdgpu_device pointer 142 * 143 * Initialize the hardware, boot up the VCPU and do some testing 144 */ 145 static int uvd_v5_0_hw_init(void *handle) 146 { 147 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 148 struct amdgpu_ring *ring = &adev->uvd.ring; 149 uint32_t tmp; 150 int r; 151 152 /* raise clocks while booting up the VCPU */ 153 amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); 154 155 r = uvd_v5_0_start(adev); 156 if (r) 157 goto done; 158 159 ring->ready = true; 160 r = amdgpu_ring_test_ring(ring); 161 if (r) { 162 ring->ready = false; 163 goto done; 164 } 165 166 r = amdgpu_ring_alloc(ring, 10); 167 if (r) { 168 DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); 169 goto done; 170 } 171 172 tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); 173 amdgpu_ring_write(ring, tmp); 174 amdgpu_ring_write(ring, 0xFFFFF); 175 176 tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); 177 amdgpu_ring_write(ring, tmp); 178 amdgpu_ring_write(ring, 0xFFFFF); 179 180 tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); 181 amdgpu_ring_write(ring, tmp); 182 amdgpu_ring_write(ring, 0xFFFFF); 183 184 /* Clear timeout status bits */ 185 amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0)); 186 amdgpu_ring_write(ring, 0x8); 187 188 amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); 189 amdgpu_ring_write(ring, 3); 190 191 amdgpu_ring_commit(ring); 192 193 done: 194 /* lower clocks again */ 195 amdgpu_asic_set_uvd_clocks(adev, 0, 0); 196 197 if (!r) 198 DRM_INFO("UVD initialized successfully.\n"); 199 200 return r; 201 } 202 203 /** 204 * uvd_v5_0_hw_fini - stop the hardware block 205 * 206 * @adev: amdgpu_device pointer 207 * 208 * Stop the UVD block, mark ring as not ready any more 209 */ 210 static int uvd_v5_0_hw_fini(void *handle) 211 { 212 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 213 struct amdgpu_ring *ring = &adev->uvd.ring; 214 215 uvd_v5_0_stop(adev); 216 ring->ready = false; 217 218 return 0; 219 } 220 221 static int uvd_v5_0_suspend(void *handle) 222 { 223 int r; 224 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 225 226 r = uvd_v5_0_hw_fini(adev); 227 if (r) 228 return r; 229 230 r = amdgpu_uvd_suspend(adev); 231 if (r) 232 return r; 233 234 return r; 235 } 236 237 static int uvd_v5_0_resume(void *handle) 238 { 239 int r; 240 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 241 242 r = amdgpu_uvd_resume(adev); 243 if (r) 244 return r; 245 246 r = uvd_v5_0_hw_init(adev); 247 if (r) 248 return r; 249 250 return r; 251 } 252 253 /** 254 * uvd_v5_0_mc_resume - memory controller programming 255 * 256 * @adev: amdgpu_device pointer 257 * 258 * Let the UVD memory controller know it's offsets 259 */ 260 static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) 261 { 262 uint64_t offset; 263 uint32_t size; 264 265 /* programm memory controller bits 0-27 */ 266 WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, 267 lower_32_bits(adev->uvd.gpu_addr)); 268 WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, 269 upper_32_bits(adev->uvd.gpu_addr)); 270 271 offset = AMDGPU_UVD_FIRMWARE_OFFSET; 272 size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); 273 WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3); 274 WREG32(mmUVD_VCPU_CACHE_SIZE0, size); 275 276 offset += size; 277 size = AMDGPU_UVD_HEAP_SIZE; 278 WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3); 279 WREG32(mmUVD_VCPU_CACHE_SIZE1, size); 280 281 offset += size; 282 size = AMDGPU_UVD_STACK_SIZE + 283 (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles); 284 WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); 285 WREG32(mmUVD_VCPU_CACHE_SIZE2, size); 286 287 WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 288 WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 289 WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 290 } 291 292 /** 293 * uvd_v5_0_start - start UVD block 294 * 295 * @adev: amdgpu_device pointer 296 * 297 * Setup and start the UVD block 298 */ 299 static int uvd_v5_0_start(struct amdgpu_device *adev) 300 { 301 struct amdgpu_ring *ring = &adev->uvd.ring; 302 uint32_t rb_bufsz, tmp; 303 uint32_t lmi_swap_cntl; 304 uint32_t mp_swap_cntl; 305 int i, j, r; 306 307 /*disable DPG */ 308 WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); 309 310 /* disable byte swapping */ 311 lmi_swap_cntl = 0; 312 mp_swap_cntl = 0; 313 314 uvd_v5_0_mc_resume(adev); 315 316 /* disable clock gating */ 317 WREG32(mmUVD_CGC_GATE, 0); 318 319 /* disable interupt */ 320 WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); 321 322 /* stall UMC and register bus before resetting VCPU */ 323 WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); 324 mdelay(1); 325 326 /* put LMI, VCPU, RBC etc... into reset */ 327 WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | 328 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | 329 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | 330 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | 331 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); 332 mdelay(5); 333 334 /* take UVD block out of reset */ 335 WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); 336 mdelay(5); 337 338 /* initialize UVD memory controller */ 339 WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | 340 (1 << 21) | (1 << 9) | (1 << 20)); 341 342 #ifdef __BIG_ENDIAN 343 /* swap (8 in 32) RB and IB */ 344 lmi_swap_cntl = 0xa; 345 mp_swap_cntl = 0; 346 #endif 347 WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); 348 WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); 349 350 WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); 351 WREG32(mmUVD_MPC_SET_MUXA1, 0x0); 352 WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040); 353 WREG32(mmUVD_MPC_SET_MUXB1, 0x0); 354 WREG32(mmUVD_MPC_SET_ALU, 0); 355 WREG32(mmUVD_MPC_SET_MUX, 0x88); 356 357 /* take all subblocks out of reset, except VCPU */ 358 WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); 359 mdelay(5); 360 361 /* enable VCPU clock */ 362 WREG32(mmUVD_VCPU_CNTL, 1 << 9); 363 364 /* enable UMC */ 365 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); 366 367 /* boot up the VCPU */ 368 WREG32(mmUVD_SOFT_RESET, 0); 369 mdelay(10); 370 371 for (i = 0; i < 10; ++i) { 372 uint32_t status; 373 for (j = 0; j < 100; ++j) { 374 status = RREG32(mmUVD_STATUS); 375 if (status & 2) 376 break; 377 mdelay(10); 378 } 379 r = 0; 380 if (status & 2) 381 break; 382 383 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); 384 WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, 385 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); 386 mdelay(10); 387 WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); 388 mdelay(10); 389 r = -1; 390 } 391 392 if (r) { 393 DRM_ERROR("UVD not responding, giving up!!!\n"); 394 return r; 395 } 396 /* enable master interrupt */ 397 WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); 398 399 /* clear the bit 4 of UVD_STATUS */ 400 WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); 401 402 rb_bufsz = order_base_2(ring->ring_size); 403 tmp = 0; 404 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); 405 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); 406 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); 407 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); 408 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); 409 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); 410 /* force RBC into idle state */ 411 WREG32(mmUVD_RBC_RB_CNTL, tmp); 412 413 /* set the write pointer delay */ 414 WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); 415 416 /* set the wb address */ 417 WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); 418 419 /* programm the RB_BASE for ring buffer */ 420 WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, 421 lower_32_bits(ring->gpu_addr)); 422 WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, 423 upper_32_bits(ring->gpu_addr)); 424 425 /* Initialize the ring buffer's read and write pointers */ 426 WREG32(mmUVD_RBC_RB_RPTR, 0); 427 428 ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); 429 WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); 430 431 WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); 432 433 return 0; 434 } 435 436 /** 437 * uvd_v5_0_stop - stop UVD block 438 * 439 * @adev: amdgpu_device pointer 440 * 441 * stop the UVD block 442 */ 443 static void uvd_v5_0_stop(struct amdgpu_device *adev) 444 { 445 /* force RBC into idle state */ 446 WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); 447 448 /* Stall UMC and register bus before resetting VCPU */ 449 WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); 450 mdelay(1); 451 452 /* put VCPU into reset */ 453 WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); 454 mdelay(5); 455 456 /* disable VCPU clock */ 457 WREG32(mmUVD_VCPU_CNTL, 0x0); 458 459 /* Unstall UMC and register bus */ 460 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); 461 } 462 463 /** 464 * uvd_v5_0_ring_emit_fence - emit an fence & trap command 465 * 466 * @ring: amdgpu_ring pointer 467 * @fence: fence to emit 468 * 469 * Write a fence and a trap command to the ring. 470 */ 471 static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, 472 unsigned flags) 473 { 474 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 475 476 amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); 477 amdgpu_ring_write(ring, seq); 478 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); 479 amdgpu_ring_write(ring, addr & 0xffffffff); 480 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); 481 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); 482 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); 483 amdgpu_ring_write(ring, 0); 484 485 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); 486 amdgpu_ring_write(ring, 0); 487 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); 488 amdgpu_ring_write(ring, 0); 489 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); 490 amdgpu_ring_write(ring, 2); 491 } 492 493 /** 494 * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush 495 * 496 * @ring: amdgpu_ring pointer 497 * 498 * Emits an hdp flush. 499 */ 500 static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 501 { 502 amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); 503 amdgpu_ring_write(ring, 0); 504 } 505 506 /** 507 * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate 508 * 509 * @ring: amdgpu_ring pointer 510 * 511 * Emits an hdp invalidate. 512 */ 513 static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) 514 { 515 amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); 516 amdgpu_ring_write(ring, 1); 517 } 518 519 /** 520 * uvd_v5_0_ring_test_ring - register write test 521 * 522 * @ring: amdgpu_ring pointer 523 * 524 * Test if we can successfully write to the context register 525 */ 526 static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) 527 { 528 struct amdgpu_device *adev = ring->adev; 529 uint32_t tmp = 0; 530 unsigned i; 531 int r; 532 533 WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); 534 r = amdgpu_ring_alloc(ring, 3); 535 if (r) { 536 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 537 ring->idx, r); 538 return r; 539 } 540 amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); 541 amdgpu_ring_write(ring, 0xDEADBEEF); 542 amdgpu_ring_commit(ring); 543 for (i = 0; i < adev->usec_timeout; i++) { 544 tmp = RREG32(mmUVD_CONTEXT_ID); 545 if (tmp == 0xDEADBEEF) 546 break; 547 DRM_UDELAY(1); 548 } 549 550 if (i < adev->usec_timeout) { 551 DRM_INFO("ring test on %d succeeded in %d usecs\n", 552 ring->idx, i); 553 } else { 554 DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", 555 ring->idx, tmp); 556 r = -EINVAL; 557 } 558 return r; 559 } 560 561 /** 562 * uvd_v5_0_ring_emit_ib - execute indirect buffer 563 * 564 * @ring: amdgpu_ring pointer 565 * @ib: indirect buffer to execute 566 * 567 * Write ring commands to execute the indirect buffer 568 */ 569 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, 570 struct amdgpu_ib *ib, 571 unsigned vm_id, bool ctx_switch) 572 { 573 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); 574 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); 575 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); 576 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 577 amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)); 578 amdgpu_ring_write(ring, ib->length_dw); 579 } 580 581 static bool uvd_v5_0_is_idle(void *handle) 582 { 583 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 584 585 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); 586 } 587 588 static int uvd_v5_0_wait_for_idle(void *handle) 589 { 590 unsigned i; 591 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 592 593 for (i = 0; i < adev->usec_timeout; i++) { 594 if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) 595 return 0; 596 } 597 return -ETIMEDOUT; 598 } 599 600 static int uvd_v5_0_soft_reset(void *handle) 601 { 602 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 603 604 uvd_v5_0_stop(adev); 605 606 WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, 607 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); 608 mdelay(5); 609 610 return uvd_v5_0_start(adev); 611 } 612 613 static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev, 614 struct amdgpu_irq_src *source, 615 unsigned type, 616 enum amdgpu_interrupt_state state) 617 { 618 // TODO 619 return 0; 620 } 621 622 static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, 623 struct amdgpu_irq_src *source, 624 struct amdgpu_iv_entry *entry) 625 { 626 DRM_DEBUG("IH: UVD TRAP\n"); 627 amdgpu_fence_process(&adev->uvd.ring); 628 return 0; 629 } 630 631 static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev) 632 { 633 uint32_t data, data1, data2, suvd_flags; 634 635 data = RREG32(mmUVD_CGC_CTRL); 636 data1 = RREG32(mmUVD_SUVD_CGC_GATE); 637 data2 = RREG32(mmUVD_SUVD_CGC_CTRL); 638 639 data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | 640 UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); 641 642 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | 643 UVD_SUVD_CGC_GATE__SIT_MASK | 644 UVD_SUVD_CGC_GATE__SMP_MASK | 645 UVD_SUVD_CGC_GATE__SCM_MASK | 646 UVD_SUVD_CGC_GATE__SDB_MASK; 647 648 data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | 649 (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) | 650 (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY)); 651 652 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | 653 UVD_CGC_CTRL__UDEC_CM_MODE_MASK | 654 UVD_CGC_CTRL__UDEC_IT_MODE_MASK | 655 UVD_CGC_CTRL__UDEC_DB_MODE_MASK | 656 UVD_CGC_CTRL__UDEC_MP_MODE_MASK | 657 UVD_CGC_CTRL__SYS_MODE_MASK | 658 UVD_CGC_CTRL__UDEC_MODE_MASK | 659 UVD_CGC_CTRL__MPEG2_MODE_MASK | 660 UVD_CGC_CTRL__REGS_MODE_MASK | 661 UVD_CGC_CTRL__RBC_MODE_MASK | 662 UVD_CGC_CTRL__LMI_MC_MODE_MASK | 663 UVD_CGC_CTRL__LMI_UMC_MODE_MASK | 664 UVD_CGC_CTRL__IDCT_MODE_MASK | 665 UVD_CGC_CTRL__MPRD_MODE_MASK | 666 UVD_CGC_CTRL__MPC_MODE_MASK | 667 UVD_CGC_CTRL__LBSI_MODE_MASK | 668 UVD_CGC_CTRL__LRBBM_MODE_MASK | 669 UVD_CGC_CTRL__WCB_MODE_MASK | 670 UVD_CGC_CTRL__VCPU_MODE_MASK | 671 UVD_CGC_CTRL__JPEG_MODE_MASK | 672 UVD_CGC_CTRL__SCPU_MODE_MASK); 673 data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK | 674 UVD_SUVD_CGC_CTRL__SIT_MODE_MASK | 675 UVD_SUVD_CGC_CTRL__SMP_MODE_MASK | 676 UVD_SUVD_CGC_CTRL__SCM_MODE_MASK | 677 UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); 678 data1 |= suvd_flags; 679 680 WREG32(mmUVD_CGC_CTRL, data); 681 WREG32(mmUVD_CGC_GATE, 0); 682 WREG32(mmUVD_SUVD_CGC_GATE, data1); 683 WREG32(mmUVD_SUVD_CGC_CTRL, data2); 684 } 685 686 #if 0 687 static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev) 688 { 689 uint32_t data, data1, cgc_flags, suvd_flags; 690 691 data = RREG32(mmUVD_CGC_GATE); 692 data1 = RREG32(mmUVD_SUVD_CGC_GATE); 693 694 cgc_flags = UVD_CGC_GATE__SYS_MASK | 695 UVD_CGC_GATE__UDEC_MASK | 696 UVD_CGC_GATE__MPEG2_MASK | 697 UVD_CGC_GATE__RBC_MASK | 698 UVD_CGC_GATE__LMI_MC_MASK | 699 UVD_CGC_GATE__IDCT_MASK | 700 UVD_CGC_GATE__MPRD_MASK | 701 UVD_CGC_GATE__MPC_MASK | 702 UVD_CGC_GATE__LBSI_MASK | 703 UVD_CGC_GATE__LRBBM_MASK | 704 UVD_CGC_GATE__UDEC_RE_MASK | 705 UVD_CGC_GATE__UDEC_CM_MASK | 706 UVD_CGC_GATE__UDEC_IT_MASK | 707 UVD_CGC_GATE__UDEC_DB_MASK | 708 UVD_CGC_GATE__UDEC_MP_MASK | 709 UVD_CGC_GATE__WCB_MASK | 710 UVD_CGC_GATE__VCPU_MASK | 711 UVD_CGC_GATE__SCPU_MASK; 712 713 suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK | 714 UVD_SUVD_CGC_GATE__SIT_MASK | 715 UVD_SUVD_CGC_GATE__SMP_MASK | 716 UVD_SUVD_CGC_GATE__SCM_MASK | 717 UVD_SUVD_CGC_GATE__SDB_MASK; 718 719 data |= cgc_flags; 720 data1 |= suvd_flags; 721 722 WREG32(mmUVD_CGC_GATE, data); 723 WREG32(mmUVD_SUVD_CGC_GATE, data1); 724 } 725 #endif 726 727 728 static int uvd_v5_0_set_clockgating_state(void *handle, 729 enum amd_clockgating_state state) 730 { 731 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 732 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 733 static int curstate = -1; 734 735 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) 736 return 0; 737 738 if (curstate == state) 739 return 0; 740 741 curstate = state; 742 if (enable) { 743 /* disable HW gating and enable Sw gating */ 744 uvd_v5_0_set_sw_clock_gating(adev); 745 } else { 746 /* wait for STATUS to clear */ 747 if (uvd_v5_0_wait_for_idle(handle)) 748 return -EBUSY; 749 750 /* enable HW gates because UVD is idle */ 751 /* uvd_v5_0_set_hw_clock_gating(adev); */ 752 } 753 754 return 0; 755 } 756 757 static int uvd_v5_0_set_powergating_state(void *handle, 758 enum amd_powergating_state state) 759 { 760 /* This doesn't actually powergate the UVD block. 761 * That's done in the dpm code via the SMC. This 762 * just re-inits the block as necessary. The actual 763 * gating still happens in the dpm code. We should 764 * revisit this when there is a cleaner line between 765 * the smc and the hw blocks 766 */ 767 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 768 769 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) 770 return 0; 771 772 if (state == AMD_PG_STATE_GATE) { 773 uvd_v5_0_stop(adev); 774 return 0; 775 } else { 776 return uvd_v5_0_start(adev); 777 } 778 } 779 780 static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { 781 .name = "uvd_v5_0", 782 .early_init = uvd_v5_0_early_init, 783 .late_init = NULL, 784 .sw_init = uvd_v5_0_sw_init, 785 .sw_fini = uvd_v5_0_sw_fini, 786 .hw_init = uvd_v5_0_hw_init, 787 .hw_fini = uvd_v5_0_hw_fini, 788 .suspend = uvd_v5_0_suspend, 789 .resume = uvd_v5_0_resume, 790 .is_idle = uvd_v5_0_is_idle, 791 .wait_for_idle = uvd_v5_0_wait_for_idle, 792 .soft_reset = uvd_v5_0_soft_reset, 793 .set_clockgating_state = uvd_v5_0_set_clockgating_state, 794 .set_powergating_state = uvd_v5_0_set_powergating_state, 795 }; 796 797 static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { 798 .type = AMDGPU_RING_TYPE_UVD, 799 .align_mask = 0xf, 800 .nop = PACKET0(mmUVD_NO_OP, 0), 801 .get_rptr = uvd_v5_0_ring_get_rptr, 802 .get_wptr = uvd_v5_0_ring_get_wptr, 803 .set_wptr = uvd_v5_0_ring_set_wptr, 804 .parse_cs = amdgpu_uvd_ring_parse_cs, 805 .emit_frame_size = 806 2 + /* uvd_v5_0_ring_emit_hdp_flush */ 807 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */ 808 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */ 809 .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */ 810 .emit_ib = uvd_v5_0_ring_emit_ib, 811 .emit_fence = uvd_v5_0_ring_emit_fence, 812 .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, 813 .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate, 814 .test_ring = uvd_v5_0_ring_test_ring, 815 .test_ib = amdgpu_uvd_ring_test_ib, 816 .insert_nop = amdgpu_ring_insert_nop, 817 .pad_ib = amdgpu_ring_generic_pad_ib, 818 .begin_use = amdgpu_uvd_ring_begin_use, 819 .end_use = amdgpu_uvd_ring_end_use, 820 }; 821 822 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) 823 { 824 adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; 825 } 826 827 static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { 828 .set = uvd_v5_0_set_interrupt_state, 829 .process = uvd_v5_0_process_interrupt, 830 }; 831 832 static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) 833 { 834 adev->uvd.irq.num_types = 1; 835 adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; 836 } 837 838 const struct amdgpu_ip_block_version uvd_v5_0_ip_block = 839 { 840 .type = AMD_IP_BLOCK_TYPE_UVD, 841 .major = 5, 842 .minor = 0, 843 .rev = 0, 844 .funcs = &uvd_v5_0_ip_funcs, 845 }; 846