/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

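	/*
	 * Note: the vclk/dclk arguments to amdgpu_asic_set_uvd_clocks()
	 * appear to be in 10 kHz units, so the call below requests roughly
	 * a 533 MHz VCLK and a 400 MHz DCLK while the VCPU boots.
	 */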
	/* raise clocks while booting up the VCPU */
	amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

	r = uvd_v4_2_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

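	/*
	 * PACKET0(reg, n) encodes a register-write packet whose header is
	 * followed by n + 1 data dwords written to consecutive registers
	 * starting at reg; each header/value pair below is therefore a
	 * single register write performed from the ring.
	 */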
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	/* lower clocks again */
	amdgpu_asic_set_uvd_clocks(adev, 0, 0);

	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v4_2_stop(adev);
	ring->ready = false;

	return 0;
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return r;
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = uvd_v4_2_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz;
	int i, j, r;

	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	uvd_v4_2_mc_resume(adev);

	/* disable clock gating */
	WREG32(mmUVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

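	/*
	 * The VCPU firmware signals that it is up by setting bit 1 of
	 * UVD_STATUS; poll for that, putting the VCPU through another
	 * reset cycle between attempts (10 attempts of up to ~1 s each).
	 */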
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
				     (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence sequence number is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

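	/* seq and addr are handed to the VCPU through the GPCOM registers;
	 * command 0 appears to ask the VCPU to write the fence value, while
	 * command 2 (second block below) raises the trap interrupt */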
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

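/*
 * HDP is the host data path block that services CPU accesses to VRAM.
 * Writing mmHDP_MEM_COHERENCY_FLUSH_CNTL flushes its write cache, and
 * poking mmHDP_DEBUG0 is the usual trick to invalidate its read cache.
 */
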
/**
 * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v4_2_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

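	/*
	 * VCPU cache layout: the firmware image comes first, followed by
	 * the heap and then the stack plus per-session data; offsets and
	 * sizes are programmed in 8-byte units, hence the >> 3 shifts.
	 */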
	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* bits 28-31 */
	addr = (adev->uvd.gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	uvd_v4_2_init_cg(adev);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data = 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
{
	bool hw_mode = true;

	if (hw_mode) {
		uvd_v4_2_set_dcm(adev, false);
	} else {
		u32 tmp = RREG32(mmUVD_CGC_CTRL);
		tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		WREG32(mmUVD_CGC_CTRL, tmp);
	}
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	uvd_v4_2_enable_mgcg(adev, gate);

	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		return 0;
	} else {
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};

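/*
 * The emit_frame_size accounting below is in dwords: every register
 * write on this ring costs two dwords (PACKET0 header plus value), so
 * the fence path (7 writes) takes 14 and each HDP helper takes 2.
 */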
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v4_2_ring_emit_hdp_flush */
		2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};