/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "cikd.h"

#include "uvd/uvd_4_2_d.h"
#include "uvd/uvd_4_2_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "bif/bif_4_1_d.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
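
/*
 * Register access in this file goes through the common amdgpu helpers:
 * RREG32()/WREG32() are plain MMIO accesses, WREG32_P(reg, val, mask)
 * is a read-modify-write that keeps the bits selected by mask and
 * merges in val (callers pass ~FIELD_MASK to update a single field),
 * and RREG32_UVD_CTX()/WREG32_UVD_CTX() reach the indirectly addressed
 * UVD context registers (the ixUVD_* offsets).
 */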

/**
 * uvd_v4_2_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v4_2_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v4_2_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	uvd_v4_2_set_ring_funcs(adev);
	uvd_v4_2_set_irq_funcs(adev);

	return 0;
}

static int uvd_v4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;
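
	/*
	 * Create the single UVD decode ring. The 512 is amdgpu_ring_init()'s
	 * max_dw argument (roughly the per-submission dword budget), and the
	 * interrupt source registered above (legacy client, src id 124, the
	 * UVD trap) is what later drives fence processing for this ring.
	 */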
	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v4_2_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v4_2_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v4_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	uvd_v4_2_enable_mgcg(adev, true);
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}
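
	/*
	 * Each PACKET0 below is a register write routed through the ring:
	 * the packet header selects the register and the following dword is
	 * the value. These writes program large semaphore timeout values,
	 * clear the stale timeout status bits and set up UVD_SEMA_CNTL, so
	 * semaphore waits cannot wedge the engine right after boot.
	 */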
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v4_2_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v4_2_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v4_2_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v4_2_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v4_2_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v4_2_hw_init(adev);
}

/**
 * uvd_v4_2_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz;
	int i, j, r;
	u32 tmp;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* set uvd busy */
	WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

	uvd_v4_2_set_dcm(adev, true);
	WREG32(mmUVD_CGC_GATE, 0);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x203108);

	tmp = RREG32(mmUVD_MPC_CNTL);
	WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);
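
	/*
	 * Program the firmware, heap and stack locations into the VCPU
	 * caches before the engine starts fetching; see uvd_v4_2_mc_resume()
	 * below for the layout.
	 */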
	uvd_v4_2_mc_resume(adev);

	tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
	WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	mdelay(10);
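
	/*
	 * Boot handshake: poll UVD_STATUS for bit 1, which indicates the
	 * VCPU came up. Each attempt waits up to a second (100 x 10ms); on
	 * failure the VCPU is pulsed through soft reset and the poll
	 * retried, up to ten attempts in total.
	 */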
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

	WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
	       (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0x0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* set the ring address */
	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

	return 0;
}

/**
 * uvd_v4_2_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v4_2_stop(struct amdgpu_device *adev)
{
	uint32_t i, j;
	uint32_t status;

	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(1);
		}
		if (status & 2)
			break;
	}

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0xf)
				break;
			mdelay(1);
		}
		if (status & 0xf)
			break;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_LMI_STATUS);
			if (status & 0x240)
				break;
			mdelay(1);
		}
		if (status & 0x240)
			break;
	}

	WREG32_P(0x3D49, 0, ~(1 << 2));

	WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
	       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32(mmUVD_STATUS, 0);

	uvd_v4_2_set_dcm(adev, false);
}

/**
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence value to
 * @seq: sequence number to write
 * @flags: fence flags (64-bit fences are not supported)
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
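
	/*
	 * Fence then trap, per the doc comment above: the sequence number
	 * is staged in UVD_CONTEXT_ID and the target address in
	 * GPCOM_VCPU_DATA0/1, then VCPU command 0 performs the fence
	 * write; the second command (2) raises the trap interrupt that
	 * lands in uvd_v4_2_process_interrupt().
	 */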
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v4_2_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
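
	/*
	 * UVD_CONTEXT_ID was just seeded with 0xCAFEDEAD via direct MMIO;
	 * now 0xDEADBEEF is pushed through the ring and the register is
	 * polled until the new value shows up, proving the engine fetches
	 * and executes ring packets.
	 */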
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v4_2_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
	amdgpu_ring_write(ring, ib->gpu_addr);
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

/**
 * uvd_v4_2_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr;
	uint32_t size;

	/* program the VCPU memory controller bits 0-27 */
	addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = AMDGPU_UVD_HEAP_SIZE >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = (AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
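
	/*
	 * The cache offsets above carry only bits 0-27 of the addresses
	 * (in units of 8 bytes, hence the >> 3); the extension registers
	 * below supply the upper bits (28-31 and 32-39) of the instance's
	 * GPU address, completing UVD's 40-bit view of memory.
	 */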

	/* bits 28-31 */
	addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
	WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

	/* bits 32-39 */
	addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
	WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode)
{
	u32 tmp, tmp2;

	WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

	tmp = RREG32(mmUVD_CGC_CTRL);
	tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
		(4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

	if (sw_mode) {
		tmp &= ~0x7ffff800;
		tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
			UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
			(7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
	} else {
		tmp |= 0x7ffff800;
		tmp2 = 0;
	}

	WREG32(mmUVD_CGC_CTRL, tmp);
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static bool uvd_v4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v4_2_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}
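
/*
 * Soft reset simply stops the engine, pulses the UVD bit in
 * SRBM_SOFT_RESET and then runs the regular start sequence again.
 */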
static int uvd_v4_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v4_2_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v4_2_start(adev);
}

static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}
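
/*
 * Clock gating on UVD 4.2 is driven directly from the start/stop and
 * hw_init paths via uvd_v4_2_set_dcm() and uvd_v4_2_enable_mgcg(), so
 * the generic clockgating callback has nothing left to do here.
 */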
static int uvd_v4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int uvd_v4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v4_2_stop(adev);
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK |
							UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(20);
			}
		}
		return 0;
	} else {
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
			if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
				WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK |
						UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK));
				mdelay(30);
			}
		}
		return uvd_v4_2_start(adev);
	}
}

static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
	.name = "uvd_v4_2",
	.early_init = uvd_v4_2_early_init,
	.late_init = NULL,
	.sw_init = uvd_v4_2_sw_init,
	.sw_fini = uvd_v4_2_sw_fini,
	.hw_init = uvd_v4_2_hw_init,
	.hw_fini = uvd_v4_2_hw_fini,
	.suspend = uvd_v4_2_suspend,
	.resume = uvd_v4_2_resume,
	.is_idle = uvd_v4_2_is_idle,
	.wait_for_idle = uvd_v4_2_wait_for_idle,
	.soft_reset = uvd_v4_2_soft_reset,
	.set_clockgating_state = uvd_v4_2_set_clockgating_state,
	.set_powergating_state = uvd_v4_2_set_powergating_state,
};
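
/*
 * The emit_frame_size of 14 below matches the 14 dwords written by
 * uvd_v4_2_ring_emit_fence() (seven register/value pairs), and the
 * emit_ib_size of 4 matches the two register/value pairs written by
 * uvd_v4_2_ring_emit_ib().
 */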
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v4_2_ring_get_rptr,
	.get_wptr = uvd_v4_2_ring_get_wptr,
	.set_wptr = uvd_v4_2_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
	.emit_ib = uvd_v4_2_ring_emit_ib,
	.emit_fence = uvd_v4_2_ring_emit_fence,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v4_2_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
	.set = uvd_v4_2_set_interrupt_state,
	.process = uvd_v4_2_process_interrupt,
};

static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 4,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v4_2_ip_funcs,
};