1aaa36a97SAlex Deucher /* 2aaa36a97SAlex Deucher * Copyright 2014 Advanced Micro Devices, Inc. 3aaa36a97SAlex Deucher * All Rights Reserved. 4aaa36a97SAlex Deucher * 5aaa36a97SAlex Deucher * Permission is hereby granted, free of charge, to any person obtaining a 6aaa36a97SAlex Deucher * copy of this software and associated documentation files (the 7aaa36a97SAlex Deucher * "Software"), to deal in the Software without restriction, including 8aaa36a97SAlex Deucher * without limitation the rights to use, copy, modify, merge, publish, 9aaa36a97SAlex Deucher * distribute, sub license, and/or sell copies of the Software, and to 10aaa36a97SAlex Deucher * permit persons to whom the Software is furnished to do so, subject to 11aaa36a97SAlex Deucher * the following conditions: 12aaa36a97SAlex Deucher * 13aaa36a97SAlex Deucher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14aaa36a97SAlex Deucher * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15aaa36a97SAlex Deucher * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16aaa36a97SAlex Deucher * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17aaa36a97SAlex Deucher * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18aaa36a97SAlex Deucher * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19aaa36a97SAlex Deucher * USE OR OTHER DEALINGS IN THE SOFTWARE. 20aaa36a97SAlex Deucher * 21aaa36a97SAlex Deucher * The above copyright notice and this permission notice (including the 22aaa36a97SAlex Deucher * next paragraph) shall be included in all copies or substantial portions 23aaa36a97SAlex Deucher * of the Software. 
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"


#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07

#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

/* Firmware/stack/data carve-out sizes within the VCE BO. */
#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

/* Packed firmware version 52.8.3 — minimum for 3-ring support (see sw_init). */
#define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))

/* Build a GRBM_GFX_INDEX value selecting VCE instance i (all pipes). */
#define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
					| GRBM_GFX_INDEX__VCE_ALL_PIPE)

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	/* The VCE register aperture is banked per instance; select the
	 * live instance via GRBM_GFX_INDEX under grbm_idx_mutex before
	 * touching any mmVCE_* register, and restore the default after.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		v = RREG32(mmVCE_RB_RPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_RPTR2);
	else
		v = RREG32(mmVCE_RB_RPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		v = RREG32(mmVCE_RB_WPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_WPTR2);
	else
		v = RREG32(mmVCE_RB_WPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else if (ring->me == 1)
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);
}

/* Set or clear the CGTT clock-gating override bit in VCE_RB_ARB_CTRL. */
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}

/* Program the VCE clock-gating registers for the gated/ungated state.
 * The magic constants are hardware-specific bit patterns for the
 * VCE_CLOCK_GATING_B / VCE_UENC_* registers.
 */
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	   With the clocks in the gated state the core is still
	   accessible but the firmware will throttle the clocks on the
	   fly as necessary.
	*/
	if (!gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0x3ff;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}

/* Poll VCE_STATUS until the firmware reports itself loaded.
 * Polls up to 10 x 100 x 10ms; after each 1s window without the
 * FW_LOADED bit, pulses an ECPU soft reset and retries.
 *
 * Returns 0 on success, -ETIMEDOUT if the firmware never comes up.
 */
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		/* skip harvested (fused-off) instances */
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		/* Program instance 0 reg space for two instances or instance 0 case
		   program instance 1 reg space for only instance 1 available case */
		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
			ring = &adev->vce.ring[0];
			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

			ring = &adev->vce.ring[1];
			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

			ring = &adev->vce.ring[2];
			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
		}

		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		/* Stoney and newer use a different VCPU_CNTL enable sequence */
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		/* release the ECPU from reset and give the fw time to boot */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/* Stop both VCE instances: gate the VCPU clock, hold the ECPU in
 * reset and clear VCE_STATUS for every non-harvested instance.
 */
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear VCE STATUS */
		WREG32(mmVCE_STATUS, 0);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

/* Determine which VCE instances are fused off (harvested) on this ASIC.
 * Returns a bitmask of AMDGPU_VCE_HARVEST_VCE0/VCE1 flags.
 */
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Fiji and Stoney only ever expose a single instance */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* APUs and dGPUs keep the harvest fuses in different SMC registers */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		/* Polaris family only has a single usable instance */
		if ((adev->asic_type == CHIP_POLARIS10) ||
		    (adev->asic_type == CHIP_POLARIS11) ||
		    (adev->asic_type == CHIP_POLARIS12) ||
		    (adev->asic_type == CHIP_VEGAM))
			return AMDGPU_VCE_HARVEST_VCE1;

		return 0;
	}
}

/* amd_ip_funcs.early_init: detect harvesting and install ring/irq vtables.
 * Returns -ENOENT when both instances are fused off.
 */
static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
		(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
		(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	adev->vce.num_rings = 3;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

/* amd_ip_funcs.sw_init: register the VCE interrupt source, allocate the
 * firmware/stack/data BO, resume common VCE state and init the rings.
 */
static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	/* 52.8.3 required for 3 ring support */
	if (adev->vce.fw_version < FW_52_8_3)
		adev->vce.num_rings = 2;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
		if (r)
			return r;
	}

	return r;
}

/* amd_ip_funcs.sw_fini: suspend VCE, then tear down common VCE state. */
static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

/* amd_ip_funcs.hw_init: bring up clocks, then ring-test every VCE ring. */
static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v3_0_override_vce_clock_gating(adev, true);

	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

/* amd_ip_funcs.hw_fini: wait for idle, stop the block and gate clocks. */
static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	vce_v3_0_stop(adev);
	return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
}

/* amd_ip_funcs.suspend: hw_fini followed by the common VCE suspend path. */
static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

/* amd_ip_funcs.resume: common VCE resume followed by hw_init. */
static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}

/* Program the memory-controller facing registers (LMI) and the VCPU
 * cache windows for instance @idx.  The firmware image is shared; each
 * instance gets its own stack+data carve-out after it (idx 0 uses the
 * first carve-out, idx 1 the second).  Caller must already have selected
 * the instance via GRBM_GFX_INDEX.
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

	/* Stoney and newer have per-cache 40-bit BAR registers */
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		/* second instance: skip instance 0's stack+data carve-out */
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

/* amd_ip_funcs.is_idle: idle when no non-harvested instance reports
 * busy in SRBM_STATUS2.
 */
static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

/* amd_ip_funcs.wait_for_idle: poll is_idle up to adev->usec_timeout. */
static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

/* amd_ip_funcs.check_soft_reset: record whether either instance is busy
 * and therefore needs a soft reset; result cached in vce.srbm_soft_reset.
 */
static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to VCE team , we should use VCE_STATUS instead
	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 10 for 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * VCE team suggest use bit 3--bit 6 for busy status check
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

/* amd_ip_funcs.soft_reset: pulse the SRBM soft-reset bits computed by
 * check_soft_reset (assert, settle, deassert, settle).
 */
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

/* amd_ip_funcs.pre_soft_reset: suspend VCE before the reset is applied. */
static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}


/* amd_ip_funcs.post_soft_reset: bring VCE back up after the reset. */
static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}

/* amdgpu_irq_src_funcs.set: enable/disable the VCE trap interrupt. */
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

/* amdgpu_irq_src_funcs.process: ack the trap interrupt and fan out to the
 * fence processing of the ring identified by src_data[0] (0..2).
 */
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
741aaa36a97SAlex Deucher 7425fc3aeebSyanyang1 static int vce_v3_0_set_clockgating_state(void *handle, 7435fc3aeebSyanyang1 enum amd_clockgating_state state) 744aaa36a97SAlex Deucher { 7450689a570SEric Huang struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7460689a570SEric Huang bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 7470689a570SEric Huang int i; 7480689a570SEric Huang 749e3b04bc7SAlex Deucher if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) 7500689a570SEric Huang return 0; 7510689a570SEric Huang 7520689a570SEric Huang mutex_lock(&adev->grbm_idx_mutex); 7530689a570SEric Huang for (i = 0; i < 2; i++) { 7540689a570SEric Huang /* Program VCE Instance 0 or 1 if not harvested */ 7550689a570SEric Huang if (adev->vce.harvest_config & (1 << i)) 7560689a570SEric Huang continue; 7570689a570SEric Huang 75850a1ebc7SRex Zhu WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); 7590689a570SEric Huang 76026679899SRex Zhu if (!enable) { 7610689a570SEric Huang /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ 7620689a570SEric Huang uint32_t data = RREG32(mmVCE_CLOCK_GATING_A); 7630689a570SEric Huang data &= ~(0xf | 0xff0); 7640689a570SEric Huang data |= ((0x0 << 0) | (0x04 << 4)); 7650689a570SEric Huang WREG32(mmVCE_CLOCK_GATING_A, data); 7660689a570SEric Huang 7670689a570SEric Huang /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */ 7680689a570SEric Huang data = RREG32(mmVCE_UENC_CLOCK_GATING); 7690689a570SEric Huang data &= ~(0xf | 0xff0); 7700689a570SEric Huang data |= ((0x0 << 0) | (0x04 << 4)); 7710689a570SEric Huang WREG32(mmVCE_UENC_CLOCK_GATING, data); 7720689a570SEric Huang } 7730689a570SEric Huang 7740689a570SEric Huang vce_v3_0_set_vce_sw_clock_gating(adev, enable); 7750689a570SEric Huang } 7760689a570SEric Huang 77750a1ebc7SRex Zhu WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 7780689a570SEric Huang mutex_unlock(&adev->grbm_idx_mutex); 7790689a570SEric Huang 780aaa36a97SAlex Deucher return 0; 781aaa36a97SAlex Deucher } 
782aaa36a97SAlex Deucher 7835fc3aeebSyanyang1 static int vce_v3_0_set_powergating_state(void *handle, 7845fc3aeebSyanyang1 enum amd_powergating_state state) 785aaa36a97SAlex Deucher { 786aaa36a97SAlex Deucher /* This doesn't actually powergate the VCE block. 787aaa36a97SAlex Deucher * That's done in the dpm code via the SMC. This 788aaa36a97SAlex Deucher * just re-inits the block as necessary. The actual 789aaa36a97SAlex Deucher * gating still happens in the dpm code. We should 790aaa36a97SAlex Deucher * revisit this when there is a cleaner line between 791aaa36a97SAlex Deucher * the smc and the hw blocks 792aaa36a97SAlex Deucher */ 7935fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 794c79b5561SHuang Rui int ret = 0; 7955fc3aeebSyanyang1 796c79b5561SHuang Rui if (state == AMD_PG_STATE_GATE) { 7976fc11b0eSRex Zhu ret = vce_v3_0_stop(adev); 7986fc11b0eSRex Zhu if (ret) 7996fc11b0eSRex Zhu goto out; 800c79b5561SHuang Rui } else { 801c79b5561SHuang Rui ret = vce_v3_0_start(adev); 802c79b5561SHuang Rui if (ret) 803c79b5561SHuang Rui goto out; 804c79b5561SHuang Rui } 805c79b5561SHuang Rui 806c79b5561SHuang Rui out: 807c79b5561SHuang Rui return ret; 808c79b5561SHuang Rui } 809c79b5561SHuang Rui 810c79b5561SHuang Rui static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags) 811c79b5561SHuang Rui { 812c79b5561SHuang Rui struct amdgpu_device *adev = (struct amdgpu_device *)handle; 813c79b5561SHuang Rui int data; 814c79b5561SHuang Rui 815c79b5561SHuang Rui mutex_lock(&adev->pm.mutex); 816c79b5561SHuang Rui 8171c622002SRex Zhu if (adev->flags & AMD_IS_APU) 8181c622002SRex Zhu data = RREG32_SMC(ixCURRENT_PG_STATUS_APU); 8191c622002SRex Zhu else 8201c622002SRex Zhu data = RREG32_SMC(ixCURRENT_PG_STATUS); 8211c622002SRex Zhu 8221c622002SRex Zhu if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) { 823c79b5561SHuang Rui DRM_INFO("Cannot get clockgating state when VCE is powergated.\n"); 824c79b5561SHuang Rui goto out; 825c79b5561SHuang Rui } 
826c79b5561SHuang Rui 827c79b5561SHuang Rui WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 828c79b5561SHuang Rui 829c79b5561SHuang Rui /* AMD_CG_SUPPORT_VCE_MGCG */ 830c79b5561SHuang Rui data = RREG32(mmVCE_CLOCK_GATING_A); 831c79b5561SHuang Rui if (data & (0x04 << 4)) 832c79b5561SHuang Rui *flags |= AMD_CG_SUPPORT_VCE_MGCG; 833c79b5561SHuang Rui 834c79b5561SHuang Rui out: 835c79b5561SHuang Rui mutex_unlock(&adev->pm.mutex); 836aaa36a97SAlex Deucher } 837aaa36a97SAlex Deucher 838ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, 839c4f46f22SChristian König struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) 840ea4a8c1dSMaruthi Srinivas Bayyavarapu { 841ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_IB_VM); 842c4f46f22SChristian König amdgpu_ring_write(ring, vmid); 843ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); 844ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 845ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, ib->length_dw); 846ea4a8c1dSMaruthi Srinivas Bayyavarapu } 847ea4a8c1dSMaruthi Srinivas Bayyavarapu 848ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring, 849c633c00bSChristian König unsigned int vmid, uint64_t pd_addr) 850ea4a8c1dSMaruthi Srinivas Bayyavarapu { 851ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB); 852c4f46f22SChristian König amdgpu_ring_write(ring, vmid); 853ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, pd_addr >> 12); 854ea4a8c1dSMaruthi Srinivas Bayyavarapu 855ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB); 856c4f46f22SChristian König amdgpu_ring_write(ring, vmid); 857ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_END); 858ea4a8c1dSMaruthi Srinivas Bayyavarapu } 859ea4a8c1dSMaruthi Srinivas Bayyavarapu 
860ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring) 861ea4a8c1dSMaruthi Srinivas Bayyavarapu { 862ea4a8c1dSMaruthi Srinivas Bayyavarapu uint32_t seq = ring->fence_drv.sync_seq; 863ea4a8c1dSMaruthi Srinivas Bayyavarapu uint64_t addr = ring->fence_drv.gpu_addr; 864ea4a8c1dSMaruthi Srinivas Bayyavarapu 865ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_WAIT_GE); 866ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, lower_32_bits(addr)); 867ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, upper_32_bits(addr)); 868ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, seq); 869ea4a8c1dSMaruthi Srinivas Bayyavarapu } 870ea4a8c1dSMaruthi Srinivas Bayyavarapu 871a1255107SAlex Deucher static const struct amd_ip_funcs vce_v3_0_ip_funcs = { 87288a907d6STom St Denis .name = "vce_v3_0", 873aaa36a97SAlex Deucher .early_init = vce_v3_0_early_init, 874aaa36a97SAlex Deucher .late_init = NULL, 875aaa36a97SAlex Deucher .sw_init = vce_v3_0_sw_init, 876aaa36a97SAlex Deucher .sw_fini = vce_v3_0_sw_fini, 877aaa36a97SAlex Deucher .hw_init = vce_v3_0_hw_init, 878aaa36a97SAlex Deucher .hw_fini = vce_v3_0_hw_fini, 879aaa36a97SAlex Deucher .suspend = vce_v3_0_suspend, 880aaa36a97SAlex Deucher .resume = vce_v3_0_resume, 881aaa36a97SAlex Deucher .is_idle = vce_v3_0_is_idle, 882aaa36a97SAlex Deucher .wait_for_idle = vce_v3_0_wait_for_idle, 883115933a5SChunming Zhou .check_soft_reset = vce_v3_0_check_soft_reset, 884115933a5SChunming Zhou .pre_soft_reset = vce_v3_0_pre_soft_reset, 885aaa36a97SAlex Deucher .soft_reset = vce_v3_0_soft_reset, 886115933a5SChunming Zhou .post_soft_reset = vce_v3_0_post_soft_reset, 887aaa36a97SAlex Deucher .set_clockgating_state = vce_v3_0_set_clockgating_state, 888aaa36a97SAlex Deucher .set_powergating_state = vce_v3_0_set_powergating_state, 889c79b5561SHuang Rui .get_clockgating_state = vce_v3_0_get_clockgating_state, 890aaa36a97SAlex Deucher }; 891aaa36a97SAlex 
Deucher 892ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { 89321cd942eSChristian König .type = AMDGPU_RING_TYPE_VCE, 89479887142SChristian König .align_mask = 0xf, 89579887142SChristian König .nop = VCE_CMD_NO_OP, 896536fbf94SKen Wang .support_64bit_ptrs = false, 897aaa36a97SAlex Deucher .get_rptr = vce_v3_0_ring_get_rptr, 898aaa36a97SAlex Deucher .get_wptr = vce_v3_0_ring_get_wptr, 899aaa36a97SAlex Deucher .set_wptr = vce_v3_0_ring_set_wptr, 900aaa36a97SAlex Deucher .parse_cs = amdgpu_vce_ring_parse_cs, 901e12f3d7aSChristian König .emit_frame_size = 902e12f3d7aSChristian König 4 + /* vce_v3_0_emit_pipeline_sync */ 903e12f3d7aSChristian König 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ 9043413accbSAlex Deucher .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ 905aaa36a97SAlex Deucher .emit_ib = amdgpu_vce_ring_emit_ib, 906aaa36a97SAlex Deucher .emit_fence = amdgpu_vce_ring_emit_fence, 907aaa36a97SAlex Deucher .test_ring = amdgpu_vce_ring_test_ring, 908aaa36a97SAlex Deucher .test_ib = amdgpu_vce_ring_test_ib, 909edff0e28SJammy Zhou .insert_nop = amdgpu_ring_insert_nop, 9109e5d5309SChristian König .pad_ib = amdgpu_ring_generic_pad_ib, 911ebff485eSChristian König .begin_use = amdgpu_vce_ring_begin_use, 912ebff485eSChristian König .end_use = amdgpu_vce_ring_end_use, 913aaa36a97SAlex Deucher }; 914aaa36a97SAlex Deucher 915ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { 91621cd942eSChristian König .type = AMDGPU_RING_TYPE_VCE, 91779887142SChristian König .align_mask = 0xf, 91879887142SChristian König .nop = VCE_CMD_NO_OP, 919536fbf94SKen Wang .support_64bit_ptrs = false, 920ea4a8c1dSMaruthi Srinivas Bayyavarapu .get_rptr = vce_v3_0_ring_get_rptr, 921ea4a8c1dSMaruthi Srinivas Bayyavarapu .get_wptr = vce_v3_0_ring_get_wptr, 922ea4a8c1dSMaruthi Srinivas Bayyavarapu .set_wptr = vce_v3_0_ring_set_wptr, 92398614701SChristian König .parse_cs = 
amdgpu_vce_ring_parse_cs_vm, 924e12f3d7aSChristian König .emit_frame_size = 925e12f3d7aSChristian König 6 + /* vce_v3_0_emit_vm_flush */ 926e12f3d7aSChristian König 4 + /* vce_v3_0_emit_pipeline_sync */ 927e12f3d7aSChristian König 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ 9283413accbSAlex Deucher .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ 929ea4a8c1dSMaruthi Srinivas Bayyavarapu .emit_ib = vce_v3_0_ring_emit_ib, 930ea4a8c1dSMaruthi Srinivas Bayyavarapu .emit_vm_flush = vce_v3_0_emit_vm_flush, 931ea4a8c1dSMaruthi Srinivas Bayyavarapu .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, 932ea4a8c1dSMaruthi Srinivas Bayyavarapu .emit_fence = amdgpu_vce_ring_emit_fence, 933ea4a8c1dSMaruthi Srinivas Bayyavarapu .test_ring = amdgpu_vce_ring_test_ring, 934ea4a8c1dSMaruthi Srinivas Bayyavarapu .test_ib = amdgpu_vce_ring_test_ib, 935ea4a8c1dSMaruthi Srinivas Bayyavarapu .insert_nop = amdgpu_ring_insert_nop, 936ea4a8c1dSMaruthi Srinivas Bayyavarapu .pad_ib = amdgpu_ring_generic_pad_ib, 937ea4a8c1dSMaruthi Srinivas Bayyavarapu .begin_use = amdgpu_vce_ring_begin_use, 938ea4a8c1dSMaruthi Srinivas Bayyavarapu .end_use = amdgpu_vce_ring_end_use, 939ea4a8c1dSMaruthi Srinivas Bayyavarapu }; 940ea4a8c1dSMaruthi Srinivas Bayyavarapu 941aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) 942aaa36a97SAlex Deucher { 94375c65480SAlex Deucher int i; 94475c65480SAlex Deucher 945ea4a8c1dSMaruthi Srinivas Bayyavarapu if (adev->asic_type >= CHIP_STONEY) { 9465d4af988SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) { 947ea4a8c1dSMaruthi Srinivas Bayyavarapu adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs; 9485d4af988SAlex Deucher adev->vce.ring[i].me = i; 9495d4af988SAlex Deucher } 950ea4a8c1dSMaruthi Srinivas Bayyavarapu DRM_INFO("VCE enabled in VM mode\n"); 951ea4a8c1dSMaruthi Srinivas Bayyavarapu } else { 9525d4af988SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) { 953ea4a8c1dSMaruthi Srinivas Bayyavarapu adev->vce.ring[i].funcs 
= &vce_v3_0_ring_phys_funcs; 9545d4af988SAlex Deucher adev->vce.ring[i].me = i; 9555d4af988SAlex Deucher } 956ea4a8c1dSMaruthi Srinivas Bayyavarapu DRM_INFO("VCE enabled in physical mode\n"); 957ea4a8c1dSMaruthi Srinivas Bayyavarapu } 958aaa36a97SAlex Deucher } 959aaa36a97SAlex Deucher 960aaa36a97SAlex Deucher static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = { 961aaa36a97SAlex Deucher .set = vce_v3_0_set_interrupt_state, 962aaa36a97SAlex Deucher .process = vce_v3_0_process_interrupt, 963aaa36a97SAlex Deucher }; 964aaa36a97SAlex Deucher 965aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev) 966aaa36a97SAlex Deucher { 967aaa36a97SAlex Deucher adev->vce.irq.num_types = 1; 968aaa36a97SAlex Deucher adev->vce.irq.funcs = &vce_v3_0_irq_funcs; 969aaa36a97SAlex Deucher }; 970a1255107SAlex Deucher 971a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_0_ip_block = 972a1255107SAlex Deucher { 973a1255107SAlex Deucher .type = AMD_IP_BLOCK_TYPE_VCE, 974a1255107SAlex Deucher .major = 3, 975a1255107SAlex Deucher .minor = 0, 976a1255107SAlex Deucher .rev = 0, 977a1255107SAlex Deucher .funcs = &vce_v3_0_ip_funcs, 978a1255107SAlex Deucher }; 979a1255107SAlex Deucher 980a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_1_ip_block = 981a1255107SAlex Deucher { 982a1255107SAlex Deucher .type = AMD_IP_BLOCK_TYPE_VCE, 983a1255107SAlex Deucher .major = 3, 984a1255107SAlex Deucher .minor = 1, 985a1255107SAlex Deucher .rev = 0, 986a1255107SAlex Deucher .funcs = &vce_v3_0_ip_funcs, 987a1255107SAlex Deucher }; 988a1255107SAlex Deucher 989a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_4_ip_block = 990a1255107SAlex Deucher { 991a1255107SAlex Deucher .type = AMD_IP_BLOCK_TYPE_VCE, 992a1255107SAlex Deucher .major = 3, 993a1255107SAlex Deucher .minor = 4, 994a1255107SAlex Deucher .rev = 0, 995a1255107SAlex Deucher .funcs = &vce_v3_0_ip_funcs, 996a1255107SAlex Deucher }; 997