1aaa36a97SAlex Deucher /* 2aaa36a97SAlex Deucher * Copyright 2014 Advanced Micro Devices, Inc. 3aaa36a97SAlex Deucher * All Rights Reserved. 4aaa36a97SAlex Deucher * 5aaa36a97SAlex Deucher * Permission is hereby granted, free of charge, to any person obtaining a 6aaa36a97SAlex Deucher * copy of this software and associated documentation files (the 7aaa36a97SAlex Deucher * "Software"), to deal in the Software without restriction, including 8aaa36a97SAlex Deucher * without limitation the rights to use, copy, modify, merge, publish, 9aaa36a97SAlex Deucher * distribute, sub license, and/or sell copies of the Software, and to 10aaa36a97SAlex Deucher * permit persons to whom the Software is furnished to do so, subject to 11aaa36a97SAlex Deucher * the following conditions: 12aaa36a97SAlex Deucher * 13aaa36a97SAlex Deucher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14aaa36a97SAlex Deucher * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15aaa36a97SAlex Deucher * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16aaa36a97SAlex Deucher * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17aaa36a97SAlex Deucher * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18aaa36a97SAlex Deucher * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19aaa36a97SAlex Deucher * USE OR OTHER DEALINGS IN THE SOFTWARE. 20aaa36a97SAlex Deucher * 21aaa36a97SAlex Deucher * The above copyright notice and this permission notice (including the 22aaa36a97SAlex Deucher * next paragraph) shall be included in all copies or substantial portions 23aaa36a97SAlex Deucher * of the Software. 
24aaa36a97SAlex Deucher * 25aaa36a97SAlex Deucher * Authors: Christian König <christian.koenig@amd.com> 26aaa36a97SAlex Deucher */ 27aaa36a97SAlex Deucher 28aaa36a97SAlex Deucher #include <linux/firmware.h> 29aaa36a97SAlex Deucher #include <drm/drmP.h> 30aaa36a97SAlex Deucher #include "amdgpu.h" 31aaa36a97SAlex Deucher #include "amdgpu_vce.h" 32aaa36a97SAlex Deucher #include "vid.h" 33aaa36a97SAlex Deucher #include "vce/vce_3_0_d.h" 34aaa36a97SAlex Deucher #include "vce/vce_3_0_sh_mask.h" 35be4f38e2SAlex Deucher #include "oss/oss_3_0_d.h" 36be4f38e2SAlex Deucher #include "oss/oss_3_0_sh_mask.h" 375bbc553aSLeo Liu #include "gca/gfx_8_0_d.h" 386a585777SAlex Deucher #include "smu/smu_7_1_2_d.h" 396a585777SAlex Deucher #include "smu/smu_7_1_2_sh_mask.h" 40115933a5SChunming Zhou #include "gca/gfx_8_0_sh_mask.h" 41091aec0bSAndrey Grodzovsky #include "ivsrcid/ivsrcid_vislands30.h" 42115933a5SChunming Zhou 435bbc553aSLeo Liu 445bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 455bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 4650a1ebc7SRex Zhu #define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07 4750a1ebc7SRex Zhu 483c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 493c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 503c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 5150a1ebc7SRex Zhu #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000 5250a1ebc7SRex Zhu 53567e6e29Sjimqu #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 54aaa36a97SAlex Deucher 55e9822622SLeo Liu #define VCE_V3_0_FW_SIZE (384 * 1024) 56e9822622SLeo Liu #define VCE_V3_0_STACK_SIZE (64 * 1024) 57e9822622SLeo Liu #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024)) 58e9822622SLeo Liu 59ef6239e0SAlex Deucher #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) 60ef6239e0SAlex Deucher 6150a1ebc7SRex Zhu #define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \ 6250a1ebc7SRex Zhu | GRBM_GFX_INDEX__VCE_ALL_PIPE) 
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	/* GRBM_GFX_INDEX selects which VCE instance's register space is
	 * visible; serialize against other users of the index register.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	/* one RPTR register per ring (ring->me is the ring index here) */
	if (ring->me == 0)
		v = RREG32(mmVCE_RB_RPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_RPTR2);
	else
		v = RREG32(mmVCE_RB_RPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	/* select the live VCE instance's register space (see get_rptr) */
	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		v = RREG32(mmVCE_RB_WPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_WPTR2);
	else
		v = RREG32(mmVCE_RB_WPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* select the live VCE instance's register space (see get_rptr) */
	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else if (ring->me == 1)
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);
}

/* Assert/deassert the CGTT clock-gating override bit in VCE_RB_ARB_CTRL. */
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}

/* Program the software clock-gating registers; the magic constants are
 * hardware-specific bit patterns for the VCE 3.0 clock-gating registers.
 */
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
		bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	   With the clocks in the gated state the core is still
	   accessible but the firmware will throttle the clocks on the
	   fly as necessary.
	*/
	if (!gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0x3ff;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}

/* Poll VCE_STATUS until the firmware reports itself loaded.
 * Polls up to 10 x 100 x 10ms; between the 10 outer attempts the ECPU is
 * soft-reset to try to unwedge it.  Returns 0 on success, -ETIMEDOUT on
 * failure.
 */
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	mutex_lock(&adev->grbm_idx_mutex);
	/* bring up each non-harvested VCE instance in turn */
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		/* Program instance 0 reg space for two instances or instance 0 case
		program instance 1 reg space for only instance 1 available case */
		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
			ring = &adev->vce.ring[0];
			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

			ring = &adev->vce.ring[1];
			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

			ring = &adev->vce.ring[2];
			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
		}

		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		/* Stoney and newer use a different VCPU control layout */
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		/* release the ECPU from reset and let firmware boot */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/* Stop all non-harvested VCE instances: gate the VCPU clock, hold the
 * ECPU in reset and clear VCE_STATUS.
 */
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear VCE STATUS */
		WREG32(mmVCE_STATUS, 0);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

/* Determine which VCE instances were fused off (harvested) on this chip.
 * Returns a bitmask of AMDGPU_VCE_HARVEST_VCE0/VCE1.  Some ASICs are
 * special-cased because their fuse values are not reliable.
 */
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Fiji and Stoney only ever expose a single VCE instance */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* APUs and dGPUs store the harvest fuses at different addresses */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		/* Polaris variants only have one usable instance
		 * regardless of the fuse value */
		if ((adev->asic_type == CHIP_POLARIS10) ||
		    (adev->asic_type == CHIP_POLARIS11) ||
		    (adev->asic_type == CHIP_POLARIS12) ||
		    (adev->asic_type == CHIP_VEGAM))
			return AMDGPU_VCE_HARVEST_VCE1;

		return 0;
	}
}

/* IP-block early init: read harvest config, fail if both instances are
 * harvested, and install the ring/irq function tables.
 */
static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
		(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
		(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	adev->vce.num_rings = 3;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

/* IP-block sw init: register the VCE interrupt source, allocate firmware/
 * stack/data BO space, load firmware and initialize the rings.
 */
static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	/* 52.8.3 required for 3 ring support */
	if (adev->vce.fw_version < FW_52_8_3)
		adev->vce.num_rings = 2;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);

	return r;
}

/* IP-block sw fini: tear down in reverse of sw_init. */
static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

/* IP-block hw init: set VCE clocks and ring-test every enabled ring. */
static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v3_0_override_vce_clock_gating(adev, true);

	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
		if (r)
			return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

/* IP-block hw fini: wait for idle, stop the block, then gate clocks. */
static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	vce_v3_0_stop(adev);
	return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
}

/* IP-block suspend: hw fini plus common VCE suspend handling. */
static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

/* IP-block resume: common VCE resume then hw init. */
static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}

/* Program the memory controller / VCPU cache windows for one VCE
 * instance.  @idx selects the per-instance firmware stack/data region;
 * the caller must already have selected the instance via GRBM_GFX_INDEX.
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

	/* Stoney+ use 40-bit BAR registers; BARs hold addr >> 8 */
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	/* instance 0 uses the first stack/data region, instance 1 the
	 * second (firmware image itself is shared) */
	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

/* Idle when SRBM_STATUS2 shows no busy bit for any enabled instance. */
static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

/* Poll is_idle up to adev->usec_timeout times; -ETIMEDOUT on failure. */
static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

/* Check per-instance VCE_STATUS busy bits; if either instance is busy,
 * record a combined soft-reset mask in adev->vce.srbm_soft_reset and
 * report that a reset is needed.
 */
static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to VCE team , we should use VCE_STATUS instead
	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 10 for 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * VCE team suggest use bit 3--bit 6 for busy status check
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

/* Pulse the SRBM soft-reset bits recorded by check_soft_reset. */
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

/* Suspend VCE before a pending soft reset (no-op if none pending). */
static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}


/* Resume VCE after a soft reset (no-op if none was pending). */
static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}

/* Enable/disable the VCE system trap interrupt. */
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

/* Interrupt handler: ack the trap and fence-process the signalled ring
 * (src_data[0] is the ring index 0..2).
 */
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

/* NOTE(review): definition below is truncated in this view of the file;
 * left exactly as found. */
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ?
true : false; 7430689a570SEric Huang int i; 7440689a570SEric Huang 745e3b04bc7SAlex Deucher if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) 7460689a570SEric Huang return 0; 7470689a570SEric Huang 7480689a570SEric Huang mutex_lock(&adev->grbm_idx_mutex); 7490689a570SEric Huang for (i = 0; i < 2; i++) { 7500689a570SEric Huang /* Program VCE Instance 0 or 1 if not harvested */ 7510689a570SEric Huang if (adev->vce.harvest_config & (1 << i)) 7520689a570SEric Huang continue; 7530689a570SEric Huang 75450a1ebc7SRex Zhu WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); 7550689a570SEric Huang 75626679899SRex Zhu if (!enable) { 7570689a570SEric Huang /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ 7580689a570SEric Huang uint32_t data = RREG32(mmVCE_CLOCK_GATING_A); 7590689a570SEric Huang data &= ~(0xf | 0xff0); 7600689a570SEric Huang data |= ((0x0 << 0) | (0x04 << 4)); 7610689a570SEric Huang WREG32(mmVCE_CLOCK_GATING_A, data); 7620689a570SEric Huang 7630689a570SEric Huang /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */ 7640689a570SEric Huang data = RREG32(mmVCE_UENC_CLOCK_GATING); 7650689a570SEric Huang data &= ~(0xf | 0xff0); 7660689a570SEric Huang data |= ((0x0 << 0) | (0x04 << 4)); 7670689a570SEric Huang WREG32(mmVCE_UENC_CLOCK_GATING, data); 7680689a570SEric Huang } 7690689a570SEric Huang 7700689a570SEric Huang vce_v3_0_set_vce_sw_clock_gating(adev, enable); 7710689a570SEric Huang } 7720689a570SEric Huang 77350a1ebc7SRex Zhu WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 7740689a570SEric Huang mutex_unlock(&adev->grbm_idx_mutex); 7750689a570SEric Huang 776aaa36a97SAlex Deucher return 0; 777aaa36a97SAlex Deucher } 778aaa36a97SAlex Deucher 7795fc3aeebSyanyang1 static int vce_v3_0_set_powergating_state(void *handle, 7805fc3aeebSyanyang1 enum amd_powergating_state state) 781aaa36a97SAlex Deucher { 782aaa36a97SAlex Deucher /* This doesn't actually powergate the VCE block. 783aaa36a97SAlex Deucher * That's done in the dpm code via the SMC. 
This 784aaa36a97SAlex Deucher * just re-inits the block as necessary. The actual 785aaa36a97SAlex Deucher * gating still happens in the dpm code. We should 786aaa36a97SAlex Deucher * revisit this when there is a cleaner line between 787aaa36a97SAlex Deucher * the smc and the hw blocks 788aaa36a97SAlex Deucher */ 7895fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 790c79b5561SHuang Rui int ret = 0; 7915fc3aeebSyanyang1 792c79b5561SHuang Rui if (state == AMD_PG_STATE_GATE) { 7936fc11b0eSRex Zhu ret = vce_v3_0_stop(adev); 7946fc11b0eSRex Zhu if (ret) 7956fc11b0eSRex Zhu goto out; 796c79b5561SHuang Rui } else { 797c79b5561SHuang Rui ret = vce_v3_0_start(adev); 798c79b5561SHuang Rui if (ret) 799c79b5561SHuang Rui goto out; 800c79b5561SHuang Rui } 801c79b5561SHuang Rui 802c79b5561SHuang Rui out: 803c79b5561SHuang Rui return ret; 804c79b5561SHuang Rui } 805c79b5561SHuang Rui 806c79b5561SHuang Rui static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags) 807c79b5561SHuang Rui { 808c79b5561SHuang Rui struct amdgpu_device *adev = (struct amdgpu_device *)handle; 809c79b5561SHuang Rui int data; 810c79b5561SHuang Rui 811c79b5561SHuang Rui mutex_lock(&adev->pm.mutex); 812c79b5561SHuang Rui 8131c622002SRex Zhu if (adev->flags & AMD_IS_APU) 8141c622002SRex Zhu data = RREG32_SMC(ixCURRENT_PG_STATUS_APU); 8151c622002SRex Zhu else 8161c622002SRex Zhu data = RREG32_SMC(ixCURRENT_PG_STATUS); 8171c622002SRex Zhu 8181c622002SRex Zhu if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) { 819c79b5561SHuang Rui DRM_INFO("Cannot get clockgating state when VCE is powergated.\n"); 820c79b5561SHuang Rui goto out; 821c79b5561SHuang Rui } 822c79b5561SHuang Rui 823c79b5561SHuang Rui WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 824c79b5561SHuang Rui 825c79b5561SHuang Rui /* AMD_CG_SUPPORT_VCE_MGCG */ 826c79b5561SHuang Rui data = RREG32(mmVCE_CLOCK_GATING_A); 827c79b5561SHuang Rui if (data & (0x04 << 4)) 828c79b5561SHuang Rui *flags |= AMD_CG_SUPPORT_VCE_MGCG; 
829c79b5561SHuang Rui 830c79b5561SHuang Rui out: 831c79b5561SHuang Rui mutex_unlock(&adev->pm.mutex); 832aaa36a97SAlex Deucher } 833aaa36a97SAlex Deucher 834ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, 83534955e03SRex Zhu struct amdgpu_job *job, 83634955e03SRex Zhu struct amdgpu_ib *ib, 837c4c905ecSJack Xiao uint32_t flags) 838ea4a8c1dSMaruthi Srinivas Bayyavarapu { 83934955e03SRex Zhu unsigned vmid = AMDGPU_JOB_GET_VMID(job); 84034955e03SRex Zhu 841ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_IB_VM); 842c4f46f22SChristian König amdgpu_ring_write(ring, vmid); 843ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); 844ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 845ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, ib->length_dw); 846ea4a8c1dSMaruthi Srinivas Bayyavarapu } 847ea4a8c1dSMaruthi Srinivas Bayyavarapu 848ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring, 849c633c00bSChristian König unsigned int vmid, uint64_t pd_addr) 850ea4a8c1dSMaruthi Srinivas Bayyavarapu { 851ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB); 852c4f46f22SChristian König amdgpu_ring_write(ring, vmid); 853ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, pd_addr >> 12); 854ea4a8c1dSMaruthi Srinivas Bayyavarapu 855ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB); 856c4f46f22SChristian König amdgpu_ring_write(ring, vmid); 857ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_END); 858ea4a8c1dSMaruthi Srinivas Bayyavarapu } 859ea4a8c1dSMaruthi Srinivas Bayyavarapu 860ea4a8c1dSMaruthi Srinivas Bayyavarapu static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring) 861ea4a8c1dSMaruthi Srinivas Bayyavarapu { 862ea4a8c1dSMaruthi Srinivas Bayyavarapu uint32_t seq = 
ring->fence_drv.sync_seq; 863ea4a8c1dSMaruthi Srinivas Bayyavarapu uint64_t addr = ring->fence_drv.gpu_addr; 864ea4a8c1dSMaruthi Srinivas Bayyavarapu 865ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, VCE_CMD_WAIT_GE); 866ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, lower_32_bits(addr)); 867ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, upper_32_bits(addr)); 868ea4a8c1dSMaruthi Srinivas Bayyavarapu amdgpu_ring_write(ring, seq); 869ea4a8c1dSMaruthi Srinivas Bayyavarapu } 870ea4a8c1dSMaruthi Srinivas Bayyavarapu 871a1255107SAlex Deucher static const struct amd_ip_funcs vce_v3_0_ip_funcs = { 87288a907d6STom St Denis .name = "vce_v3_0", 873aaa36a97SAlex Deucher .early_init = vce_v3_0_early_init, 874aaa36a97SAlex Deucher .late_init = NULL, 875aaa36a97SAlex Deucher .sw_init = vce_v3_0_sw_init, 876aaa36a97SAlex Deucher .sw_fini = vce_v3_0_sw_fini, 877aaa36a97SAlex Deucher .hw_init = vce_v3_0_hw_init, 878aaa36a97SAlex Deucher .hw_fini = vce_v3_0_hw_fini, 879aaa36a97SAlex Deucher .suspend = vce_v3_0_suspend, 880aaa36a97SAlex Deucher .resume = vce_v3_0_resume, 881aaa36a97SAlex Deucher .is_idle = vce_v3_0_is_idle, 882aaa36a97SAlex Deucher .wait_for_idle = vce_v3_0_wait_for_idle, 883115933a5SChunming Zhou .check_soft_reset = vce_v3_0_check_soft_reset, 884115933a5SChunming Zhou .pre_soft_reset = vce_v3_0_pre_soft_reset, 885aaa36a97SAlex Deucher .soft_reset = vce_v3_0_soft_reset, 886115933a5SChunming Zhou .post_soft_reset = vce_v3_0_post_soft_reset, 887aaa36a97SAlex Deucher .set_clockgating_state = vce_v3_0_set_clockgating_state, 888aaa36a97SAlex Deucher .set_powergating_state = vce_v3_0_set_powergating_state, 889c79b5561SHuang Rui .get_clockgating_state = vce_v3_0_get_clockgating_state, 890aaa36a97SAlex Deucher }; 891aaa36a97SAlex Deucher 892ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { 89321cd942eSChristian König .type = AMDGPU_RING_TYPE_VCE, 89479887142SChristian König 
.align_mask = 0xf, 89579887142SChristian König .nop = VCE_CMD_NO_OP, 896536fbf94SKen Wang .support_64bit_ptrs = false, 897f61334b5SLeo Liu .no_user_fence = true, 898aaa36a97SAlex Deucher .get_rptr = vce_v3_0_ring_get_rptr, 899aaa36a97SAlex Deucher .get_wptr = vce_v3_0_ring_get_wptr, 900aaa36a97SAlex Deucher .set_wptr = vce_v3_0_ring_set_wptr, 901aaa36a97SAlex Deucher .parse_cs = amdgpu_vce_ring_parse_cs, 902e12f3d7aSChristian König .emit_frame_size = 903e12f3d7aSChristian König 4 + /* vce_v3_0_emit_pipeline_sync */ 904e12f3d7aSChristian König 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ 9053413accbSAlex Deucher .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ 906aaa36a97SAlex Deucher .emit_ib = amdgpu_vce_ring_emit_ib, 907aaa36a97SAlex Deucher .emit_fence = amdgpu_vce_ring_emit_fence, 908aaa36a97SAlex Deucher .test_ring = amdgpu_vce_ring_test_ring, 909aaa36a97SAlex Deucher .test_ib = amdgpu_vce_ring_test_ib, 910edff0e28SJammy Zhou .insert_nop = amdgpu_ring_insert_nop, 9119e5d5309SChristian König .pad_ib = amdgpu_ring_generic_pad_ib, 912ebff485eSChristian König .begin_use = amdgpu_vce_ring_begin_use, 913ebff485eSChristian König .end_use = amdgpu_vce_ring_end_use, 914aaa36a97SAlex Deucher }; 915aaa36a97SAlex Deucher 916ea4a8c1dSMaruthi Srinivas Bayyavarapu static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { 91721cd942eSChristian König .type = AMDGPU_RING_TYPE_VCE, 91879887142SChristian König .align_mask = 0xf, 91979887142SChristian König .nop = VCE_CMD_NO_OP, 920536fbf94SKen Wang .support_64bit_ptrs = false, 921f61334b5SLeo Liu .no_user_fence = true, 922ea4a8c1dSMaruthi Srinivas Bayyavarapu .get_rptr = vce_v3_0_ring_get_rptr, 923ea4a8c1dSMaruthi Srinivas Bayyavarapu .get_wptr = vce_v3_0_ring_get_wptr, 924ea4a8c1dSMaruthi Srinivas Bayyavarapu .set_wptr = vce_v3_0_ring_set_wptr, 92598614701SChristian König .parse_cs = amdgpu_vce_ring_parse_cs_vm, 926e12f3d7aSChristian König .emit_frame_size = 927e12f3d7aSChristian König 6 + /* 
vce_v3_0_emit_vm_flush */ 928e12f3d7aSChristian König 4 + /* vce_v3_0_emit_pipeline_sync */ 929e12f3d7aSChristian König 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ 9303413accbSAlex Deucher .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ 931ea4a8c1dSMaruthi Srinivas Bayyavarapu .emit_ib = vce_v3_0_ring_emit_ib, 932ea4a8c1dSMaruthi Srinivas Bayyavarapu .emit_vm_flush = vce_v3_0_emit_vm_flush, 933ea4a8c1dSMaruthi Srinivas Bayyavarapu .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, 934ea4a8c1dSMaruthi Srinivas Bayyavarapu .emit_fence = amdgpu_vce_ring_emit_fence, 935ea4a8c1dSMaruthi Srinivas Bayyavarapu .test_ring = amdgpu_vce_ring_test_ring, 936ea4a8c1dSMaruthi Srinivas Bayyavarapu .test_ib = amdgpu_vce_ring_test_ib, 937ea4a8c1dSMaruthi Srinivas Bayyavarapu .insert_nop = amdgpu_ring_insert_nop, 938ea4a8c1dSMaruthi Srinivas Bayyavarapu .pad_ib = amdgpu_ring_generic_pad_ib, 939ea4a8c1dSMaruthi Srinivas Bayyavarapu .begin_use = amdgpu_vce_ring_begin_use, 940ea4a8c1dSMaruthi Srinivas Bayyavarapu .end_use = amdgpu_vce_ring_end_use, 941ea4a8c1dSMaruthi Srinivas Bayyavarapu }; 942ea4a8c1dSMaruthi Srinivas Bayyavarapu 943aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) 944aaa36a97SAlex Deucher { 94575c65480SAlex Deucher int i; 94675c65480SAlex Deucher 947ea4a8c1dSMaruthi Srinivas Bayyavarapu if (adev->asic_type >= CHIP_STONEY) { 9485d4af988SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) { 949ea4a8c1dSMaruthi Srinivas Bayyavarapu adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs; 9505d4af988SAlex Deucher adev->vce.ring[i].me = i; 9515d4af988SAlex Deucher } 952ea4a8c1dSMaruthi Srinivas Bayyavarapu DRM_INFO("VCE enabled in VM mode\n"); 953ea4a8c1dSMaruthi Srinivas Bayyavarapu } else { 9545d4af988SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) { 955ea4a8c1dSMaruthi Srinivas Bayyavarapu adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs; 9565d4af988SAlex Deucher adev->vce.ring[i].me = i; 9575d4af988SAlex Deucher } 
958ea4a8c1dSMaruthi Srinivas Bayyavarapu DRM_INFO("VCE enabled in physical mode\n"); 959ea4a8c1dSMaruthi Srinivas Bayyavarapu } 960aaa36a97SAlex Deucher } 961aaa36a97SAlex Deucher 962aaa36a97SAlex Deucher static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = { 963aaa36a97SAlex Deucher .set = vce_v3_0_set_interrupt_state, 964aaa36a97SAlex Deucher .process = vce_v3_0_process_interrupt, 965aaa36a97SAlex Deucher }; 966aaa36a97SAlex Deucher 967aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev) 968aaa36a97SAlex Deucher { 969aaa36a97SAlex Deucher adev->vce.irq.num_types = 1; 970aaa36a97SAlex Deucher adev->vce.irq.funcs = &vce_v3_0_irq_funcs; 971aaa36a97SAlex Deucher }; 972a1255107SAlex Deucher 973a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_0_ip_block = 974a1255107SAlex Deucher { 975a1255107SAlex Deucher .type = AMD_IP_BLOCK_TYPE_VCE, 976a1255107SAlex Deucher .major = 3, 977a1255107SAlex Deucher .minor = 0, 978a1255107SAlex Deucher .rev = 0, 979a1255107SAlex Deucher .funcs = &vce_v3_0_ip_funcs, 980a1255107SAlex Deucher }; 981a1255107SAlex Deucher 982a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_1_ip_block = 983a1255107SAlex Deucher { 984a1255107SAlex Deucher .type = AMD_IP_BLOCK_TYPE_VCE, 985a1255107SAlex Deucher .major = 3, 986a1255107SAlex Deucher .minor = 1, 987a1255107SAlex Deucher .rev = 0, 988a1255107SAlex Deucher .funcs = &vce_v3_0_ip_funcs, 989a1255107SAlex Deucher }; 990a1255107SAlex Deucher 991a1255107SAlex Deucher const struct amdgpu_ip_block_version vce_v3_4_ip_block = 992a1255107SAlex Deucher { 993a1255107SAlex Deucher .type = AMD_IP_BLOCK_TYPE_VCE, 994a1255107SAlex Deucher .major = 3, 995a1255107SAlex Deucher .minor = 4, 996a1255107SAlex Deucher .rev = 0, 997a1255107SAlex Deucher .funcs = &vce_v3_0_ip_funcs, 998a1255107SAlex Deucher }; 999