1aaa36a97SAlex Deucher /* 2aaa36a97SAlex Deucher * Copyright 2014 Advanced Micro Devices, Inc. 3aaa36a97SAlex Deucher * All Rights Reserved. 4aaa36a97SAlex Deucher * 5aaa36a97SAlex Deucher * Permission is hereby granted, free of charge, to any person obtaining a 6aaa36a97SAlex Deucher * copy of this software and associated documentation files (the 7aaa36a97SAlex Deucher * "Software"), to deal in the Software without restriction, including 8aaa36a97SAlex Deucher * without limitation the rights to use, copy, modify, merge, publish, 9aaa36a97SAlex Deucher * distribute, sub license, and/or sell copies of the Software, and to 10aaa36a97SAlex Deucher * permit persons to whom the Software is furnished to do so, subject to 11aaa36a97SAlex Deucher * the following conditions: 12aaa36a97SAlex Deucher * 13aaa36a97SAlex Deucher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14aaa36a97SAlex Deucher * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15aaa36a97SAlex Deucher * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16aaa36a97SAlex Deucher * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17aaa36a97SAlex Deucher * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18aaa36a97SAlex Deucher * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19aaa36a97SAlex Deucher * USE OR OTHER DEALINGS IN THE SOFTWARE. 20aaa36a97SAlex Deucher * 21aaa36a97SAlex Deucher * The above copyright notice and this permission notice (including the 22aaa36a97SAlex Deucher * next paragraph) shall be included in all copies or substantial portions 23aaa36a97SAlex Deucher * of the Software. 
24aaa36a97SAlex Deucher * 25aaa36a97SAlex Deucher * Authors: Christian König <christian.koenig@amd.com> 26aaa36a97SAlex Deucher */ 27aaa36a97SAlex Deucher 28aaa36a97SAlex Deucher #include <linux/firmware.h> 29aaa36a97SAlex Deucher #include <drm/drmP.h> 30aaa36a97SAlex Deucher #include "amdgpu.h" 31aaa36a97SAlex Deucher #include "amdgpu_vce.h" 32aaa36a97SAlex Deucher #include "vid.h" 33aaa36a97SAlex Deucher #include "vce/vce_3_0_d.h" 34aaa36a97SAlex Deucher #include "vce/vce_3_0_sh_mask.h" 35be4f38e2SAlex Deucher #include "oss/oss_3_0_d.h" 36be4f38e2SAlex Deucher #include "oss/oss_3_0_sh_mask.h" 375bbc553aSLeo Liu #include "gca/gfx_8_0_d.h" 386a585777SAlex Deucher #include "smu/smu_7_1_2_d.h" 396a585777SAlex Deucher #include "smu/smu_7_1_2_sh_mask.h" 40115933a5SChunming Zhou #include "gca/gfx_8_0_d.h" 41115933a5SChunming Zhou #include "gca/gfx_8_0_sh_mask.h" 42115933a5SChunming Zhou 435bbc553aSLeo Liu 445bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 455bbc553aSLeo Liu #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 463c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 473c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 483c0ff9f1SLeo Liu #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 49567e6e29Sjimqu #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 50aaa36a97SAlex Deucher 51e9822622SLeo Liu #define VCE_V3_0_FW_SIZE (384 * 1024) 52e9822622SLeo Liu #define VCE_V3_0_STACK_SIZE (64 * 1024) 53e9822622SLeo Liu #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024)) 54e9822622SLeo Liu 555bbc553aSLeo Liu static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); 56aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); 57aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); 58567e6e29Sjimqu static int vce_v3_0_wait_for_idle(void *handle); 59aaa36a97SAlex Deucher 60aaa36a97SAlex Deucher /** 61aaa36a97SAlex Deucher * 
vce_v3_0_ring_get_rptr - get read pointer 62aaa36a97SAlex Deucher * 63aaa36a97SAlex Deucher * @ring: amdgpu_ring pointer 64aaa36a97SAlex Deucher * 65aaa36a97SAlex Deucher * Returns the current hardware read pointer 66aaa36a97SAlex Deucher */ 67aaa36a97SAlex Deucher static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) 68aaa36a97SAlex Deucher { 69aaa36a97SAlex Deucher struct amdgpu_device *adev = ring->adev; 70aaa36a97SAlex Deucher 71aaa36a97SAlex Deucher if (ring == &adev->vce.ring[0]) 72aaa36a97SAlex Deucher return RREG32(mmVCE_RB_RPTR); 73aaa36a97SAlex Deucher else 74aaa36a97SAlex Deucher return RREG32(mmVCE_RB_RPTR2); 75aaa36a97SAlex Deucher } 76aaa36a97SAlex Deucher 77aaa36a97SAlex Deucher /** 78aaa36a97SAlex Deucher * vce_v3_0_ring_get_wptr - get write pointer 79aaa36a97SAlex Deucher * 80aaa36a97SAlex Deucher * @ring: amdgpu_ring pointer 81aaa36a97SAlex Deucher * 82aaa36a97SAlex Deucher * Returns the current hardware write pointer 83aaa36a97SAlex Deucher */ 84aaa36a97SAlex Deucher static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) 85aaa36a97SAlex Deucher { 86aaa36a97SAlex Deucher struct amdgpu_device *adev = ring->adev; 87aaa36a97SAlex Deucher 88aaa36a97SAlex Deucher if (ring == &adev->vce.ring[0]) 89aaa36a97SAlex Deucher return RREG32(mmVCE_RB_WPTR); 90aaa36a97SAlex Deucher else 91aaa36a97SAlex Deucher return RREG32(mmVCE_RB_WPTR2); 92aaa36a97SAlex Deucher } 93aaa36a97SAlex Deucher 94aaa36a97SAlex Deucher /** 95aaa36a97SAlex Deucher * vce_v3_0_ring_set_wptr - set write pointer 96aaa36a97SAlex Deucher * 97aaa36a97SAlex Deucher * @ring: amdgpu_ring pointer 98aaa36a97SAlex Deucher * 99aaa36a97SAlex Deucher * Commits the write pointer to the hardware 100aaa36a97SAlex Deucher */ 101aaa36a97SAlex Deucher static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) 102aaa36a97SAlex Deucher { 103aaa36a97SAlex Deucher struct amdgpu_device *adev = ring->adev; 104aaa36a97SAlex Deucher 105aaa36a97SAlex Deucher if (ring == &adev->vce.ring[0]) 
106aaa36a97SAlex Deucher WREG32(mmVCE_RB_WPTR, ring->wptr); 107aaa36a97SAlex Deucher else 108aaa36a97SAlex Deucher WREG32(mmVCE_RB_WPTR2, ring->wptr); 109aaa36a97SAlex Deucher } 110aaa36a97SAlex Deucher 1110689a570SEric Huang static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) 1120689a570SEric Huang { 113f3f0ea95STom St Denis WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0); 1140689a570SEric Huang } 1150689a570SEric Huang 1160689a570SEric Huang static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, 1170689a570SEric Huang bool gated) 1180689a570SEric Huang { 119f3f0ea95STom St Denis u32 data; 120f16fe6d3STom St Denis 1210689a570SEric Huang /* Set Override to disable Clock Gating */ 1220689a570SEric Huang vce_v3_0_override_vce_clock_gating(adev, true); 1230689a570SEric Huang 1246f906814STom St Denis /* This function enables MGCG which is controlled by firmware. 1256f906814STom St Denis With the clocks in the gated state the core is still 1266f906814STom St Denis accessible but the firmware will throttle the clocks on the 1276f906814STom St Denis fly as necessary. 
1280689a570SEric Huang */ 1296f906814STom St Denis if (gated) { 130f3f0ea95STom St Denis data = RREG32(mmVCE_CLOCK_GATING_B); 1310689a570SEric Huang data |= 0x1ff; 1320689a570SEric Huang data &= ~0xef0000; 1330689a570SEric Huang WREG32(mmVCE_CLOCK_GATING_B, data); 1340689a570SEric Huang 135f3f0ea95STom St Denis data = RREG32(mmVCE_UENC_CLOCK_GATING); 1360689a570SEric Huang data |= 0x3ff000; 1370689a570SEric Huang data &= ~0xffc00000; 1380689a570SEric Huang WREG32(mmVCE_UENC_CLOCK_GATING, data); 1390689a570SEric Huang 140f3f0ea95STom St Denis data = RREG32(mmVCE_UENC_CLOCK_GATING_2); 1410689a570SEric Huang data |= 0x2; 1426f906814STom St Denis data &= ~0x00010000; 1430689a570SEric Huang WREG32(mmVCE_UENC_CLOCK_GATING_2, data); 1440689a570SEric Huang 145f3f0ea95STom St Denis data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); 1460689a570SEric Huang data |= 0x37f; 1470689a570SEric Huang WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); 1480689a570SEric Huang 149f3f0ea95STom St Denis data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); 1500689a570SEric Huang data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | 1510689a570SEric Huang VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | 1520689a570SEric Huang VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | 1530689a570SEric Huang 0x8; 1540689a570SEric Huang WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); 1550689a570SEric Huang } else { 156f3f0ea95STom St Denis data = RREG32(mmVCE_CLOCK_GATING_B); 1570689a570SEric Huang data &= ~0x80010; 1580689a570SEric Huang data |= 0xe70008; 1590689a570SEric Huang WREG32(mmVCE_CLOCK_GATING_B, data); 1606f906814STom St Denis 161f3f0ea95STom St Denis data = RREG32(mmVCE_UENC_CLOCK_GATING); 1620689a570SEric Huang data |= 0xffc00000; 1630689a570SEric Huang WREG32(mmVCE_UENC_CLOCK_GATING, data); 1646f906814STom St Denis 165f3f0ea95STom St Denis data = RREG32(mmVCE_UENC_CLOCK_GATING_2); 1660689a570SEric Huang data |= 0x10000; 1670689a570SEric Huang WREG32(mmVCE_UENC_CLOCK_GATING_2, data); 1686f906814STom St Denis 169f3f0ea95STom St 
Denis data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); 1700689a570SEric Huang data &= ~0xffc00000; 1710689a570SEric Huang WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); 1726f906814STom St Denis 173f3f0ea95STom St Denis data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); 1740689a570SEric Huang data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | 1750689a570SEric Huang VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | 1760689a570SEric Huang VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | 1770689a570SEric Huang 0x8); 1780689a570SEric Huang WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); 1790689a570SEric Huang } 1800689a570SEric Huang vce_v3_0_override_vce_clock_gating(adev, false); 1810689a570SEric Huang } 1820689a570SEric Huang 183567e6e29Sjimqu static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) 184567e6e29Sjimqu { 185567e6e29Sjimqu int i, j; 186567e6e29Sjimqu 187567e6e29Sjimqu for (i = 0; i < 10; ++i) { 188567e6e29Sjimqu for (j = 0; j < 100; ++j) { 189b7e2e9f7Sjimqu uint32_t status = RREG32(mmVCE_STATUS); 190b7e2e9f7Sjimqu 191567e6e29Sjimqu if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) 192567e6e29Sjimqu return 0; 193567e6e29Sjimqu mdelay(10); 194567e6e29Sjimqu } 195567e6e29Sjimqu 196567e6e29Sjimqu DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); 197f3f0ea95STom St Denis WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); 198567e6e29Sjimqu mdelay(10); 199f3f0ea95STom St Denis WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); 200567e6e29Sjimqu mdelay(10); 201567e6e29Sjimqu } 202567e6e29Sjimqu 203567e6e29Sjimqu return -ETIMEDOUT; 204567e6e29Sjimqu } 205567e6e29Sjimqu 206aaa36a97SAlex Deucher /** 207aaa36a97SAlex Deucher * vce_v3_0_start - start VCE block 208aaa36a97SAlex Deucher * 209aaa36a97SAlex Deucher * @adev: amdgpu_device pointer 210aaa36a97SAlex Deucher * 211aaa36a97SAlex Deucher * Setup and start the VCE block 212aaa36a97SAlex Deucher */ 213aaa36a97SAlex Deucher static int vce_v3_0_start(struct amdgpu_device *adev) 214aaa36a97SAlex Deucher { 215aaa36a97SAlex 
Deucher struct amdgpu_ring *ring; 216567e6e29Sjimqu int idx, r; 217567e6e29Sjimqu 218567e6e29Sjimqu ring = &adev->vce.ring[0]; 219567e6e29Sjimqu WREG32(mmVCE_RB_RPTR, ring->wptr); 220567e6e29Sjimqu WREG32(mmVCE_RB_WPTR, ring->wptr); 221567e6e29Sjimqu WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); 222567e6e29Sjimqu WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 223567e6e29Sjimqu WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); 224567e6e29Sjimqu 225567e6e29Sjimqu ring = &adev->vce.ring[1]; 226567e6e29Sjimqu WREG32(mmVCE_RB_RPTR2, ring->wptr); 227567e6e29Sjimqu WREG32(mmVCE_RB_WPTR2, ring->wptr); 228567e6e29Sjimqu WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); 229567e6e29Sjimqu WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 230567e6e29Sjimqu WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); 231aaa36a97SAlex Deucher 2325bbc553aSLeo Liu mutex_lock(&adev->grbm_idx_mutex); 2335bbc553aSLeo Liu for (idx = 0; idx < 2; ++idx) { 2346a585777SAlex Deucher if (adev->vce.harvest_config & (1 << idx)) 2356a585777SAlex Deucher continue; 2366a585777SAlex Deucher 237f3f0ea95STom St Denis WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 2385bbc553aSLeo Liu vce_v3_0_mc_resume(adev, idx); 239f3f0ea95STom St Denis WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); 240567e6e29Sjimqu 2413c0ff9f1SLeo Liu if (adev->asic_type >= CHIP_STONEY) 2423c0ff9f1SLeo Liu WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); 2433c0ff9f1SLeo Liu else 244f3f0ea95STom St Denis WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); 245aaa36a97SAlex Deucher 246f3f0ea95STom St Denis WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); 247aaa36a97SAlex Deucher mdelay(100); 248aaa36a97SAlex Deucher 249567e6e29Sjimqu r = vce_v3_0_firmware_loaded(adev); 250aaa36a97SAlex Deucher 251aaa36a97SAlex Deucher /* clear BUSY flag */ 252f3f0ea95STom St Denis WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); 253aaa36a97SAlex Deucher 254aaa36a97SAlex Deucher if (r) { 255aaa36a97SAlex Deucher DRM_ERROR("VCE not responding, giving up!!!\n"); 2565bbc553aSLeo Liu 
mutex_unlock(&adev->grbm_idx_mutex); 257aaa36a97SAlex Deucher return r; 258aaa36a97SAlex Deucher } 2595bbc553aSLeo Liu } 2605bbc553aSLeo Liu 261f3f0ea95STom St Denis WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 2625bbc553aSLeo Liu mutex_unlock(&adev->grbm_idx_mutex); 2635bbc553aSLeo Liu 264567e6e29Sjimqu return 0; 265567e6e29Sjimqu } 2665bbc553aSLeo Liu 267567e6e29Sjimqu static int vce_v3_0_stop(struct amdgpu_device *adev) 268567e6e29Sjimqu { 269567e6e29Sjimqu int idx; 270567e6e29Sjimqu 271567e6e29Sjimqu mutex_lock(&adev->grbm_idx_mutex); 272567e6e29Sjimqu for (idx = 0; idx < 2; ++idx) { 273567e6e29Sjimqu if (adev->vce.harvest_config & (1 << idx)) 274567e6e29Sjimqu continue; 275567e6e29Sjimqu 276f3f0ea95STom St Denis WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 277567e6e29Sjimqu 278567e6e29Sjimqu if (adev->asic_type >= CHIP_STONEY) 279567e6e29Sjimqu WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); 280567e6e29Sjimqu else 281f3f0ea95STom St Denis WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0); 282f3f0ea95STom St Denis 283567e6e29Sjimqu /* hold on ECPU */ 284f3f0ea95STom St Denis WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); 285567e6e29Sjimqu 286567e6e29Sjimqu /* clear BUSY flag */ 287f3f0ea95STom St Denis WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); 288567e6e29Sjimqu 289567e6e29Sjimqu /* Set Clock-Gating off */ 290567e6e29Sjimqu if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) 291567e6e29Sjimqu vce_v3_0_set_vce_sw_clock_gating(adev, false); 292567e6e29Sjimqu } 293567e6e29Sjimqu 294f3f0ea95STom St Denis WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 295567e6e29Sjimqu mutex_unlock(&adev->grbm_idx_mutex); 296aaa36a97SAlex Deucher 297aaa36a97SAlex Deucher return 0; 298aaa36a97SAlex Deucher } 299aaa36a97SAlex Deucher 3006a585777SAlex Deucher #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074 3016a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__SHIFT 27 3026a585777SAlex Deucher #define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000 3036a585777SAlex Deucher 3046a585777SAlex Deucher static 
unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) 3056a585777SAlex Deucher { 3066a585777SAlex Deucher u32 tmp; 3076a585777SAlex Deucher 3082cc0c0b5SFlora Cui /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */ 309cfaba566SSamuel Li if ((adev->asic_type == CHIP_FIJI) || 3101b4eeea5SSonny Jiang (adev->asic_type == CHIP_STONEY) || 3112cc0c0b5SFlora Cui (adev->asic_type == CHIP_POLARIS10) || 3122cc0c0b5SFlora Cui (adev->asic_type == CHIP_POLARIS11)) 3131dab5f06STom St Denis return AMDGPU_VCE_HARVEST_VCE1; 314188a9bcdSAlex Deucher 315188a9bcdSAlex Deucher /* Tonga and CZ are dual or single pipe */ 3162f7d10b3SJammy Zhou if (adev->flags & AMD_IS_APU) 3176a585777SAlex Deucher tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & 3186a585777SAlex Deucher VCE_HARVEST_FUSE_MACRO__MASK) >> 3196a585777SAlex Deucher VCE_HARVEST_FUSE_MACRO__SHIFT; 3206a585777SAlex Deucher else 3216a585777SAlex Deucher tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) & 3226a585777SAlex Deucher CC_HARVEST_FUSES__VCE_DISABLE_MASK) >> 3236a585777SAlex Deucher CC_HARVEST_FUSES__VCE_DISABLE__SHIFT; 3246a585777SAlex Deucher 3256a585777SAlex Deucher switch (tmp) { 3266a585777SAlex Deucher case 1: 3271dab5f06STom St Denis return AMDGPU_VCE_HARVEST_VCE0; 3286a585777SAlex Deucher case 2: 3291dab5f06STom St Denis return AMDGPU_VCE_HARVEST_VCE1; 3306a585777SAlex Deucher case 3: 3311dab5f06STom St Denis return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; 3326a585777SAlex Deucher default: 3331dab5f06STom St Denis return 0; 3346a585777SAlex Deucher } 3356a585777SAlex Deucher } 3366a585777SAlex Deucher 3375fc3aeebSyanyang1 static int vce_v3_0_early_init(void *handle) 338aaa36a97SAlex Deucher { 3395fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3405fc3aeebSyanyang1 3416a585777SAlex Deucher adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); 3426a585777SAlex Deucher 3436a585777SAlex Deucher if ((adev->vce.harvest_config & 3446a585777SAlex Deucher 
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) == 3456a585777SAlex Deucher (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) 3466a585777SAlex Deucher return -ENOENT; 3476a585777SAlex Deucher 34875c65480SAlex Deucher adev->vce.num_rings = 2; 34975c65480SAlex Deucher 350aaa36a97SAlex Deucher vce_v3_0_set_ring_funcs(adev); 351aaa36a97SAlex Deucher vce_v3_0_set_irq_funcs(adev); 352aaa36a97SAlex Deucher 353aaa36a97SAlex Deucher return 0; 354aaa36a97SAlex Deucher } 355aaa36a97SAlex Deucher 3565fc3aeebSyanyang1 static int vce_v3_0_sw_init(void *handle) 357aaa36a97SAlex Deucher { 3585fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 359aaa36a97SAlex Deucher struct amdgpu_ring *ring; 36075c65480SAlex Deucher int r, i; 361aaa36a97SAlex Deucher 362aaa36a97SAlex Deucher /* VCE */ 363aaa36a97SAlex Deucher r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq); 364aaa36a97SAlex Deucher if (r) 365aaa36a97SAlex Deucher return r; 366aaa36a97SAlex Deucher 367e9822622SLeo Liu r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE + 368e9822622SLeo Liu (VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2); 369aaa36a97SAlex Deucher if (r) 370aaa36a97SAlex Deucher return r; 371aaa36a97SAlex Deucher 372aaa36a97SAlex Deucher r = amdgpu_vce_resume(adev); 373aaa36a97SAlex Deucher if (r) 374aaa36a97SAlex Deucher return r; 375aaa36a97SAlex Deucher 37675c65480SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) { 37775c65480SAlex Deucher ring = &adev->vce.ring[i]; 37875c65480SAlex Deucher sprintf(ring->name, "vce%d", i); 379a3f1cf35SChristian König r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, 380aaa36a97SAlex Deucher &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); 381aaa36a97SAlex Deucher if (r) 382aaa36a97SAlex Deucher return r; 38375c65480SAlex Deucher } 384aaa36a97SAlex Deucher 385aaa36a97SAlex Deucher return r; 386aaa36a97SAlex Deucher } 387aaa36a97SAlex Deucher 3885fc3aeebSyanyang1 static int vce_v3_0_sw_fini(void *handle) 389aaa36a97SAlex Deucher { 
390aaa36a97SAlex Deucher int r; 3915fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 392aaa36a97SAlex Deucher 393aaa36a97SAlex Deucher r = amdgpu_vce_suspend(adev); 394aaa36a97SAlex Deucher if (r) 395aaa36a97SAlex Deucher return r; 396aaa36a97SAlex Deucher 397aaa36a97SAlex Deucher r = amdgpu_vce_sw_fini(adev); 398aaa36a97SAlex Deucher if (r) 399aaa36a97SAlex Deucher return r; 400aaa36a97SAlex Deucher 401aaa36a97SAlex Deucher return r; 402aaa36a97SAlex Deucher } 403aaa36a97SAlex Deucher 4045fc3aeebSyanyang1 static int vce_v3_0_hw_init(void *handle) 405aaa36a97SAlex Deucher { 406691ca86aSTom St Denis int r, i; 4075fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 408aaa36a97SAlex Deucher 409aaa36a97SAlex Deucher r = vce_v3_0_start(adev); 410aaa36a97SAlex Deucher if (r) 411aaa36a97SAlex Deucher return r; 412aaa36a97SAlex Deucher 41375c65480SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) 41475c65480SAlex Deucher adev->vce.ring[i].ready = false; 415aaa36a97SAlex Deucher 41675c65480SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) { 417691ca86aSTom St Denis r = amdgpu_ring_test_ring(&adev->vce.ring[i]); 418691ca86aSTom St Denis if (r) 419aaa36a97SAlex Deucher return r; 420691ca86aSTom St Denis else 421691ca86aSTom St Denis adev->vce.ring[i].ready = true; 422aaa36a97SAlex Deucher } 423aaa36a97SAlex Deucher 424aaa36a97SAlex Deucher DRM_INFO("VCE initialized successfully.\n"); 425aaa36a97SAlex Deucher 426aaa36a97SAlex Deucher return 0; 427aaa36a97SAlex Deucher } 428aaa36a97SAlex Deucher 4295fc3aeebSyanyang1 static int vce_v3_0_hw_fini(void *handle) 430aaa36a97SAlex Deucher { 431567e6e29Sjimqu int r; 432567e6e29Sjimqu struct amdgpu_device *adev = (struct amdgpu_device *)handle; 433567e6e29Sjimqu 434567e6e29Sjimqu r = vce_v3_0_wait_for_idle(handle); 435567e6e29Sjimqu if (r) 436567e6e29Sjimqu return r; 437567e6e29Sjimqu 438567e6e29Sjimqu return vce_v3_0_stop(adev); 439aaa36a97SAlex Deucher } 

/* IP block hook: shut the hardware down, then save VCE software state. */
static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

/* IP block hook: restore VCE software state, then restart the hardware. */
static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * vce_v3_0_mc_resume - program memory-controller/cache setup for one instance
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance index (0 or 1); caller must already have selected
 *       this instance via GRBM_GFX_INDEX
 *
 * Points the VCPU cache windows at the firmware, stack and data regions
 * of the shared VCE BO.  Instance 0 uses the first stack/data pair right
 * after the firmware image; instance 1 uses the second pair.
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	/* Stoney+ has three 40-bit BARs; older ASICs only one */
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		/* second instance: skip over instance 0's stack+data
		 * NOTE(review): offsets here are masked with 0xfffffff while the
		 * idx==0 branch uses 0x7fffffff — looks inconsistent; confirm
		 * against the register spec before unifying. */
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

/* IP block hook: true when no non-harvested VCE instance reports busy
 * in SRBM_STATUS2. */
static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	/* only consider instances that are actually present */
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

/* IP block hook: poll vce_v3_0_is_idle() up to adev->usec_timeout times.
 * Returns 0 once idle, -ETIMEDOUT otherwise. */
static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

/* IP block hook: probe both VCE instances' VCE_STATUS for busy bits and
 * record whether a SRBM soft reset is needed in adev->vce.srbm_soft_reset. */
static int vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to VCE team , we should use VCE_STATUS instead
	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 10 for 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * VCE team suggest use bit 3--bit 6 for busy status check
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);

	if (srbm_soft_reset) {
		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
		adev->vce.srbm_soft_reset = srbm_soft_reset;
	} else {
		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
		adev->vce.srbm_soft_reset = 0;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
	return 0;
}

/* IP block hook: pulse the SRBM soft-reset bits recorded by
 * vce_v3_0_check_soft_reset(); no-op when the block is not hung. */
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

/* IP block hook: suspend the block before a soft reset (only if hung). */
static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}


/* IP block hook: bring the block back up after a soft reset (only if hung). */
static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}

/* Enable or disable the VCE trap interrupt according to @state. */
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

/* VCE interrupt handler: ack the trap interrupt and process the fence
 * for the ring identified by src_data (0 or 1). */
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data) {
	case 0:
	case 1:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

/* Toggle the ECLK DFS bypass bit in the SMC (used on Polaris10). */
static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

/* IP block hook: program clock gating for each non-harvested VCE
 * instance; no-op unless AMD_CG_SUPPORT_VCE_MGCG is enabled. */
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	if (adev->asic_type == CHIP_POLARIS10)
		vce_v3_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/* IP block hook: power gating control (body continues past this chunk). */
static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
744aaa36a97SAlex Deucher /* This doesn't actually powergate the VCE block. 745aaa36a97SAlex Deucher * That's done in the dpm code via the SMC. This 746aaa36a97SAlex Deucher * just re-inits the block as necessary. The actual 747aaa36a97SAlex Deucher * gating still happens in the dpm code. We should 748aaa36a97SAlex Deucher * revisit this when there is a cleaner line between 749aaa36a97SAlex Deucher * the smc and the hw blocks 750aaa36a97SAlex Deucher */ 7515fc3aeebSyanyang1 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7525fc3aeebSyanyang1 753e3b04bc7SAlex Deucher if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) 754808a934fSAlex Deucher return 0; 755808a934fSAlex Deucher 7565fc3aeebSyanyang1 if (state == AMD_PG_STATE_GATE) 757aaa36a97SAlex Deucher /* XXX do we need a vce_v3_0_stop()? */ 758aaa36a97SAlex Deucher return 0; 759aaa36a97SAlex Deucher else 760aaa36a97SAlex Deucher return vce_v3_0_start(adev); 761aaa36a97SAlex Deucher } 762aaa36a97SAlex Deucher 7635fc3aeebSyanyang1 const struct amd_ip_funcs vce_v3_0_ip_funcs = { 76488a907d6STom St Denis .name = "vce_v3_0", 765aaa36a97SAlex Deucher .early_init = vce_v3_0_early_init, 766aaa36a97SAlex Deucher .late_init = NULL, 767aaa36a97SAlex Deucher .sw_init = vce_v3_0_sw_init, 768aaa36a97SAlex Deucher .sw_fini = vce_v3_0_sw_fini, 769aaa36a97SAlex Deucher .hw_init = vce_v3_0_hw_init, 770aaa36a97SAlex Deucher .hw_fini = vce_v3_0_hw_fini, 771aaa36a97SAlex Deucher .suspend = vce_v3_0_suspend, 772aaa36a97SAlex Deucher .resume = vce_v3_0_resume, 773aaa36a97SAlex Deucher .is_idle = vce_v3_0_is_idle, 774aaa36a97SAlex Deucher .wait_for_idle = vce_v3_0_wait_for_idle, 775115933a5SChunming Zhou .check_soft_reset = vce_v3_0_check_soft_reset, 776115933a5SChunming Zhou .pre_soft_reset = vce_v3_0_pre_soft_reset, 777aaa36a97SAlex Deucher .soft_reset = vce_v3_0_soft_reset, 778115933a5SChunming Zhou .post_soft_reset = vce_v3_0_post_soft_reset, 779aaa36a97SAlex Deucher .set_clockgating_state = vce_v3_0_set_clockgating_state, 
780aaa36a97SAlex Deucher .set_powergating_state = vce_v3_0_set_powergating_state, 781aaa36a97SAlex Deucher }; 782aaa36a97SAlex Deucher 783aaa36a97SAlex Deucher static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { 784aaa36a97SAlex Deucher .get_rptr = vce_v3_0_ring_get_rptr, 785aaa36a97SAlex Deucher .get_wptr = vce_v3_0_ring_get_wptr, 786aaa36a97SAlex Deucher .set_wptr = vce_v3_0_ring_set_wptr, 787aaa36a97SAlex Deucher .parse_cs = amdgpu_vce_ring_parse_cs, 788aaa36a97SAlex Deucher .emit_ib = amdgpu_vce_ring_emit_ib, 789aaa36a97SAlex Deucher .emit_fence = amdgpu_vce_ring_emit_fence, 790aaa36a97SAlex Deucher .test_ring = amdgpu_vce_ring_test_ring, 791aaa36a97SAlex Deucher .test_ib = amdgpu_vce_ring_test_ib, 792edff0e28SJammy Zhou .insert_nop = amdgpu_ring_insert_nop, 7939e5d5309SChristian König .pad_ib = amdgpu_ring_generic_pad_ib, 794ebff485eSChristian König .begin_use = amdgpu_vce_ring_begin_use, 795ebff485eSChristian König .end_use = amdgpu_vce_ring_end_use, 796aaa36a97SAlex Deucher }; 797aaa36a97SAlex Deucher 798aaa36a97SAlex Deucher static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) 799aaa36a97SAlex Deucher { 80075c65480SAlex Deucher int i; 80175c65480SAlex Deucher 80275c65480SAlex Deucher for (i = 0; i < adev->vce.num_rings; i++) 80375c65480SAlex Deucher adev->vce.ring[i].funcs = &vce_v3_0_ring_funcs; 804aaa36a97SAlex Deucher } 805aaa36a97SAlex Deucher 806aaa36a97SAlex Deucher static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = { 807aaa36a97SAlex Deucher .set = vce_v3_0_set_interrupt_state, 808aaa36a97SAlex Deucher .process = vce_v3_0_process_interrupt, 809aaa36a97SAlex Deucher }; 810aaa36a97SAlex Deucher 811aaa36a97SAlex Deucher static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev) 812aaa36a97SAlex Deucher { 813aaa36a97SAlex Deucher adev->vce.irq.num_types = 1; 814aaa36a97SAlex Deucher adev->vce.irq.funcs = &vce_v3_0_irq_funcs; 815aaa36a97SAlex Deucher }; 816