/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;
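/*
 * nv.c provides the SoC-level glue for the Navi1x family: indirect
 * register accessors, BIOS/ROM helpers, ASIC reset paths, IP block
 * enumeration and the "common" IP block callbacks used by the amdgpu
 * core.
 */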
/*
 * Indirect register accessors
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	WREG32(data, v);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
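/*
 * nv_grbm_select - route banked GC register access to one ME/pipe/queue/VMID
 *
 * Programs GRBM_GFX_CNTL so that subsequent reads/writes of banked GC
 * registers hit the selected micro engine/pipe/queue/vmid.  Callers
 * typically serialize on adev->srbm_mutex and restore the selection to
 * (0, 0, 0, 0) when done, e.g.:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	nv_grbm_select(adev, me, pipe, queue, 0);
 *	... access banked registers ...
 *	nv_grbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */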
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
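/*
 * Whitelist of registers userspace may query through nv_read_register();
 * any offset not listed here is rejected with -EINVAL.
 */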
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
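/*
 * Legacy PCI config reset, kept for reference only; per the FIXME in
 * nv_asic_reset() below, it has not worked since vega10.
 */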
#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
}
#endif

static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	return smu_baco_is_support(smu);
}

static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}
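/*
 * Full ASIC reset: prefer BACO (Bus Active, Chip Off), entered and
 * exited through the SMU, when the reset method resolves to it;
 * otherwise fall back to a PSP-driven mode1 reset.  Either way, VRAM
 * contents are counted as lost unless the device is suspending.
 */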
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
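/*
 * Register base init: prefer the IP discovery table parsed from VRAM
 * by amdgpu_discovery_reg_base_init(), and fall back to the hardcoded
 * per-ASIC offset tables when discovery is disabled or fails.
 */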
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_nv_virt_ops;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	/* TODO */
}
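/*
 * nv_need_reset_on_init() would check the sOS sign-of-life register
 * (MP0_SMN_C2PMSG_81) to detect firmware already running at driver
 * load; the check is kept under #if 0 until mode1 reset is functional.
 */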
static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs interface */
	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
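/*
 * Top-level ASIC callback table handed to the amdgpu core; note that
 * several entries (UVD/VCE clocks, PCIe usage counters) still point at
 * the TODO stubs above.
 */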
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};

static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}
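/*
 * Under SR-IOV, sw_init below registers the host<->guest mailbox IRQ
 * source and late_init (above) requests it once interrupt handling is
 * up.
 */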
static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}
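/*
 * HDP 5.0 memory power gating.  Only one of light sleep (LS), deep
 * sleep (DS) or shutdown (SD) may be active at a time, and the block
 * cannot switch modes dynamically: the IPH/RC memory clocks are forced
 * on and all gating is cleared before the requested mode is written.
 */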
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing the clock/power mode switch,
	 * force on the IPH & RC clocks */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before making any changes */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fallback to ds */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
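/*
 * Common-block clock gating is driven through the NBIO callbacks plus
 * the two HDP helpers above; it is skipped entirely under SR-IOV,
 * where the host owns these registers.
 */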
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};